32 #include <QtCore/QDateTime>
33 #include <QtCore/QSharedPointer>
34 #include <QtCore/QByteArray>
35 #include <QtCore/QFile>
36 #include <QtCore/QAtomicInt>
37 #include <QtCore/QList>
38 #include <QtCore/QMutex>
39 #include <QtCore/QMutexLocker>
41 #include <sys/types.h>
// Fragment of a MurmurHash-style string hash (the multiplier 0xc6a4a793 and
// the seed ^ (len * m) mixing match Austin Appleby's MurmurHashAligned).
// It reads the input via aligned 32-bit loads and stitches words together
// when the pointer is misaligned. NOTE(review): interior lines are missing
// from this excerpt; comments annotate only what is visible.
82 const unsigned int m = 0xc6a4a793;
85 const unsigned char * data =
reinterpret_cast<const unsigned char *
>(key);
87 unsigned int h = seed ^ (len * m); // standard MurmurHash seed mix
89 int align =
reinterpret_cast<quintptr
>(data) & 3; // misalignment of data within a 32-bit word (0..3)
91 if(align & (len >= 4))
95 unsigned int t = 0, d = 0;
// Intentional switch fall-through: accumulate the 1-3 leading bytes.
99 case 1: t |= data[2] << 16;
100 case 2: t |= data[1] << 8;
101 case 3: t |= data[0];
109 int sl = 8 * (4-align); // left-shift to combine two aligned reads
116 d = *
reinterpret_cast<const unsigned int *
>(data); // aligned 32-bit load
117 t = (t >> sr) | (d << sl); // stitch the logical word from t and d
129 int pack = len < align ? len : align; // tail bytes still to pack
// Fall-through again: pack remaining tail bytes, then mix at case 0.
135 case 3: d |= data[2] << 16;
136 case 2: d |= data[1] << 8;
137 case 1: d |= data[0];
138 case 0: h += (t >> sr) | (d << sl);
150 h += *
reinterpret_cast<const unsigned int *
>(data); // already-aligned path: consume whole words directly
// Final 1-3 trailing bytes (fall-through intentional).
164 case 3: h += data[2] << 16;
165 case 2: h += data[1] << 8;
166 case 1: h += data[0];
// ALIGNOF(x): compiler-portable alignment query (pre-C++11 alignof).
198 #if defined(Q_CC_GNU) || defined(Q_CC_SUN)
199 #define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly
// Fallback for compilers without __alignof__: place a T after a char in a
// struct and take offsetof of the T member -- that offset is T's alignment.
205 struct __alignmentHack
209 static const size_t size =
offsetof(__alignmentHack, obj);
211 #define ALIGNOF(x) (__alignmentHack<x>::size)
213 #endif // ALIGNOF undefined
// alignTo<T>: rounds `start` up to the next multiple of `size` and returns
// it as T*. The mask arithmetic assumes `size` is a power of two.
221 quintptr
mask = size - 1;
224 quintptr basePointer =
reinterpret_cast<quintptr
>(start);
228 basePointer = (basePointer + mask) & ~mask; // round up to the alignment boundary
230 return reinterpret_cast<T *
>(basePointer);
// offsetAs<T>(base, offset): advance `base` by `offset` bytes and align the
// result as a T* (const and non-const overloads; both delegate to alignTo).
242 const char *ptr =
reinterpret_cast<const char*
>(base);
243 return alignTo<const T>(ptr + offset);
250 char *ptr =
reinterpret_cast<char *
>(base);
251 return alignTo<T>(ptr + offset);
// intCeil(a, b): integer ceiling of a/b. The visible throw guards a failure
// path (presumably b == 0 or overflow -- the guard's condition line is not
// visible in this excerpt; confirm against full source).
262 throw KSDCCorrupted();
265 return (a + b - 1) / b;
// Population-count loop: clearing the lowest set bit each iteration
// (Kernighan's method) leaves `count` equal to the number of set bits.
277 for (count = 0; value != 0; count++) {
278 value &= (value - 1);
// Entry types living in the shared-memory cache. `mutable` members are
// usage statistics updated even through const access paths.
320 struct IndexTableEntry
324 mutable uint useCount;
326 mutable time_t lastUsedTime;
331 struct PageTableEntry
// Cache header constants and settings; QAtomicInt fields are updated
// atomically since the header is shared across processes.
357 PIXMAP_CACHE_VERSION = 12,
358 MINIMUM_CACHE_SIZE = 4096
372 QAtomicInt evictionPolicy;
382 QAtomicInt cacheTimestamp;
// Maps an expected item size to a cache page size: computes
// floor(log2(itemSize)) and clamps the exponent to [9, 18], i.e. page
// sizes from 512 bytes up to 256 KiB.
387 static unsigned equivalentPageSize(
unsigned itemSize)
394 while ((itemSize >>= 1) != 0) { // count shifts == floor(log2(itemSize))
400 log2OfSize = qBound(9, log2OfSize, 18); // clamp exponent: 512 B .. 256 KiB pages
402 return (1 << log2OfSize);
// Returns the page size recorded in the shared header, first validating it
// against the only values equivalentPageSize() can produce -- another
// process may have corrupted the shared memory.
406 unsigned cachePageSize()
const
408 unsigned _pageSize =
static_cast<unsigned>(pageSize);
410 static const unsigned validSizeMask = 0x7FE00u; // bits 9..18 set: the only legal page sizes
414 throw KSDCCorrupted(); // page size not a power of two in [2^9, 2^18]
// One-time initialization of a freshly created shared-memory segment:
// validates the requested sizes, creates and tests the shared lock, then
// fills in the header fields and clears the tables. Returns false (after
// logging) on any validation failure. NOTE(review): interior lines are
// missing from this excerpt.
432 bool performInitialSetup(uint _cacheSize, uint _pageSize)
434 if (_cacheSize < MINIMUM_CACHE_SIZE) {
435 kError(
ksdcArea()) <<
"Internal error: Attempted to create a cache sized < "
436 << MINIMUM_CACHE_SIZE;
440 if (_pageSize == 0) {
441 kError(
ksdcArea()) <<
"Internal error: Attempted to create a cache with 0-sized pages.";
// No usable lock type could be found for this platform.
447 kError(
ksdcArea()) <<
"Unable to find an appropriate lock to guard the shared cache. "
448 <<
"This *should* be essentially impossible. :(";
452 bool isProcessShared =
false;
455 if (!tempLock->initialize(isProcessShared)) {
456 kError(
ksdcArea()) <<
"Unable to initialize the lock for the cache!";
// Lock works but only within this process; warn, since the cache is
// meant to be shared across processes.
460 if (!isProcessShared) {
462 <<
"shared across processes.";
// Record the final geometry and creation timestamp in the header.
467 cacheSize = _cacheSize;
468 pageSize = _pageSize;
469 version = PIXMAP_CACHE_VERSION;
470 cacheTimestamp =
static_cast<unsigned>(::time(0));
472 clearInternalTables();
// Resets the page table and index table to their empty state: all pages
// free (cacheAvail == total pages) and every index entry invalidated
// (firstPage == -1 marks an unused slot).
481 void clearInternalTables()
484 cacheAvail = pageTableSize(); // every page is free again
487 PageTableEntry *table = pageTable();
488 for (uint i = 0; i < pageTableSize(); ++i) {
493 IndexTableEntry *indices = indexTable();
494 for (uint i = 0; i < indexTableSize(); ++i) {
495 indices[i].firstPage = -1; // -1 == unused index slot
496 indices[i].useCount = 0;
497 indices[i].fileNameHash = 0;
498 indices[i].totalItemSize = 0;
499 indices[i].addTime = 0;
500 indices[i].lastUsedTime = 0;
// Const accessors computing the layout of the shared segment: the index
// table starts right after this header object; the page table follows the
// index table (aligned); the data pages follow the page table, aligned to
// the page size. page(at) bounds-checks before indexing into the pages.
504 const IndexTableEntry *indexTable()
const
508 return offsetAs<IndexTableEntry>(
this,
sizeof(*this)); // index table sits immediately after the header
511 const PageTableEntry *pageTable()
const
513 const IndexTableEntry *base = indexTable();
514 base += indexTableSize();
517 return alignTo<PageTableEntry>(base); // realign after the index entries
520 const void *cachePages()
const
522 const PageTableEntry *tableStart = pageTable();
523 tableStart += pageTableSize();
526 return alignTo<void>(tableStart, cachePageSize()); // data area is page-aligned
529 const void *page(pageID at)
const
531 if (static_cast<uint>(at) >= pageTableSize()) { // bounds check the page id
536 const char *pageStart =
reinterpret_cast<const char *
>(cachePages());
537 pageStart += (at * cachePageSize());
539 return reinterpret_cast<const void *
>(pageStart);
// Non-const accessor overloads: each forwards to the const version via a
// const-qualified `this` and then const_casts the result, so the layout
// logic lives in one place. (Standard "call the const overload" idiom.)
546 IndexTableEntry *indexTable()
548 const SharedMemory *that =
const_cast<const SharedMemory*
>(
this);
549 return const_cast<IndexTableEntry *
>(that->indexTable());
552 PageTableEntry *pageTable()
554 const SharedMemory *that =
const_cast<const SharedMemory*
>(
this);
555 return const_cast<PageTableEntry *
>(that->pageTable());
// (Enclosing function header for this fragment is not visible; by the
// body it is the non-const cachePages() overload -- confirm in full source.)
560 const SharedMemory *that =
const_cast<const SharedMemory*
>(
this);
561 return const_cast<void *
>(that->cachePages());
564 void *page(pageID at)
566 const SharedMemory *that =
const_cast<const SharedMemory*
>(
this);
567 return const_cast<void *
>(that->page(at));
// Derived table sizes: one page-table entry per page, and an index table
// holding half as many entries as there are pages.
570 uint pageTableSize()
const
572 return cacheSize / cachePageSize();
575 uint indexTableSize()
const
579 return pageTableSize() / 2; // one index entry per two pages
// Linear scan for `pagesNeeded` contiguous free pages (index < 0 marks a
// free page). Returns the starting page id on success; returning
// pageTableSize() -- one past the last valid id -- signals failure.
586 pageID findEmptyPages(uint pagesNeeded)
const
589 return pageTableSize(); // early-out failure (guard condition not visible here)
594 const PageTableEntry *table = pageTable();
595 uint contiguousPagesFound = 0;
597 for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) {
598 if (table[i].index < 0) { // free page: extend the current run
599 if (contiguousPagesFound == 0) {
602 contiguousPagesFound++;
605 contiguousPagesFound = 0; // run broken by a used page
608 if (contiguousPagesFound == pagesNeeded) {
613 return pageTableSize(); // no run long enough was found
// Three strict-weak orderings used to sort index entries for eviction.
// All three sort unused entries (firstPage < 0) before used ones, then
// order by their respective criterion: last-use time (LRU), use count
// (seldom-used), or insertion time (oldest-first).
617 static bool lruCompare(
const IndexTableEntry &l,
const IndexTableEntry &r)
620 if (l.firstPage < 0 && r.firstPage >= 0) { // unused entries sort first
623 if (l.firstPage >= 0 && r.firstPage < 0) {
629 return l.lastUsedTime < r.lastUsedTime; // least recently used first
633 static bool seldomUsedCompare(
const IndexTableEntry &l,
const IndexTableEntry &r)
636 if (l.firstPage < 0 && r.firstPage >= 0) {
639 if (l.firstPage >= 0 && r.firstPage < 0) {
644 return l.useCount < r.useCount; // least used first
648 static bool ageCompare(
const IndexTableEntry &l,
const IndexTableEntry &r)
651 if (l.firstPage < 0 && r.firstPage >= 0) {
654 if (l.firstPage >= 0 && r.firstPage < 0) {
660 return l.addTime < r.addTime; // oldest entries first
// defragment(): compacts used pages toward the front of the cache so free
// pages coalesce into one contiguous run. Walks the page table with two
// cursors (freeSpot = first free page, currentPage = next used run),
// memcpy's each used page down, and keeps index-table firstPage pointers
// consistent. Throws KSDCCorrupted on any inconsistency between the page
// and index tables. NOTE(review): interior lines are missing here.
665 if (cacheAvail * cachePageSize() == cacheSize) { // wholly empty: nothing to do
675 pageID currentPage = 0;
676 pageID idLimit =
static_cast<pageID
>(pageTableSize());
677 PageTableEntry *pages = pageTable();
680 throw KSDCCorrupted();
// Skip the leading run of used pages; the first free page becomes freeSpot.
684 while (currentPage < idLimit && pages[currentPage].index >= 0) {
688 pageID freeSpot = currentPage;
692 while (currentPage < idLimit) {
// Find the start of the next used run after the free gap.
694 while (currentPage < idLimit && pages[currentPage].index < 0) {
698 if (currentPage >= idLimit) {
703 qint32 affectedIndex = pages[currentPage].index;
// Sanity: the owning index entry must point back at this page.
705 affectedIndex >= idLimit ||
706 indexTable()[affectedIndex].firstPage != currentPage))
708 throw KSDCCorrupted();
711 indexTable()[affectedIndex].firstPage = freeSpot; // entry now starts at its new home
// Slide each page of this run (and following runs) down into freeSpot.
715 while (currentPage < idLimit && pages[currentPage].index >= 0) {
716 const void *
const sourcePage = page(currentPage);
717 void *
const destinationPage = page(freeSpot);
720 throw KSDCCorrupted();
723 ::memcpy(destinationPage, sourcePage, cachePageSize());
724 pages[freeSpot].index = affectedIndex;
725 pages[currentPage].index = -1; // old slot becomes free
731 if (currentPage >= idLimit) {
// Crossed into the next entry's pages: update its firstPage too.
738 if (affectedIndex != pages[currentPage].index) {
739 indexTable()[pages[currentPage].index].firstPage = freeSpot;
741 affectedIndex = pages[currentPage].index;
// Looks up `key` in the index table using quadratic probing on its hash.
// On a hash match it verifies the stored key bytes in the entry's first
// page actually equal the requested key (hash collisions are possible).
// Returns the index position on success (failure return not visible here).
756 qint32 findNamedEntry(
const QByteArray &key)
const
759 uint position = keyHash % indexTableSize();
760 uint probeNumber = 1;
// Quadratic probe: step by triangular numbers until hash matches or
// the probe limit is reached (loop tail not visible in this excerpt).
766 while (indexTable()[position].fileNameHash != keyHash &&
769 position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
774 if (indexTable()[position].fileNameHash == keyHash) {
775 pageID firstPage = indexTable()[position].firstPage;
776 if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
780 const void *resultPage = page(firstPage);
782 throw KSDCCorrupted();
// The entry's page begins with the NUL-terminated UTF-8 key; compare
// at most one page's worth to confirm this is really our entry.
785 const char *utf8FileName =
reinterpret_cast<const char *
>(resultPage);
786 if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
// deleteTable: array-delete helper used as a QSharedPointer deleter below.
795 static void deleteTable(IndexTableEntry *table) {
// Frees at least `numberNeeded` pages by evicting entries according to the
// configured eviction policy. Strategy: if plenty is already free, just
// defragment; otherwise sort a *copy* of the index table with the policy's
// comparator and evict entries in that order until enough contiguous room
// exists. Throws KSDCCorrupted on impossible requests or table damage.
// NOTE(review): interior lines are missing from this excerpt.
809 uint removeUsedPages(uint numberNeeded)
811 if (numberNeeded == 0) {
812 kError(
ksdcArea()) <<
"Internal error: Asked to remove exactly 0 pages for some reason.";
813 throw KSDCCorrupted();
816 if (numberNeeded > pageTableSize()) {
817 kError(
ksdcArea()) <<
"Internal error: Requested more space than exists in the cache.";
818 kError(
ksdcArea()) << numberNeeded <<
"requested, " << pageTableSize() <<
"is the total possible.";
819 throw KSDCCorrupted();
828 kDebug(
ksdcArea()) <<
"Removing old entries to free up" << numberNeeded <<
"pages,"
829 << cacheAvail <<
"are already theoretically available.";
// Enough free pages exist overall -- fragmentation must be the problem.
831 if (cacheAvail > 3 * numberNeeded) {
833 uint result = findEmptyPages(numberNeeded);
835 if (result < pageTableSize()) {
839 kError(
ksdcArea()) <<
"Just defragmented a locked cache, but still there"
840 <<
"isn't enough room for the current request.";
// Sort a temporary copy of the index table (the live one must keep its
// slot positions); deleteTable is the array deleter for the copy.
847 QSharedPointer<IndexTableEntry> tablePtr(
new IndexTableEntry[indexTableSize()], deleteTable);
850 kError(
ksdcArea()) <<
"Unable to allocate temporary memory for sorting the cache!";
851 clearInternalTables();
852 throw KSDCCorrupted();
857 IndexTableEntry *table = tablePtr.data();
859 ::memcpy(table, indexTable(),
sizeof(IndexTableEntry) * indexTableSize());
// Repurpose firstPage in the copy to remember each entry's original
// slot index (only for in-use entries), so we can evict by slot later.
866 for (uint i = 0; i < indexTableSize(); ++i) {
867 table[i].firstPage = table[i].useCount > 0 ?
static_cast<pageID
>(i)
// Pick the comparator matching the current eviction policy.
873 bool (*compareFunction)(
const IndexTableEntry &,
const IndexTableEntry &);
874 switch((
int) evictionPolicy) {
878 compareFunction = seldomUsedCompare;
882 compareFunction = lruCompare;
886 compareFunction = ageCompare;
890 qSort(table, table + indexTableSize(), compareFunction);
// First pass: evict in sorted order until enough pages are free at all.
901 while (i < indexTableSize() && numberNeeded > cacheAvail) {
902 int curIndex = table[i++].firstPage; // original slot recorded above
905 if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) {
907 <<
"out-of-bounds for index table of size" << indexTableSize();
908 throw KSDCCorrupted();
911 kDebug(
ksdcArea()) <<
"Removing entry of" << indexTable()[curIndex].totalItemSize
913 removeEntry(curIndex);
// Second pass: keep evicting until the free pages are *contiguous*.
920 pageID result = pageTableSize();
921 while (i < indexTableSize() &&
922 (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize())
924 int curIndex = table[i++].firstPage;
929 return findEmptyPages(numberNeeded); // after a defragment (branch context not fully visible)
932 if (
KDE_ISUNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) {
933 throw KSDCCorrupted();
936 removeEntry(curIndex);
// Computes the total byte size a shared segment must have for a given
// cache size and page size, by simulating the layout from address 0:
// header, index table, aligned page table, page data, final alignment.
// The end address (as an integer) is the required segment size.
944 static uint totalSize(uint cacheSize, uint effectivePageSize)
946 uint numberPages =
intCeil(cacheSize, effectivePageSize);
947 uint indexTableSize = numberPages / 2;
// Simulate layout starting from a null base pointer; pointers here are
// never dereferenced, only used for address arithmetic.
952 IndexTableEntry *indexTableStart =
953 offsetAs<IndexTableEntry>(
static_cast<void*
>(0),
sizeof (SharedMemory));
955 indexTableStart += indexTableSize;
957 PageTableEntry *pageTableStart =
reinterpret_cast<PageTableEntry *
>(indexTableStart);
958 pageTableStart = alignTo<PageTableEntry>(pageTableStart);
959 pageTableStart += numberPages;
962 char *cacheStart =
reinterpret_cast<char *
>(pageTableStart);
963 cacheStart += (numberPages * effectivePageSize);
// Round the whole segment up to pointer alignment.
966 cacheStart = alignTo<char>(cacheStart,
ALIGNOF(
void*));
970 return static_cast<uint
>(
reinterpret_cast<quintptr
>(cacheStart));
// fileNameHash / clear / removeEntry declarations (bodies mostly elsewhere).
973 uint fileNameHash(
const QByteArray &utf8FileName)
const
980 clearInternalTables(); // clear() resets both tables
983 void removeEntry(uint index);
// Private implementation of KSharedDataCache: owns the mapping (shm,
// m_mapSize) and the lock. detachFromSharedMemory() unmaps the segment.
988 class KSharedDataCache::Private
992 unsigned defaultCacheSize,
993 unsigned expectedItemSize
999 , m_defaultCacheSize(defaultCacheSize)
1000 , m_expectedItemSize(expectedItemSize)
1010 void detachFromSharedMemory()
// munmap returns 0 on success, so this logs/handles the success branch.
1016 if (shm && !::munmap(shm, m_mapSize)) {
1018 <<
static_cast<void*
>(shm);
// Creates or attaches to the backing file and mmap()s it shared. Handles:
// an existing mapping of a *larger* size (re-map at the larger size), a
// corrupt/mismatched version (recover), mmap failure (fall back to a
// process-private anonymous mapping), and racing initializers (the
// `ready` flag: 0 = uninitialized, 1 = initializing, 2 = ready; spin with
// usleep until another process finishes setup). NOTE(review): interior
// lines are missing from this excerpt.
1028 void mapSharedMemory()
1031 unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
1032 unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);
1037 cacheSize = qMax(pageSize * 256, cacheSize); // ensure a sensible minimum page count
1041 QFile file(cacheName);
1053 if (size < cacheSize) {
1054 kError(
ksdcArea()) <<
"Asked for a cache size less than requested size somehow -- Logic Error :(";
// Create/extend the backing file, then map it shared.
1061 if (file.open(QIODevice::ReadWrite) &&
1062 (file.size() >= size || file.resize(size)) &&
1068 mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
1074 SharedMemory *mapped =
reinterpret_cast<SharedMemory *
>(mapAddress);
// version > 0 but wrong: an incompatible cache already exists.
1079 if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
1080 mapped->version > 0)
1088 recoverCorruptedCache();
1091 else if (mapped->cacheSize > cacheSize) {
// Existing cache is larger than requested: adopt its geometry and
// remap at the larger size.
1095 cacheSize = mapped->cacheSize;
1096 unsigned actualPageSize = mapped->cachePageSize();
1097 ::munmap(mapAddress, size);
1099 mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);
// Shared mapping failed entirely: private anonymous fallback (no
// cross-process sharing, higher memory use).
1120 kWarning(
ksdcArea()) <<
"Failed to establish shared memory mapping, will fallback"
1121 <<
"to private memory -- memory usage will increase";
1123 mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1129 kError(
ksdcArea()) <<
"Unable to allocate shared memory segment for shared data cache"
1130 << cacheName <<
"of size" << cacheSize;
1139 shm =
reinterpret_cast<SharedMemory *
>(mapAddress);
// Wait (with backoff) for whichever process won the init race.
1147 uint usecSleepTime = 8;
1148 while (shm->ready != 2) {
1151 kError(
ksdcArea()) <<
"Unable to acquire shared lock, is the cache corrupt?";
1154 detachFromSharedMemory();
// Atomically claim the initializer role (0 -> 1) exactly once.
1158 if (shm->ready.testAndSetAcquire(0, 1)) {
1159 if (!shm->performInitialSetup(cacheSize, pageSize)) {
1160 kError(
ksdcArea()) <<
"Unable to perform initial setup, this system probably "
1161 "does not really support process-shared pthreads or "
1162 "semaphores, even though it claims otherwise.";
1165 detachFromSharedMemory();
1170 usleep(usecSleepTime); // back off while another process initializes
// Mapping is live: adopt the lock type recorded in shared memory.
1177 m_expectedType = shm->shmLock.type;
1178 m_lock = QSharedPointer<KSDCLock>(
createLockFromId(m_expectedType, shm->shmLock));
1179 bool isProcessSharingSupported =
false;
1181 if (!m_lock->initialize(isProcessSharingSupported)) {
1182 kError(
ksdcArea()) <<
"Unable to setup shared cache lock, although it worked when created.";
1183 detachFromSharedMemory();
// recoverCorruptedCache(): drop the damaged mapping (remainder not visible).
1189 void recoverCorruptedCache()
1193 detachFromSharedMemory();
// verifyProposedMemoryAccess(base, len): bounds-checks that the half-open
// range [base, base+len) lies entirely inside the mapped segment, with an
// explicit overflow check on end-of-access. Throws KSDCCorrupted if not --
// defense against corrupted offsets read from shared memory.
1207 void verifyProposedMemoryAccess(
const void *base,
unsigned accessLength)
const
1209 quintptr startOfAccess =
reinterpret_cast<quintptr
>(base);
1210 quintptr startOfShm =
reinterpret_cast<quintptr
>(shm);
1213 throw KSDCCorrupted(); // access begins before the mapping
1216 quintptr endOfShm = startOfShm + m_mapSize;
1217 quintptr endOfAccess = startOfAccess + accessLength;
// endOfAccess < startOfAccess detects unsigned wrap-around.
1222 (endOfAccess < startOfAccess) ||
1223 (endOfAccess > endOfShm)))
1225 throw KSDCCorrupted();
// lock(): only trust the lock if the shared lock type still matches what
// we attached with; a changed type means the segment was re-created.
1231 if (
KDE_ISLIKELY(shm && shm->shmLock.type == m_expectedType)) {
1232 return m_lock->lock();
1236 throw KSDCCorrupted();
// CacheLocker: RAII guard that acquires the cache lock on construction,
// retrying through recovery when the cache is corrupt and re-mapping when
// another process has grown the segment. failed() reports whether a
// usable locked mapping was obtained. NOTE(review): interior lines are
// missing from this excerpt.
1246 mutable Private * d;
// cautiousLock(): attempt the lock, recovering the cache on failure, with
// a retry cap so a permanently broken cache is eventually abandoned.
1255 while (!d->lock()) {
1256 d->recoverCorruptedCache();
1264 if (lockCount++ > 4) { // give up after repeated recovery failures
1265 kError(
ksdcArea()) <<
"There is a very serious problem with the KDE data cache"
1266 << d->m_cacheName <<
"giving up trying to access cache.";
1267 d->detachFromSharedMemory();
1276 CacheLocker(
const Private *_d) : d(const_cast<Private *>(_d))
// Another process may have enlarged the cache: re-map at the new size.
1286 while (testSize > d->m_mapSize) {
1288 <<
"attempting to match new configuration.";
// Serialize the remap against other threads in this process.
1293 QMutexLocker d_locker(&d->m_threadLock);
1294 if (testSize == d->m_mapSize) { // someone else already remapped
1302 #ifdef KSDC_MSYNC_SUPPORTED
1303 ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
1305 ::munmap(d->shm, d->m_mapSize);
1309 QFile f(d->m_cacheName);
1310 if (!f.open(QFile::ReadWrite)) {
1312 <<
"the connection had to be dropped for"
1313 <<
"crash safety -- things will be much"
1318 void *newMap = ::mmap(0, testSize, PROT_READ | PROT_WRITE,
1319 MAP_SHARED, f.handle(), 0);
1322 <<
"things will be much slower now";
1326 d->shm =
reinterpret_cast<SharedMemory *
>(newMap);
1327 d->m_mapSize = testSize;
1329 if (!cautiousLock()) {
// failed(): true when there is no usable mapping to operate on.
1346 return !d || d->shm == 0;
// Private data members.
1351 QMutex m_threadLock;
1353 QSharedPointer<KSDCLock> m_lock;
1355 uint m_defaultCacheSize;
1356 uint m_expectedItemSize;
// Removes the entry in index slot `index`: validates the index/page cross
// links, frees the entry's contiguous run of pages, checks the freed count
// matches the entry's recorded size, optionally overwrites the key bytes
// with a debug marker, and finally resets the index slot. Throws
// KSDCCorrupted on any inconsistency. NOTE(review): interior lines are
// missing from this excerpt.
1361 void SharedMemory::removeEntry(uint index)
1363 if (index >= indexTableSize() || cacheAvail > pageTableSize()) {
1364 throw KSDCCorrupted();
1367 PageTableEntry *pageTableEntries = pageTable();
1368 IndexTableEntry *entriesIndex = indexTable();
1371 pageID firstPage = entriesIndex[index].firstPage;
1372 if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
1373 kDebug(
ksdcArea()) <<
"Trying to remove an entry which is already invalid. This "
1374 <<
"cache is likely corrupt.";
1375 throw KSDCCorrupted();
// The first page must link back to this index slot.
1378 if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
1379 kError(
ksdcArea()) <<
"Removing entry" << index <<
"but the matching data"
1380 <<
"doesn't link back -- cache is corrupt, clearing.";
1381 throw KSDCCorrupted();
1384 uint entriesToRemove =
intCeil(entriesIndex[index].totalItemSize, cachePageSize());
1385 uint savedCacheSize = cacheAvail;
// Free the run of pages belonging to this entry.
1386 for (uint i = firstPage; i < pageTableSize() &&
1387 (uint) pageTableEntries[i].index == index; ++i)
1389 pageTableEntries[i].index = -1;
// Cross-check: the number of freed pages must match the entry's size.
1393 if ((cacheAvail - savedCacheSize) != entriesToRemove) {
1394 kError(
ksdcArea()) <<
"We somehow did not remove" << entriesToRemove
1395 <<
"when removing entry" << index <<
", instead we removed"
1396 << (cacheAvail - savedCacheSize);
1397 throw KSDCCorrupted();
// Debug aid: stamp the (now-free) first page with a removal marker so
// stale reads are recognizable.
1402 void *
const startOfData = page(firstPage);
1404 QByteArray str((
const char *) startOfData);
1405 str.prepend(
" REMOVED: ");
1406 str.prepend(QByteArray::number(index));
1407 str.prepend(
"ENTRY ");
1409 ::memcpy(startOfData, str.constData(), str.size() + 1);
// Reset the index slot to the empty state.
1414 entriesIndex[index].fileNameHash = 0;
1415 entriesIndex[index].totalItemSize = 0;
1416 entriesIndex[index].useCount = 0;
1417 entriesIndex[index].lastUsedTime = 0;
1418 entriesIndex[index].addTime = 0;
1419 entriesIndex[index].firstPage = -1;
// KSharedDataCache constructor: builds the Private (which maps the shared
// memory). If construction throws KSDCCorrupted, the cache is deleted and
// construction retried once from scratch. The destructor syncs (where
// supported) and unmaps.
1423 unsigned defaultCacheSize,
1424 unsigned expectedItemSize)
1428 d =
new Private(cacheName, defaultCacheSize, expectedItemSize);
1430 catch(KSDCCorrupted) {
// Second (and last) attempt after deleting the corrupt cache file.
1435 d =
new Private(cacheName, defaultCacheSize, expectedItemSize);
1437 catch(KSDCCorrupted) {
1439 <<
"Even a brand-new cache starts off corrupted, something is"
1440 <<
"seriously wrong. :-(";
// Destructor: flush dirty pages (if msync is available) and unmap.
1456 #ifdef KSDC_MSYNC_SUPPORTED
1457 ::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);
1459 ::munmap(d->shm, d->m_mapSize);
// insert(key, data): stores key (UTF-8, NUL-terminated) followed by the
// payload in a contiguous run of pages. Uses quadratic probing to find an
// index slot, probabilistically evicting colliding entries as the cache
// fills (load-factor-based culling), then defragments / evicts pages as
// needed to get contiguous room. NOTE(review): interior lines are missing
// from this excerpt.
1471 Private::CacheLocker lock(d);
1472 if (lock.failed()) {
1476 QByteArray encodedKey = key.toUtf8();
1478 uint position = keyHash % d->shm->indexTableSize();
1481 IndexTableEntry *indices = d->shm->indexTable();
// Culling thresholds: start randomly evicting colliding entries above
// 50% load, always evict above 96%.
1488 const static double startCullPoint = 0.5l;
1489 const static double mustCullPoint = 0.96l;
1492 double loadFactor = 1.0 - (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
1493 / d->shm->cacheSize);
1494 bool cullCollisions =
false;
1497 cullCollisions =
true;
1499 else if (loadFactor > startCullPoint) {
// Probability of culling scales linearly between the two thresholds.
1500 const int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
1502 cullCollisions =
true;
// Quadratic probe for a free slot, halving useCount of stale colliding
// entries (>60s unused) and removing them when the count hits zero.
1510 uint probeNumber = 1;
1511 while (indices[position].useCount > 0 && probeNumber <
MAX_PROBE_COUNT) {
1523 if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
1524 indices[position].useCount >>= 1;
1525 if (indices[position].useCount == 0) {
1526 kDebug(
ksdcArea()) <<
"Overwriting existing old cached entry due to collision.";
1527 d->shm->removeEntry(position);
1533 position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
1534 % d->shm->indexTableSize();
// Probing exhausted: overwrite whatever occupies the final slot.
1538 if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
1539 kDebug(
ksdcArea()) <<
"Overwriting existing cached entry due to collision.";
1540 d->shm->removeEntry(position);
// Space needed: NUL-terminated key plus payload, in whole pages.
1546 uint fileNameLength = 1 + encodedKey.length();
1547 uint requiredSize = fileNameLength + data.size();
1548 uint pagesNeeded =
intCeil(requiredSize, d->shm->cachePageSize());
1549 uint firstPage = (uint) -1;
1551 if (pagesNeeded >= d->shm->pageTableSize()) { // item larger than the whole cache
// No contiguous run available: defragment if enough is free overall,
// otherwise evict used pages first.
1558 if (pagesNeeded > d->shm->cacheAvail ||
1559 (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())
1562 uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2); // free extra headroom
1564 if (d->shm->cacheAvail > freePagesDesired) {
1567 d->shm->defragment();
1568 d->shm->findEmptyPages(pagesNeeded); // (see next line -- assignment form below)
1575 d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
1576 - d->shm->cacheAvail);
1577 firstPage = d->shm->findEmptyPages(pagesNeeded);
1580 if (firstPage >= d->shm->pageTableSize() ||
1581 d->shm->cacheAvail < pagesNeeded)
// Claim the pages and fill in the index entry.
1589 PageTableEntry *table = d->shm->pageTable();
1590 for (uint i = 0; i < pagesNeeded; ++i) {
1591 table[firstPage + i].index = position;
1595 indices[position].fileNameHash = keyHash;
1596 indices[position].totalItemSize = requiredSize;
1597 indices[position].useCount = 1;
1598 indices[position].addTime = ::time(0);
1599 indices[position].lastUsedTime = indices[position].addTime;
1600 indices[position].firstPage = firstPage;
1603 d->shm->cacheAvail -= pagesNeeded;
// Verify the destination range before writing, then copy key + payload.
1606 void *dataPage = d->shm->page(firstPage);
1608 throw KSDCCorrupted();
1612 d->verifyProposedMemoryAccess(dataPage, requiredSize);
1615 uchar *startOfPageData =
reinterpret_cast<uchar *
>(dataPage);
1616 ::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
1617 ::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());
1621 catch(KSDCCorrupted) {
1622 d->recoverCorruptedCache();
// find/clear/contains/totalSize/freeSize and the atomic policy/timestamp
// accessors. Each locking method follows the same pattern: take a
// CacheLocker, bail out if it failed, and recover the cache on
// KSDCCorrupted. NOTE(review): interior lines are missing here.
1630 Private::CacheLocker lock(d);
1631 if (lock.failed()) {
1636 QByteArray encodedKey = key.toUtf8();
1637 qint32 entry = d->shm->findNamedEntry(encodedKey);
1640 const IndexTableEntry *
header = &d->shm->indexTable()[entry];
1641 const void *resultPage = d->shm->page(header->firstPage);
1643 throw KSDCCorrupted();
// Validate the whole item's range before reading it out.
1646 d->verifyProposedMemoryAccess(resultPage, header->totalItemSize);
1649 header->lastUsedTime = ::time(0); // mutable member: OK through const ptr
// Payload starts after the stored NUL-terminated key.
1653 const char *cacheData =
reinterpret_cast<const char *
>(resultPage);
1654 cacheData += encodedKey.size();
1658 *destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);
1664 catch(KSDCCorrupted) {
1665 d->recoverCorruptedCache();
// clear(): wipe all entries under the lock.
1674 Private::CacheLocker lock(d);
1676 if(!lock.failed()) {
1680 catch(KSDCCorrupted) {
1681 d->recoverCorruptedCache();
// contains(key): true when the key resolves to a valid index entry.
1688 Private::CacheLocker lock(d);
1689 if (lock.failed()) {
1693 return d->shm->findNamedEntry(key.toUtf8()) >= 0;
1695 catch(KSDCCorrupted) {
1696 d->recoverCorruptedCache();
// deleteCache(): remove the backing file from disk.
1709 QFile::remove(cachePath);
// totalSize(): configured cache capacity in bytes.
1715 Private::CacheLocker lock(d);
1716 if (lock.failed()) {
1720 return d->shm->cacheSize;
1722 catch(KSDCCorrupted) {
1723 d->recoverCorruptedCache();
// freeSize(): free pages times page size, in bytes.
1731 Private::CacheLocker lock(d);
1732 if (lock.failed()) {
1736 return d->shm->cacheAvail * d->shm->cachePageSize();
1738 catch(KSDCCorrupted) {
1739 d->recoverCorruptedCache();
// Atomic accessors: fetchAndAddAcquire(0) is an atomic read; the setters
// use release stores so other processes observe the new value.
1747 return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));
1756 d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));
1763 return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));
1772 d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));