#include <QtCore/QDateTime>
#include <QtCore/QSharedPointer>
#include <QtCore/QByteArray>
#include <QtCore/QFile>
#include <QtCore/QAtomicInt>
#include <QtCore/QList>
#include <QtCore/QMutex>
#include <QtCore/QMutexLocker>

#include <sys/types.h>
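// The fragments below come from the aligned MurmurHash variant used to hash
// cache keys: the key is mixed four bytes at a time, with an unaligned prefix
// and any leftover tail bytes handled separately.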
const unsigned int m = 0xc6a4a793;

const unsigned char * data = reinterpret_cast<const unsigned char *>(key);

unsigned int h = seed ^ (len * m);

int align = reinterpret_cast<quintptr>(data) & 3;
if (align && (len >= 4))
unsigned int t = 0, d = 0;

case 1: t |= data[2] << 16;
case 2: t |= data[1] << 8;
case 3: t |= data[0];

int sl = 8 * (4-align);

d = *reinterpret_cast<const unsigned int *>(data);
t = (t >> sr) | (d << sl);

int pack = len < align ? len : align;

case 3: d |= data[2] << 16;
case 2: d |= data[1] << 8;
case 1: d |= data[0];
case 0: h += (t >> sr) | (d << sl);

h += *reinterpret_cast<const unsigned int *>(data);

case 3: h += data[2] << 16;
case 2: h += data[1] << 8;
case 1: h += data[0];
#if defined(Q_CC_GNU) || defined(Q_CC_SUN)
#define ALIGNOF(x) (__alignof__ (x)) // GCC provides what we want directly

struct __alignmentHack

static const size_t size = offsetof(__alignmentHack, obj);

#define ALIGNOF(x) (__alignmentHack<x>::size)

#endif // ALIGNOF undefined
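// alignTo: rounds a pointer up to the next multiple of a power-of-two size
// using the (p + mask) & ~mask trick; offsetAs applies a byte offset and then
// aligns the result for the requested type.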
quintptr mask = size - 1;

quintptr basePointer = reinterpret_cast<quintptr>(start);

basePointer = (basePointer + mask) & ~mask;

return reinterpret_cast<T *>(basePointer);

const char *ptr = reinterpret_cast<const char*>(base);
return alignTo<const T>(ptr + offset);

char *ptr = reinterpret_cast<char *>(base);
return alignTo<T>(ptr + offset);
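// intCeil: ceiling integer division (a / b rounded up), used to convert byte
// counts into page counts.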
static unsigned intCeil(unsigned a, unsigned b)

throw KSDCCorrupted();

return (a + b - 1) / b;
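// Counts set bits by repeatedly clearing the lowest set bit (Kernighan's method).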
for (count = 0; value != 0; count++) {
value &= (value - 1);
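// Index and page table entries stored in the shared memory header. The usage
// fields are mutable so that read-only lookup paths can still update them.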
struct IndexTableEntry

mutable uint useCount;
mutable time_t lastUsedTime;

struct PageTableEntry

PIXMAP_CACHE_VERSION = 12,
MINIMUM_CACHE_SIZE = 4096

QAtomicInt evictionPolicy;
QAtomicInt cacheTimestamp;
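// equivalentPageSize: picks a power-of-two page size for the given item size,
// clamped between 512 bytes (2^9) and 256 KiB (2^18).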
static unsigned equivalentPageSize(unsigned itemSize)

while ((itemSize >>= 1) != 0) {

log2OfSize = qBound(9, log2OfSize, 18);

return (1 << log2OfSize);

unsigned cachePageSize() const

unsigned _pageSize = static_cast<unsigned>(pageSize);

static const unsigned validSizeMask = 0x7FE00u;

throw KSDCCorrupted();
bool performInitialSetup(uint _cacheSize, uint _pageSize)

if (_cacheSize < MINIMUM_CACHE_SIZE) {
kError(ksdcArea()) << "Internal error: Attempted to create a cache sized < "
                   << MINIMUM_CACHE_SIZE;

if (_pageSize == 0) {
kError(ksdcArea()) << "Internal error: Attempted to create a cache with 0-sized pages.";

kError(ksdcArea()) << "Unable to find an appropriate lock to guard the shared cache. "
                   << "This *should* be essentially impossible. :(";

bool isProcessShared = false;

if (!tempLock->initialize(isProcessShared)) {
kError(ksdcArea()) << "Unable to initialize the lock for the cache!";

if (!isProcessShared) {
    << "shared across processes.";

cacheSize = _cacheSize;
pageSize = _pageSize;
version = PIXMAP_CACHE_VERSION;
cacheTimestamp = static_cast<unsigned>(::time(0));

clearInternalTables();
void clearInternalTables()

cacheAvail = pageTableSize();

PageTableEntry *table = pageTable();
for (uint i = 0; i < pageTableSize(); ++i) {

IndexTableEntry *indices = indexTable();
for (uint i = 0; i < indexTableSize(); ++i) {
indices[i].firstPage = -1;
indices[i].useCount = 0;
indices[i].fileNameHash = 0;
indices[i].totalItemSize = 0;
indices[i].addTime = 0;
indices[i].lastUsedTime = 0;
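// Accessors for the regions that follow the SharedMemory header in the
// mapping: the index table, then the page table, then the page data itself,
// aligned to the cache page size.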
const IndexTableEntry *indexTable() const

return offsetAs<IndexTableEntry>(this, sizeof(*this));

const PageTableEntry *pageTable() const

const IndexTableEntry *base = indexTable();
base += indexTableSize();

return alignTo<PageTableEntry>(base);

const void *cachePages() const

const PageTableEntry *tableStart = pageTable();
tableStart += pageTableSize();

return alignTo<void>(tableStart, cachePageSize());

const void *page(pageID at) const

if (static_cast<uint>(at) >= pageTableSize()) {

const char *pageStart = reinterpret_cast<const char *>(cachePages());
pageStart += (at * cachePageSize());

return reinterpret_cast<const void *>(pageStart);
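// The non-const variants forward to the const accessors above and cast the
// constness away, so the layout logic lives in one place.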
IndexTableEntry *indexTable()

const SharedMemory *that = const_cast<const SharedMemory*>(this);
return const_cast<IndexTableEntry *>(that->indexTable());

PageTableEntry *pageTable()

const SharedMemory *that = const_cast<const SharedMemory*>(this);
return const_cast<PageTableEntry *>(that->pageTable());

const SharedMemory *that = const_cast<const SharedMemory*>(this);
return const_cast<void *>(that->cachePages());

void *page(pageID at)

const SharedMemory *that = const_cast<const SharedMemory*>(this);
return const_cast<void *>(that->page(at));

uint pageTableSize() const

return cacheSize / cachePageSize();

uint indexTableSize() const

return pageTableSize() / 2;
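// findEmptyPages: linear scan for a run of pagesNeeded consecutive free pages;
// returns pageTableSize() as a "not found" sentinel.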
pageID findEmptyPages(uint pagesNeeded) const

return pageTableSize();

const PageTableEntry *table = pageTable();
uint contiguousPagesFound = 0;

for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) {
if (table[i].index < 0) {
if (contiguousPagesFound == 0) {

contiguousPagesFound++;

contiguousPagesFound = 0;

if (contiguousPagesFound == pagesNeeded) {

return pageTableSize();
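// Eviction comparators: order entries by last-used time, use count, or
// insertion time depending on the active policy; entries that own no pages are
// handled specially so they do not interleave with live entries.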
static bool lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)

if (l.firstPage < 0 && r.firstPage >= 0) {
if (l.firstPage >= 0 && r.firstPage < 0) {

return l.lastUsedTime < r.lastUsedTime;

static bool seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)

if (l.firstPage < 0 && r.firstPage >= 0) {
if (l.firstPage >= 0 && r.firstPage < 0) {

return l.useCount < r.useCount;

static bool ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)

if (l.firstPage < 0 && r.firstPage >= 0) {
if (l.firstPage >= 0 && r.firstPage < 0) {

return l.addTime < r.addTime;
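// Defragmentation: slides each used page down into the earliest free slot and
// keeps the owning entry's firstPage pointer in sync, so the free space ends
// up as one contiguous region.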
if (cacheAvail * cachePageSize() == cacheSize) {

pageID currentPage = 0;
pageID idLimit = static_cast<pageID>(pageTableSize());
PageTableEntry *pages = pageTable();

throw KSDCCorrupted();

while (currentPage < idLimit && pages[currentPage].index >= 0) {

pageID freeSpot = currentPage;

while (currentPage < idLimit) {

while (currentPage < idLimit && pages[currentPage].index < 0) {

if (currentPage >= idLimit) {

qint32 affectedIndex = pages[currentPage].index;

    affectedIndex >= idLimit ||
    indexTable()[affectedIndex].firstPage != currentPage))

throw KSDCCorrupted();

indexTable()[affectedIndex].firstPage = freeSpot;

while (currentPage < idLimit && pages[currentPage].index >= 0) {
const void *const sourcePage = page(currentPage);
void *const destinationPage = page(freeSpot);

throw KSDCCorrupted();

::memcpy(destinationPage, sourcePage, cachePageSize());
pages[freeSpot].index = affectedIndex;
pages[currentPage].index = -1;

if (currentPage >= idLimit) {

if (affectedIndex != pages[currentPage].index) {
indexTable()[pages[currentPage].index].firstPage = freeSpot;

affectedIndex = pages[currentPage].index;
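// findNamedEntry: quadratic probing over the index table, stepping by
// (i + i^2)/2 slots per probe, then confirming a hash match by comparing the
// key stored at the start of the entry's first page.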
qint32 findNamedEntry(const QByteArray &key) const

uint position = keyHash % indexTableSize();
uint probeNumber = 1;

while (indexTable()[position].fileNameHash != keyHash &&

position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)

if (indexTable()[position].fileNameHash == keyHash) {
pageID firstPage = indexTable()[position].firstPage;
if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {

const void *resultPage = page(firstPage);

throw KSDCCorrupted();

const char *utf8FileName = reinterpret_cast<const char *>(resultPage);
if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
static void deleteTable(IndexTableEntry *table) {
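// removeUsedPages: sorts a copy of the index table according to the active
// eviction policy and evicts entries until enough contiguous pages are free,
// defragmenting first when plenty of space is merely fragmented.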
uint removeUsedPages(uint numberNeeded)

if (numberNeeded == 0) {
kError(ksdcArea()) << "Internal error: Asked to remove exactly 0 pages for some reason.";
throw KSDCCorrupted();

if (numberNeeded > pageTableSize()) {
kError(ksdcArea()) << "Internal error: Requested more space than exists in the cache.";
kError(ksdcArea()) << numberNeeded << "requested, " << pageTableSize() << "is the total possible.";
throw KSDCCorrupted();

kDebug(ksdcArea()) << "Removing old entries to free up" << numberNeeded << "pages,"
                   << cacheAvail << "are already theoretically available.";

if (cacheAvail > 3 * numberNeeded) {

uint result = findEmptyPages(numberNeeded);

if (result < pageTableSize()) {

kError(ksdcArea()) << "Just defragmented a locked cache, but still there"
                   << "isn't enough room for the current request.";

QSharedPointer<IndexTableEntry> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);

kError(ksdcArea()) << "Unable to allocate temporary memory for sorting the cache!";
clearInternalTables();
throw KSDCCorrupted();

IndexTableEntry *table = tablePtr.data();

::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());

for (uint i = 0; i < indexTableSize(); ++i) {
table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i)

bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
switch ((int) evictionPolicy) {

compareFunction = seldomUsedCompare;

compareFunction = lruCompare;

compareFunction = ageCompare;

qSort(table, table + indexTableSize(), compareFunction);

while (i < indexTableSize() && numberNeeded > cacheAvail) {
int curIndex = table[i++].firstPage;

if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) {
    << "out-of-bounds for index table of size" << indexTableSize();
throw KSDCCorrupted();

kDebug(ksdcArea()) << "Removing entry of" << indexTable()[curIndex].totalItemSize

removeEntry(curIndex);

pageID result = pageTableSize();
while (i < indexTableSize() &&
       (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize())

int curIndex = table[i++].firstPage;

return findEmptyPages(numberNeeded);

if (KDE_ISUNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) {
throw KSDCCorrupted();

removeEntry(curIndex);
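// totalSize: computes the full mapping size by laying out the SharedMemory
// header, index table, page table, and page data in sequence, applying the
// same alignment adjustments the accessors use.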
static uint totalSize(uint cacheSize, uint effectivePageSize)

uint numberPages = intCeil(cacheSize, effectivePageSize);
uint indexTableSize = numberPages / 2;

IndexTableEntry *indexTableStart =
    offsetAs<IndexTableEntry>(static_cast<void*>(0), sizeof (SharedMemory));

indexTableStart += indexTableSize;

PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
pageTableStart = alignTo<PageTableEntry>(pageTableStart);
pageTableStart += numberPages;

char *cacheStart = reinterpret_cast<char *>(pageTableStart);
cacheStart += (numberPages * effectivePageSize);

cacheStart = alignTo<char>(cacheStart, ALIGNOF(void*));

return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
uint fileNameHash(const QByteArray &utf8FileName) const

clearInternalTables();

void removeEntry(uint index);
class KSharedDataCache::Private

unsigned defaultCacheSize,
unsigned expectedItemSize

, m_defaultCacheSize(defaultCacheSize)
, m_expectedItemSize(expectedItemSize)
void detachFromSharedMemory()

if (shm && 0 != ::munmap(shm, m_mapSize)) {
    << static_cast<void*>(shm) << ":" << ::strerror(errno);
void mapSharedMemory()

unsigned cacheSize = qMax(m_defaultCacheSize, uint(SharedMemory::MINIMUM_CACHE_SIZE));
unsigned pageSize = SharedMemory::equivalentPageSize(m_expectedItemSize);

cacheSize = qMax(pageSize * 256, cacheSize);

QFile file(cacheName);

uint size = SharedMemory::totalSize(cacheSize, pageSize);

if (size < cacheSize) {
kError(ksdcArea()) << "Asked for a cache size less than requested size somehow -- Logic Error :(";

if (file.open(QIODevice::ReadWrite) &&
    (file.size() >= size ||

mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);

SharedMemory *mapped = reinterpret_cast<SharedMemory *>(mapAddress);

if (mapped->version != SharedMemory::PIXMAP_CACHE_VERSION &&
    mapped->version > 0)

recoverCorruptedCache();

else if (mapped->cacheSize > cacheSize) {

cacheSize = mapped->cacheSize;
unsigned actualPageSize = mapped->cachePageSize();
::munmap(mapAddress, size);
size = SharedMemory::totalSize(cacheSize, actualPageSize);
mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, file.handle(), 0);

kWarning(ksdcArea()) << "Failed to establish shared memory mapping, will fallback"
                     << "to private memory -- memory usage will increase";

mapAddress = ::mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

kError(ksdcArea()) << "Unable to allocate shared memory segment for shared data cache"
                   << cacheName << "of size" << cacheSize;

shm = reinterpret_cast<SharedMemory *>(mapAddress);

uint usecSleepTime = 8;
while (shm->ready != 2) {

kError(ksdcArea()) << "Unable to acquire shared lock, is the cache corrupt?";

detachFromSharedMemory();

if (shm->ready.testAndSetAcquire(0, 1)) {
if (!shm->performInitialSetup(cacheSize, pageSize)) {
kError(ksdcArea()) << "Unable to perform initial setup, this system probably "
                      "does not really support process-shared pthreads or "
                      "semaphores, even though it claims otherwise.";

detachFromSharedMemory();

usleep(usecSleepTime);

m_expectedType = shm->shmLock.type;
m_lock = QSharedPointer<KSDCLock>(createLockFromId(m_expectedType, shm->shmLock));
bool isProcessSharingSupported = false;

if (!m_lock->initialize(isProcessSharingSupported)) {
kError(ksdcArea()) << "Unable to setup shared cache lock, although it worked when created.";
detachFromSharedMemory();
void recoverCorruptedCache()

detachFromSharedMemory();
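// verifyProposedMemoryAccess: bounds-checks a proposed read or write against
// the mapped region and throws KSDCCorrupted on overflow or out-of-range
// access, so corrupted offsets never become wild pointer dereferences.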
void verifyProposedMemoryAccess(const void *base, unsigned accessLength) const

quintptr startOfAccess = reinterpret_cast<quintptr>(base);
quintptr startOfShm = reinterpret_cast<quintptr>(shm);

throw KSDCCorrupted();

quintptr endOfShm = startOfShm + m_mapSize;
quintptr endOfAccess = startOfAccess + accessLength;

    (endOfAccess < startOfAccess) ||
    (endOfAccess > endOfShm)))

throw KSDCCorrupted();

if (KDE_ISLIKELY(shm && shm->shmLock.type == m_expectedType)) {
return m_lock->lock();

throw KSDCCorrupted();
mutable Private * d;

while (!d->lock()) {
d->recoverCorruptedCache();

if (lockCount++ > 4) {
kError(ksdcArea()) << "There is a very serious problem with the KDE data cache"
                   << d->m_cacheName << "giving up trying to access cache.";
d->detachFromSharedMemory();

CacheLocker(const Private *_d) : d(const_cast<Private *>(_d))

uint testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());

while (testSize > d->m_mapSize) {
    << "attempting to match new configuration.";
QMutexLocker d_locker(&d->m_threadLock);
if (testSize == d->m_mapSize) {

#ifdef KSDC_MSYNC_SUPPORTED
::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);

::munmap(d->shm, d->m_mapSize);

QFile f(d->m_cacheName);
if (!f.open(QFile::ReadWrite)) {
    << "the connection had to be dropped for"
    << "crash safety -- things will be much"

void *newMap = ::mmap(0, testSize, PROT_READ | PROT_WRITE,
                      MAP_SHARED, f.handle(), 0);

    << "things will be much slower now";

d->shm = reinterpret_cast<SharedMemory *>(newMap);
d->m_mapSize = testSize;

if (!cautiousLock()) {

testSize = SharedMemory::totalSize(d->shm->cacheSize, d->shm->cachePageSize());

return !d || d->shm == 0;
QMutex m_threadLock;

QSharedPointer<KSDCLock> m_lock;

uint m_defaultCacheSize;
uint m_expectedItemSize;
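// SharedMemory::removeEntry: releases every page belonging to the entry,
// verifies the page/index cross-links, overwrites the start of the data with a
// removal marker, and resets the index slot.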
void SharedMemory::removeEntry(uint index)

if (index >= indexTableSize() || cacheAvail > pageTableSize()) {
throw KSDCCorrupted();

PageTableEntry *pageTableEntries = pageTable();
IndexTableEntry *entriesIndex = indexTable();

pageID firstPage = entriesIndex[index].firstPage;
if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
kDebug(ksdcArea()) << "Trying to remove an entry which is already invalid. This "
                   << "cache is likely corrupt.";
throw KSDCCorrupted();

if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
kError(ksdcArea()) << "Removing entry" << index << "but the matching data"
                   << "doesn't link back -- cache is corrupt, clearing.";
throw KSDCCorrupted();

uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
uint savedCacheSize = cacheAvail;
for (uint i = firstPage; i < pageTableSize() &&
     (uint) pageTableEntries[i].index == index; ++i)

pageTableEntries[i].index = -1;

if ((cacheAvail - savedCacheSize) != entriesToRemove) {
kError(ksdcArea()) << "We somehow did not remove" << entriesToRemove
                   << "when removing entry" << index << ", instead we removed"
                   << (cacheAvail - savedCacheSize);
throw KSDCCorrupted();

void *const startOfData = page(firstPage);

QByteArray str((const char *) startOfData);
str.prepend(" REMOVED: ");
str.prepend(QByteArray::number(index));
str.prepend("ENTRY ");

::memcpy(startOfData, str.constData(), str.size() + 1);

entriesIndex[index].fileNameHash = 0;
entriesIndex[index].totalItemSize = 0;
entriesIndex[index].useCount = 0;
entriesIndex[index].lastUsedTime = 0;
entriesIndex[index].addTime = 0;
entriesIndex[index].firstPage = -1;
unsigned defaultCacheSize,
unsigned expectedItemSize)

d = new Private(cacheName, defaultCacheSize, expectedItemSize);

catch(KSDCCorrupted) {

d = new Private(cacheName, defaultCacheSize, expectedItemSize);

catch(KSDCCorrupted) {
    << "Even a brand-new cache starts off corrupted, something is"
    << "seriously wrong. :-(";

#ifdef KSDC_MSYNC_SUPPORTED
::msync(d->shm, d->m_mapSize, MS_INVALIDATE | MS_ASYNC);

::munmap(d->shm, d->m_mapSize);
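// KSharedDataCache::insert: hashes the key, probes for a free index slot
// (culling stale colliding entries as the cache fills), reserves enough
// contiguous pages, and copies the key plus payload into them.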
Private::CacheLocker lock(d);
if (lock.failed()) {

QByteArray encodedKey = key.toUtf8();

uint position = keyHash % d->shm->indexTableSize();

IndexTableEntry *indices = d->shm->indexTable();
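// Collision culling ramps up with the load factor: below 50% full colliding
// entries are left alone, past 96% they are always candidates, and in between
// a random draw is compared against a trip-wire value that scales linearly
// with the load factor.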
const static double startCullPoint = 0.5l;
const static double mustCullPoint = 0.96l;

double loadFactor = 1.0 - (1.0l * d->shm->cacheAvail * d->shm->cachePageSize()
                           / d->shm->cacheSize);
bool cullCollisions = false;

cullCollisions = true;

else if (loadFactor > startCullPoint) {
const int tripWireValue = RAND_MAX * (loadFactor - startCullPoint) / (mustCullPoint - startCullPoint);
cullCollisions = true;
uint probeNumber = 1;
while (indices[position].useCount > 0 && probeNumber < MAX_PROBE_COUNT) {

if (cullCollisions && (::time(0) - indices[position].lastUsedTime) > 60) {
indices[position].useCount >>= 1;
if (indices[position].useCount == 0) {
kDebug(ksdcArea()) << "Overwriting existing old cached entry due to collision.";
d->shm->removeEntry(position);

position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2)
           % d->shm->indexTableSize();

if (indices[position].useCount > 0 && indices[position].firstPage >= 0) {
kDebug(ksdcArea()) << "Overwriting existing cached entry due to collision.";
d->shm->removeEntry(position);
uint fileNameLength = 1 + encodedKey.length();
uint requiredSize = fileNameLength + data.size();
uint pagesNeeded = intCeil(requiredSize, d->shm->cachePageSize());
uint firstPage = (uint) -1;

if (pagesNeeded >= d->shm->pageTableSize()) {

if (pagesNeeded > d->shm->cacheAvail ||
    (firstPage = d->shm->findEmptyPages(pagesNeeded)) >= d->shm->pageTableSize())

uint freePagesDesired = 3 * qMax(1u, pagesNeeded / 2);

if (d->shm->cacheAvail > freePagesDesired) {

d->shm->defragment();
firstPage = d->shm->findEmptyPages(pagesNeeded);

d->shm->removeUsedPages(qMin(2 * freePagesDesired, d->shm->pageTableSize())
                        - d->shm->cacheAvail);
firstPage = d->shm->findEmptyPages(pagesNeeded);

if (firstPage >= d->shm->pageTableSize() ||
    d->shm->cacheAvail < pagesNeeded)

PageTableEntry *table = d->shm->pageTable();
for (uint i = 0; i < pagesNeeded; ++i) {
table[firstPage + i].index = position;

indices[position].fileNameHash = keyHash;
indices[position].totalItemSize = requiredSize;
indices[position].useCount = 1;
indices[position].addTime = ::time(0);
indices[position].lastUsedTime = indices[position].addTime;
indices[position].firstPage = firstPage;

d->shm->cacheAvail -= pagesNeeded;

void *dataPage = d->shm->page(firstPage);

throw KSDCCorrupted();

d->verifyProposedMemoryAccess(dataPage, requiredSize);

uchar *startOfPageData = reinterpret_cast<uchar *>(dataPage);
::memcpy(startOfPageData, encodedKey.constData(), fileNameLength);
::memcpy(startOfPageData + fileNameLength, data.constData(), data.size());

catch(KSDCCorrupted) {
d->recoverCorruptedCache();
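// KSharedDataCache::find: looks the key up via findNamedEntry, verifies the
// page range, refreshes lastUsedTime, and copies out the payload that follows
// the stored key in the entry's pages.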
Private::CacheLocker lock(d);
if (lock.failed()) {

QByteArray encodedKey = key.toUtf8();
qint32 entry = d->shm->findNamedEntry(encodedKey);

const IndexTableEntry *header = &d->shm->indexTable()[entry];
const void *resultPage = d->shm->page(header->firstPage);

throw KSDCCorrupted();

d->verifyProposedMemoryAccess(resultPage, header->totalItemSize);

header->lastUsedTime = ::time(0);

const char *cacheData = reinterpret_cast<const char *>(resultPage);
cacheData += encodedKey.size();

*destination = QByteArray(cacheData, header->totalItemSize - encodedKey.size() - 1);

catch(KSDCCorrupted) {
d->recoverCorruptedCache();
Private::CacheLocker lock(d);

if (!lock.failed()) {

catch(KSDCCorrupted) {
d->recoverCorruptedCache();

Private::CacheLocker lock(d);
if (lock.failed()) {

return d->shm->findNamedEntry(key.toUtf8()) >= 0;

catch(KSDCCorrupted) {
d->recoverCorruptedCache();

QFile::remove(cachePath);

Private::CacheLocker lock(d);
if (lock.failed()) {

return d->shm->cacheSize;

catch(KSDCCorrupted) {
d->recoverCorruptedCache();

Private::CacheLocker lock(d);
if (lock.failed()) {

return d->shm->cacheAvail * d->shm->cachePageSize();

catch(KSDCCorrupted) {
d->recoverCorruptedCache();
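// The eviction policy and cache timestamp live in shared memory as QAtomicInt;
// fetchAndAddAcquire(0) serves as an atomic read and fetchAndStoreRelease as
// an atomic write visible across processes.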
return static_cast<EvictionPolicy>(d->shm->evictionPolicy.fetchAndAddAcquire(0));

d->shm->evictionPolicy.fetchAndStoreRelease(static_cast<int>(newPolicy));

return static_cast<unsigned>(d->shm->cacheTimestamp.fetchAndAddAcquire(0));

d->shm->cacheTimestamp.fetchAndStoreRelease(static_cast<int>(newTimestamp));