Diff
Modified: trunk/Source/WebKit2/ChangeLog (182018 => 182019)
--- trunk/Source/WebKit2/ChangeLog 2015-03-26 18:16:02 UTC (rev 182018)
+++ trunk/Source/WebKit2/ChangeLog 2015-03-26 18:39:22 UTC (rev 182019)
@@ -1,3 +1,54 @@
+2015-03-26 Antti Koivisto <[email protected]>
+
+ Rename Storage::Entry to Storage::Record
+ https://bugs.webkit.org/show_bug.cgi?id=143101
+
+ Reviewed by Chris Dumez.
+
+ Lets have just one type called Entry in the cache code.
+
+ * NetworkProcess/cache/NetworkCache.cpp:
+ (WebKit::NetworkCache::Cache::retrieve):
+ (WebKit::NetworkCache::Cache::store):
+ (WebKit::NetworkCache::Cache::update):
+ (WebKit::NetworkCache::Cache::traverse):
+ (WebKit::NetworkCache::Cache::dumpContentsToFile):
+ * NetworkProcess/cache/NetworkCacheEntry.cpp:
+ (WebKit::NetworkCache::Entry::Entry):
+ (WebKit::NetworkCache::Entry::encodeAsStorageRecord):
+ (WebKit::NetworkCache::Entry::decodeStorageRecord):
+ (WebKit::NetworkCache::Entry::initializeBufferFromStorageRecord):
+ (WebKit::NetworkCache::Entry::buffer):
+ (WebKit::NetworkCache::Entry::shareableResourceHandle):
+ (WebKit::NetworkCache::Entry::encode): Deleted.
+ (WebKit::NetworkCache::Entry::decode): Deleted.
+ (WebKit::NetworkCache::Entry::initializeBufferFromStorageEntry): Deleted.
+ * NetworkProcess/cache/NetworkCacheEntry.h:
+ (WebKit::NetworkCache::Entry::sourceStorageRecord):
+ (WebKit::NetworkCache::Entry::sourceStorageEntry): Deleted.
+ * NetworkProcess/cache/NetworkCacheStorage.cpp:
+ (WebKit::NetworkCache::RecordMetaData::RecordMetaData):
+ (WebKit::NetworkCache::decodeRecordMetaData):
+ (WebKit::NetworkCache::decodeRecordHeader):
+ (WebKit::NetworkCache::decodeRecord):
+ (WebKit::NetworkCache::encodeRecordMetaData):
+ (WebKit::NetworkCache::encodeRecordHeader):
+ (WebKit::NetworkCache::Storage::dispatchReadOperation):
+ (WebKit::NetworkCache::retrieveFromMemory):
+ (WebKit::NetworkCache::Storage::store):
+ (WebKit::NetworkCache::Storage::update):
+ (WebKit::NetworkCache::Storage::traverse):
+ (WebKit::NetworkCache::Storage::dispatchPendingWriteOperations):
+ (WebKit::NetworkCache::Storage::dispatchFullWriteOperation):
+ (WebKit::NetworkCache::Storage::dispatchHeaderWriteOperation):
+ (WebKit::NetworkCache::EntryMetaData::EntryMetaData): Deleted.
+ (WebKit::NetworkCache::decodeEntryMetaData): Deleted.
+ (WebKit::NetworkCache::decodeEntryHeader): Deleted.
+ (WebKit::NetworkCache::decodeEntry): Deleted.
+ (WebKit::NetworkCache::encodeEntryMetaData): Deleted.
+ (WebKit::NetworkCache::encodeEntryHeader): Deleted.
+ * NetworkProcess/cache/NetworkCacheStorage.h:
+
2015-03-26 Chris Dumez <[email protected]>
[WK2] Let the compiler generate the NetworkCache::Key move constructor
Modified: trunk/Source/WebKit2/NetworkProcess/cache/NetworkCache.cpp (182018 => 182019)
--- trunk/Source/WebKit2/NetworkProcess/cache/NetworkCache.cpp 2015-03-26 18:16:02 UTC (rev 182018)
+++ trunk/Source/WebKit2/NetworkProcess/cache/NetworkCache.cpp 2015-03-26 18:39:22 UTC (rev 182019)
@@ -226,8 +226,8 @@
auto startTime = std::chrono::system_clock::now();
unsigned priority = originalRequest.priority();
- m_storage->retrieve(storageKey, priority, [this, originalRequest, completionHandler, startTime, storageKey, webPageID](std::unique_ptr<Storage::Entry> storageEntry) {
- if (!storageEntry) {
+ m_storage->retrieve(storageKey, priority, [this, originalRequest, completionHandler, startTime, storageKey, webPageID](std::unique_ptr<Storage::Record> record) {
+ if (!record) {
LOG(NetworkCache, "(NetworkProcess) not found in storage");
if (m_statistics)
@@ -237,26 +237,26 @@
return false;
}
- ASSERT(storageEntry->key == storageKey);
+ ASSERT(record->key == storageKey);
- auto cacheEntry = Entry::decode(*storageEntry);
+ auto entry = Entry::decodeStorageRecord(*record);
- auto useDecision = cacheEntry ? canUse(*cacheEntry, originalRequest) : UseDecision::NoDueToDecodeFailure;
+ auto useDecision = entry ? canUse(*entry, originalRequest) : UseDecision::NoDueToDecodeFailure;
switch (useDecision) {
case UseDecision::Use:
break;
case UseDecision::Validate:
- cacheEntry->setNeedsValidation();
+ entry->setNeedsValidation();
break;
default:
- cacheEntry = nullptr;
+ entry = nullptr;
};
#if !LOG_DISABLED
auto elapsedMS = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - startTime).count();
+ LOG(NetworkCache, "(NetworkProcess) retrieve complete useDecision=%d priority=%u time=%lldms", useDecision, originalRequest.priority(), elapsedMS);
#endif
- LOG(NetworkCache, "(NetworkProcess) retrieve complete useDecision=%d priority=%u time=%lldms", useDecision, originalRequest.priority(), elapsedMS);
- completionHandler(WTF::move(cacheEntry));
+ completionHandler(WTF::move(entry));
if (m_statistics)
m_statistics->recordRetrievedCachedEntry(webPageID, storageKey, originalRequest, useDecision);
@@ -315,9 +315,9 @@
Entry cacheEntry(makeCacheKey(originalRequest), response, WTF::move(responseData), collectVaryingRequestHeaders(originalRequest, response));
- auto storageEntry = cacheEntry.encode();
+ auto record = cacheEntry.encodeAsStorageRecord();
- m_storage->store(storageEntry, [completionHandler](bool success, const Data& bodyData) {
+ m_storage->store(record, [completionHandler](bool success, const Data& bodyData) {
MappedBody mappedBody;
#if ENABLE(SHAREABLE_RESOURCE)
if (bodyData.isMap()) {
@@ -341,9 +341,9 @@
Entry updateEntry(existingEntry.key(), response, existingEntry.buffer(), collectVaryingRequestHeaders(originalRequest, response));
- auto updateStorageEntry = updateEntry.encode();
+ auto updateRecord = updateEntry.encodeAsStorageRecord();
- m_storage->update(updateStorageEntry, existingEntry.sourceStorageEntry(), [](bool success, const Data&) {
+ m_storage->update(updateRecord, existingEntry.sourceStorageRecord(), [](bool success, const Data&) {
LOG(NetworkCache, "(NetworkProcess) updated, success=%d", success);
});
}
@@ -359,17 +359,17 @@
{
ASSERT(isEnabled());
- m_storage->traverse([traverseHandler](const Storage::Entry* storageEntry) {
- if (!storageEntry) {
+ m_storage->traverse([traverseHandler](const Storage::Record* record) {
+ if (!record) {
traverseHandler(nullptr);
return;
}
- auto cacheEntry = Entry::decode(*storageEntry);
- if (!cacheEntry)
+ auto entry = Entry::decodeStorageRecord(*record);
+ if (!entry)
return;
- traverseHandler(cacheEntry.get());
+ traverseHandler(entry.get());
});
}
@@ -386,18 +386,18 @@
if (!dumpFileHandle)
return;
WebCore::writeToFile(dumpFileHandle, "[\n", 2);
- m_storage->traverse([dumpFileHandle](const Storage::Entry* entry) {
- if (!entry) {
+ m_storage->traverse([dumpFileHandle](const Storage::Record* record) {
+ if (!record) {
WebCore::writeToFile(dumpFileHandle, "{}\n]\n", 5);
auto handle = dumpFileHandle;
WebCore::closeFile(handle);
return;
}
- auto cacheEntry = Entry::decode(*entry);
- if (!cacheEntry)
+ auto entry = Entry::decodeStorageRecord(*record);
+ if (!entry)
return;
StringBuilder json;
- cacheEntry->asJSON(json);
+ entry->asJSON(json);
json.append(",\n");
auto writeData = json.toString().utf8();
WebCore::writeToFile(dumpFileHandle, writeData.data(), writeData.length());
Modified: trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheEntry.cpp (182018 => 182019)
--- trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheEntry.cpp 2015-03-26 18:16:02 UTC (rev 182018)
+++ trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheEntry.cpp 2015-03-26 18:39:22 UTC (rev 182019)
@@ -49,14 +49,14 @@
{
}
-Entry::Entry(const Storage::Entry& storageEntry)
+Entry::Entry(const Storage::Record& storageEntry)
: m_key(storageEntry.key)
, m_timeStamp(storageEntry.timeStamp)
- , m_sourceStorageEntry(storageEntry)
+ , m_sourceStorageRecord(storageEntry)
{
}
-Storage::Entry Entry::encode() const
+Storage::Record Entry::encodeAsStorageRecord() const
{
Encoder encoder;
encoder << m_response;
@@ -76,7 +76,7 @@
return { m_key, m_timeStamp, header, body };
}
-std::unique_ptr<Entry> Entry::decode(const Storage::Entry& storageEntry)
+std::unique_ptr<Entry> Entry::decodeStorageRecord(const Storage::Record& storageEntry)
{
std::unique_ptr<Entry> entry(new Entry(storageEntry));
@@ -102,13 +102,13 @@
return entry;
}
-void Entry::initializeBufferFromStorageEntry() const
+void Entry::initializeBufferFromStorageRecord() const
{
- auto* data = m_sourceStorageEntry.body.data();
- size_t size = m_sourceStorageEntry.body.size();
+ auto* data = m_sourceStorageRecord.body.data();
+ size_t size = m_sourceStorageRecord.body.size();
#if ENABLE(SHAREABLE_RESOURCE)
- RefPtr<SharedMemory> sharedMemory = m_sourceStorageEntry.body.isMap() ? SharedMemory::createFromVMBuffer(const_cast<uint8_t*>(data), size) : nullptr;
- RefPtr<ShareableResource> shareableResource = sharedMemory ? ShareableResource::create(sharedMemory.release(), 0, m_sourceStorageEntry.body.size()) : nullptr;
+ RefPtr<SharedMemory> sharedMemory = m_sourceStorageRecord.body.isMap() ? SharedMemory::createFromVMBuffer(const_cast<uint8_t*>(data), size) : nullptr;
+ RefPtr<ShareableResource> shareableResource = sharedMemory ? ShareableResource::create(sharedMemory.release(), 0, m_sourceStorageRecord.body.size()) : nullptr;
if (shareableResource && shareableResource->createHandle(m_shareableResourceHandle))
m_buffer = m_shareableResourceHandle.tryWrapInSharedBuffer();
@@ -120,7 +120,7 @@
WebCore::SharedBuffer* Entry::buffer() const
{
if (!m_buffer)
- initializeBufferFromStorageEntry();
+ initializeBufferFromStorageRecord();
return m_buffer.get();
}
@@ -128,7 +128,7 @@
ShareableResource::Handle& Entry::shareableResourceHandle() const
{
if (!m_buffer)
- initializeBufferFromStorageEntry();
+ initializeBufferFromStorageRecord();
return m_shareableResourceHandle;
}
Modified: trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheEntry.h (182018 => 182019)
--- trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheEntry.h 2015-03-26 18:16:02 UTC (rev 182018)
+++ trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheEntry.h 2015-03-26 18:39:22 UTC (rev 182019)
@@ -47,8 +47,8 @@
public:
Entry(const Key&, const WebCore::ResourceResponse&, RefPtr<WebCore::SharedBuffer>&&, const Vector<std::pair<String, String>>& varyingRequestHeaders);
- Storage::Entry encode() const;
- static std::unique_ptr<Entry> decode(const Storage::Entry&);
+ Storage::Record encodeAsStorageRecord() const;
+ static std::unique_ptr<Entry> decodeStorageRecord(const Storage::Record&);
const Key& key() const { return m_key; }
std::chrono::milliseconds timeStamp() const { return m_timeStamp; }
@@ -63,13 +63,13 @@
bool needsValidation() const;
void setNeedsValidation();
- const Storage::Entry& sourceStorageEntry() const { return m_sourceStorageEntry; }
+ const Storage::Record& sourceStorageRecord() const { return m_sourceStorageRecord; }
void asJSON(StringBuilder&) const;
private:
- Entry(const Storage::Entry&);
- void initializeBufferFromStorageEntry() const;
+ Entry(const Storage::Record&);
+ void initializeBufferFromStorageRecord() const;
Key m_key;
std::chrono::milliseconds m_timeStamp;
@@ -81,7 +81,7 @@
mutable ShareableResource::Handle m_shareableResourceHandle;
#endif
- Storage::Entry m_sourceStorageEntry { };
+ Storage::Record m_sourceStorageRecord { };
};
}
Modified: trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.cpp (182018 => 182019)
--- trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.cpp 2015-03-26 18:16:02 UTC (rev 182018)
+++ trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.cpp 2015-03-26 18:39:22 UTC (rev 182019)
@@ -131,9 +131,9 @@
return hasher.hash();
}
-struct EntryMetaData {
- EntryMetaData() { }
- explicit EntryMetaData(const Key& key)
+struct RecordMetaData {
+ RecordMetaData() { }
+ explicit RecordMetaData(const Key& key)
: cacheStorageVersion(Storage::version)
, key(key)
{ }
@@ -149,7 +149,7 @@
uint64_t bodySize;
};
-static bool decodeEntryMetaData(EntryMetaData& metaData, const Data& fileData)
+static bool decodeRecordMetaData(RecordMetaData& metaData, const Data& fileData)
{
bool success = false;
fileData.apply([&metaData, &success](const uint8_t* data, size_t size) {
@@ -178,9 +178,9 @@
return success;
}
-static bool decodeEntryHeader(const Data& fileData, EntryMetaData& metaData, Data& data)
+static bool decodeRecordHeader(const Data& fileData, RecordMetaData& metaData, Data& data)
{
- if (!decodeEntryMetaData(metaData, fileData)) {
+ if (!decodeRecordMetaData(metaData, fileData)) {
LOG(NetworkCacheStorage, "(NetworkProcess) meta data decode failure");
return false;
}
@@ -203,11 +203,11 @@
return true;
}
-static std::unique_ptr<Storage::Entry> decodeEntry(const Data& fileData, int fd, const Key& key)
+static std::unique_ptr<Storage::Record> decodeRecord(const Data& fileData, int fd, const Key& key)
{
- EntryMetaData metaData;
+ RecordMetaData metaData;
Data headerData;
- if (!decodeEntryHeader(fileData, metaData, headerData))
+ if (!decodeRecordHeader(fileData, metaData, headerData))
return nullptr;
if (metaData.key != key)
@@ -230,7 +230,7 @@
}
}
- return std::make_unique<Storage::Entry>(Storage::Entry {
+ return std::make_unique<Storage::Record>(Storage::Record {
metaData.key,
metaData.timeStamp,
headerData,
@@ -238,35 +238,35 @@
});
}
-static Data encodeEntryMetaData(const EntryMetaData& entry)
+static Data encodeRecordMetaData(const RecordMetaData& metaData)
{
Encoder encoder;
- encoder << entry.cacheStorageVersion;
- encoder << entry.key;
- encoder << entry.timeStamp;
- encoder << entry.headerChecksum;
- encoder << entry.headerSize;
- encoder << entry.bodyChecksum;
- encoder << entry.bodySize;
+ encoder << metaData.cacheStorageVersion;
+ encoder << metaData.key;
+ encoder << metaData.timeStamp;
+ encoder << metaData.headerChecksum;
+ encoder << metaData.headerSize;
+ encoder << metaData.bodyChecksum;
+ encoder << metaData.bodySize;
encoder.encodeChecksum();
return Data(encoder.buffer(), encoder.bufferSize());
}
-static Data encodeEntryHeader(const Storage::Entry& entry)
+static Data encodeRecordHeader(const Storage::Record& record)
{
- EntryMetaData metaData(entry.key);
- metaData.timeStamp = entry.timeStamp;
- metaData.headerChecksum = hashData(entry.header);
- metaData.headerSize = entry.header.size();
- metaData.bodyChecksum = hashData(entry.body);
- metaData.bodySize = entry.body.size();
+ RecordMetaData metaData(record.key);
+ metaData.timeStamp = record.timeStamp;
+ metaData.headerChecksum = hashData(record.header);
+ metaData.headerSize = record.header.size();
+ metaData.bodyChecksum = hashData(record.body);
+ metaData.bodySize = record.body.size();
- auto encodedMetaData = encodeEntryMetaData(metaData);
- auto headerData = concatenate(encodedMetaData, entry.header);
- if (!entry.body.size())
+ auto encodedMetaData = encodeRecordMetaData(metaData);
+ auto headerData = concatenate(encodedMetaData, record.header);
+ if (!record.body.size())
return { headerData };
size_t dataOffset = WTF::roundUpToMultipleOf(pageSize(), headerData.size());
@@ -313,8 +313,8 @@
remove(read.key);
read.completionHandler(nullptr);
} else {
- auto entry = decodeEntry(fileData, channel->fileDescriptor(), read.key);
- bool success = read.completionHandler(WTF::move(entry));
+ auto record = decodeRecord(fileData, channel->fileDescriptor(), read.key);
+ bool success = read.completionHandler(WTF::move(record));
if (success)
updateFileAccessTime(*channel);
else
@@ -354,11 +354,11 @@
template <class T> bool retrieveFromMemory(const T& operations, const Key& key, Storage::RetrieveCompletionHandler& completionHandler)
{
for (auto& operation : operations) {
- if (operation->entry.key == key) {
+ if (operation->record.key == key) {
LOG(NetworkCacheStorage, "(NetworkProcess) found write operation in progress");
- auto entry = operation->entry;
- RunLoop::main().dispatch([entry, completionHandler] {
- completionHandler(std::make_unique<Storage::Entry>(entry));
+ auto record = operation->record;
+ RunLoop::main().dispatch([record, completionHandler] {
+ completionHandler(std::make_unique<Storage::Record>(record));
});
return true;
}
@@ -391,41 +391,41 @@
dispatchPendingReadOperations();
}
-void Storage::store(const Entry& entry, StoreCompletionHandler&& completionHandler)
+void Storage::store(const Record& record, StoreCompletionHandler&& completionHandler)
{
ASSERT(RunLoop::isMain());
- ASSERT(!entry.key.isNull());
+ ASSERT(!record.key.isNull());
if (!m_maximumSize) {
completionHandler(false, { });
return;
}
- m_pendingWriteOperations.append(new WriteOperation { entry, { }, WTF::move(completionHandler) });
+ m_pendingWriteOperations.append(new WriteOperation { record, { }, WTF::move(completionHandler) });
// Add key to the filter already here as we do lookups from the pending operations too.
- m_contentsFilter.add(entry.key.shortHash());
+ m_contentsFilter.add(record.key.shortHash());
dispatchPendingWriteOperations();
}
-void Storage::update(const Entry& updateEntry, const Entry& existingEntry, StoreCompletionHandler&& completionHandler)
+void Storage::update(const Record& updateRecord, const Record& existingRecord, StoreCompletionHandler&& completionHandler)
{
ASSERT(RunLoop::isMain());
- ASSERT(!existingEntry.key.isNull());
- ASSERT(existingEntry.key == updateEntry.key);
+ ASSERT(!existingRecord.key.isNull());
+ ASSERT(existingRecord.key == updateRecord.key);
if (!m_maximumSize) {
completionHandler(false, { });
return;
}
- m_pendingWriteOperations.append(new WriteOperation { updateEntry, existingEntry, WTF::move(completionHandler) });
+ m_pendingWriteOperations.append(new WriteOperation { updateRecord, existingRecord, WTF::move(completionHandler) });
dispatchPendingWriteOperations();
}
-void Storage::traverse(std::function<void (const Entry*)>&& traverseHandler)
+void Storage::traverse(std::function<void (const Record*)>&& traverseHandler)
{
StringCapture cachePathCapture(m_directoryPath);
ioQueue().dispatch([this, cachePathCapture, traverseHandler] {
@@ -436,11 +436,11 @@
const size_t headerReadSize = 16 << 10;
// FIXME: Traversal is slower than it should be due to lack of parallelism.
channel->readSync(0, headerReadSize, [this, &traverseHandler](Data& fileData, int) {
- EntryMetaData metaData;
+ RecordMetaData metaData;
Data headerData;
- if (decodeEntryHeader(fileData, metaData, headerData)) {
- Entry entry { metaData.key, metaData.timeStamp, headerData, { } };
- traverseHandler(&entry);
+ if (decodeRecordHeader(fileData, metaData, headerData)) {
+ Record record { metaData.key, metaData.timeStamp, headerData, { } };
+ traverseHandler(&record);
}
});
});
@@ -465,7 +465,7 @@
auto& write = *writeOperation;
m_activeWriteOperations.add(WTF::move(writeOperation));
- if (write.existingEntry && cacheMayContain(write.entry.key.shortHash())) {
+ if (write.existingRecord && cacheMayContain(write.record.key.shortHash())) {
dispatchHeaderWriteOperation(write);
continue;
}
@@ -478,25 +478,25 @@
ASSERT(RunLoop::isMain());
ASSERT(m_activeWriteOperations.contains(&write));
- if (!m_contentsFilter.mayContain(write.entry.key.shortHash()))
- m_contentsFilter.add(write.entry.key.shortHash());
+ if (!m_contentsFilter.mayContain(write.record.key.shortHash()))
+ m_contentsFilter.add(write.record.key.shortHash());
StringCapture cachePathCapture(m_directoryPath);
backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
- auto encodedHeader = encodeEntryHeader(write.entry);
- auto headerAndBodyData = concatenate(encodedHeader, write.entry.body);
+ auto encodedHeader = encodeRecordHeader(write.record);
+ auto headerAndBodyData = concatenate(encodedHeader, write.record.body);
- auto channel = openFileForKey(write.entry.key, IOChannel::Type::Create, cachePathCapture.string());
+ auto channel = openFileForKey(write.record.key, IOChannel::Type::Create, cachePathCapture.string());
int fd = channel->fileDescriptor();
size_t bodyOffset = encodedHeader.size();
channel->write(0, headerAndBodyData, [this, &write, bodyOffset, fd](int error) {
LOG(NetworkCacheStorage, "(NetworkProcess) write complete error=%d", error);
if (error) {
- if (m_contentsFilter.mayContain(write.entry.key.shortHash()))
- m_contentsFilter.remove(write.entry.key.shortHash());
+ if (m_contentsFilter.mayContain(write.record.key.shortHash()))
+ m_contentsFilter.remove(write.record.key.shortHash());
}
- size_t bodySize = write.entry.body.size();
+ size_t bodySize = write.record.body.size();
size_t totalSize = bodyOffset + bodySize;
m_approximateSize += totalSize;
@@ -518,15 +518,15 @@
void Storage::dispatchHeaderWriteOperation(const WriteOperation& write)
{
ASSERT(RunLoop::isMain());
- ASSERT(write.existingEntry);
+ ASSERT(write.existingRecord);
ASSERT(m_activeWriteOperations.contains(&write));
- ASSERT(cacheMayContain(write.entry.key.shortHash()));
+ ASSERT(cacheMayContain(write.record.key.shortHash()));
// Try to update the header of an existing entry.
StringCapture cachePathCapture(m_directoryPath);
backgroundIOQueue().dispatch([this, &write, cachePathCapture] {
- auto headerData = encodeEntryHeader(write.entry);
- auto existingHeaderData = encodeEntryHeader(write.existingEntry.value());
+ auto headerData = encodeRecordHeader(write.record);
+ auto existingHeaderData = encodeRecordHeader(write.existingRecord.value());
bool pageRoundedHeaderSizeChanged = headerData.size() != existingHeaderData.size();
if (pageRoundedHeaderSizeChanged) {
@@ -537,12 +537,12 @@
return;
}
- auto channel = openFileForKey(write.entry.key, IOChannel::Type::Write, cachePathCapture.string());
+ auto channel = openFileForKey(write.record.key, IOChannel::Type::Write, cachePathCapture.string());
channel->write(0, headerData, [this, &write](int error) {
LOG(NetworkCacheStorage, "(NetworkProcess) update complete error=%d", error);
if (error)
- remove(write.entry.key);
+ remove(write.record.key);
write.completionHandler(!error, { });
Modified: trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.h (182018 => 182019)
--- trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.h 2015-03-26 18:16:02 UTC (rev 182018)
+++ trunk/Source/WebKit2/NetworkProcess/cache/NetworkCacheStorage.h 2015-03-26 18:39:22 UTC (rev 182019)
@@ -47,24 +47,24 @@
public:
static std::unique_ptr<Storage> open(const String& cachePath);
- struct Entry {
+ struct Record {
Key key;
std::chrono::milliseconds timeStamp;
Data header;
Data body;
};
// This may call completion handler synchronously on failure.
- typedef std::function<bool (std::unique_ptr<Entry>)> RetrieveCompletionHandler;
+ typedef std::function<bool (std::unique_ptr<Record>)> RetrieveCompletionHandler;
void retrieve(const Key&, unsigned priority, RetrieveCompletionHandler&&);
typedef std::function<void (bool success, const Data& mappedBody)> StoreCompletionHandler;
- void store(const Entry&, StoreCompletionHandler&&);
- void update(const Entry& updateEntry, const Entry& existingEntry, StoreCompletionHandler&&);
+ void store(const Record&, StoreCompletionHandler&&);
+ void update(const Record& updateRecord, const Record& existingRecord, StoreCompletionHandler&&);
void remove(const Key&);
// Null entry signals end.
- void traverse(std::function<void (const Entry*)>&&);
+ void traverse(std::function<void (const Record*)>&&);
void setMaximumSize(size_t);
void clear();
@@ -89,8 +89,8 @@
void dispatchPendingReadOperations();
struct WriteOperation {
- Entry entry;
- Optional<Entry> existingEntry;
+ Record record;
+ Optional<Record> existingRecord;
StoreCompletionHandler completionHandler;
};
void dispatchFullWriteOperation(const WriteOperation&);