forked from jiuyuan/JYCache
final
parent bd51a9c1f7
commit 225503039f
Binary file not shown (new image added, 176 KiB).
@@ -52,7 +52,9 @@ JYCache is a cache designed for personal use, large-model training and inference, and other scenarios, suited …
The write throttling mechanism uses the file name as its distinguishing key: the number of bytes a given file may write within a time window is limited, and tokens are treated as bandwidth. When a write task (i.e., a client) issues a Put request, it passes the target file name, the length of this write in bytes, and other parameters. On receiving this information, WriteCache proceeds as follows (a minimal sketch of this flow appears after the list):
1. Is this the file's first write? Yes -> create a mapping from the file to a new token bucket and allocate it a share of bandwidth; No -> look up the file's existing token bucket.
1. Is this the file's first write?
1. Yes -> create a mapping from the file to a new token bucket and allocate it a share of bandwidth.
2. No -> look up the file's existing token bucket.
2. Consume a number of tokens equal to the length of this write from the bucket, recording the time spent acquiring them as BlockTime.
3. Perform the actual write.
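To make the flow above concrete, here is a minimal, self-contained C++ sketch of per-file token-bucket throttling. It is only an illustration of the three steps, not the actual WriteCache code: the class `FileWriteThrottle`, its `Acquire` method, and the refill policy are hypothetical, and the sketch is single-threaded (a real Put path would also need locking and uses the project's own token bucket).

```cpp
#include <algorithm>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
#include <unordered_map>

// Hypothetical sketch of the Put-side throttling flow described above;
// NOT the actual WriteCache implementation.
class FileWriteThrottle {
public:
    FileWriteThrottle(double bytesPerSec, double burstBytes)
        : rate_(bytesPerSec), burst_(burstBytes) {}

    // Blocks until `len` bytes worth of tokens are available for `fileName`
    // and returns the time spent waiting (the BlockTime mentioned above).
    std::chrono::milliseconds Acquire(const std::string& fileName, size_t len) {
        const auto start = std::chrono::steady_clock::now();

        // Step 1: first write of this file -> create its token bucket; otherwise reuse it.
        auto it = buckets_.find(fileName);
        if (it == buckets_.end()) {
            it = buckets_.emplace(fileName, Bucket{burst_, start}).first;
        }
        Bucket& b = it->second;

        // Step 2: consume `len` tokens, refilling at rate_ bytes/second (capped at burst_).
        while (true) {
            const auto now = std::chrono::steady_clock::now();
            const double elapsed = std::chrono::duration<double>(now - b.lastRefill).count();
            b.tokens = std::min(burst_, b.tokens + elapsed * rate_);
            b.lastRefill = now;
            if (b.tokens >= static_cast<double>(len)) {
                b.tokens -= static_cast<double>(len);
                break;
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
        return std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - start);
    }

private:
    struct Bucket {
        double tokens;                                    // currently available bytes
        std::chrono::steady_clock::time_point lastRefill;
    };
    double rate_;   // bytes per second granted to each file
    double burst_;  // maximum tokens a bucket may hold
    std::unordered_map<std::string, Bucket> buckets_;     // per-file token buckets
};

int main() {
    // 4 MB/s per file; each bucket holds at most 8 MB of tokens.
    FileWriteThrottle throttle(4.0 * 1024 * 1024, 8.0 * 1024 * 1024);

    // A Put request passes the target file name and the write length (step 2),
    // then the caller performs the real write (step 3).
    auto blockTime = throttle.Acquire("file_A", 2 * 1024 * 1024);
    std::cout << "BlockTime: " << blockTime.count() << " ms\n";
    return 0;
}
```

`Acquire` returns the time it blocked, which corresponds to the BlockTime recorded in step 2; the caller then performs the actual write of step 3.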
@@ -107,7 +109,7 @@ The server side uses the reinforcement-learning model OLUCB for single-objective tuning and computes the next …
### 2.2 Parameter Description
- The Server IP address is 127.0.0.1 and the port is 2333
Model description: [OLUCB single-objective scheduling algorithm](https://epr023ri66.feishu.cn/docx/KfsddCGbLoZjf0xgSOqcw0V8nZb)
- Model description: [OLUCB single-objective scheduling algorithm](https://epr023ri66.feishu.cn/docx/KfsddCGbLoZjf0xgSOqcw0V8nZb)
- fixSize: the resource allocation unit, = 1024 * 1024 * 256, i.e. 256 MB
- reserveSize: the reserved size, used to keep any Pool from dropping to 0 during a Resize, = 1024 * 1024 * 512, i.e. 512 MB
- resizeInterval_: the interval between Resize operations, set to 5 s (a configuration sketch follows this list)
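For orientation only, the sketch below gathers the parameters above into a single struct. The type name `OlucbTuningConfig` and the field spellings are hypothetical; the project keeps these values in its own configuration.

```cpp
#include <chrono>
#include <cstdint>
#include <string>

// Hypothetical grouping of the parameters listed above, purely illustrative;
// the actual project reads these values from its own configuration.
struct OlucbTuningConfig {
    std::string serverIp = "127.0.0.1";                    // Server IP address
    std::uint16_t serverPort = 2333;                       // Server port
    std::uint64_t fixSize = 1024ULL * 1024 * 256;          // resource allocation unit: 256 MB
    std::uint64_t reserveSize = 1024ULL * 1024 * 512;      // floor that keeps a pool from resizing to 0: 512 MB
    std::chrono::seconds resizeInterval{5};                // interval between Resize rounds
};
```

With these values, a Resize round would adjust pools in multiples of fixSize, keep every pool at or above reserveSize, and be attempted at most once per resizeInterval_.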
@@ -119,3 +121,4 @@ The server side uses the reinforcement-learning model OLUCB for single-objective tuning and computes the next …
### 2.4 Results
![](image/result_LinUCB.png)
@@ -429,7 +429,6 @@ Cache::WriteHandle PageCacheImpl::FindOrCreateWriteHandle(const std::string &key
    if (cache_->insert(writeHandle)) {
        pageNum_.fetch_add(1);
        pagesList_.insert(key);
        // LOG(INFO) <<"[TestOutPut] cache"<< cache_ <<" pool "<< static_cast<int>(pool_)<<" Insert page "<<key;
    } else {
        writeHandle = cache_->findToWrite(key);
    }
@@ -104,7 +104,6 @@ class PageCacheImpl : public PageCache {
        bitmapSize_ = cfg_.PageBodySize / BYTE_LEN;
        cache_ = curr_cache_;
        pool_ = curr_pool_id_;
        // LOG(WARNING) << "[TestOutPut] PageCache Init with size : "<<GetCacheSize();
    }
@@ -63,9 +63,6 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
    if (remainLen > 0 && !dataAdaptor_) {
        res = ADAPTOR_NOT_FOUND;
    }
    // If LinUCB is not enabled, there is no case that needs fs
    if(remainLen > 0)
        LOG(INFO) << "[TestOutPut]ReadCache key : "<<key<<" Readlen : " << realReadLen << " remainLen : "<<remainLen <<" res : "<<res;

    // handle cache misses
    readLen = 0;
@@ -94,12 +91,9 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,

    auto download = folly::via(executor_.get(), [this, readLen]() {
        // download flow control
        LOG(INFO) << "[TestOutPut]ReadCache try to consume "<<readLen<<" tokens";
        while(!this->tokenBucket_->consume(readLen));
        LOG(INFO) << "[TestOutPut]ReadCache finished consume "<<readLen<<" tokens";
        return SUCCESS;
    }).thenValue([this, key, fileStartOff, readLen, stepBuffer](int i) {
        LOG(INFO) << "[TestOutPut]ReadCache Extra download: " << key << " " << readLen;
        ByteBuffer tmpBuffer(stepBuffer.data, readLen);
        return this->dataAdaptor_->DownLoad(key, fileStartOff, readLen, tmpBuffer).get();
    }).thenValue([this, key, fileStartOff, readLen, stepBuffer](int downRes) {
@@ -116,7 +110,6 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
    }

    if (!fs.empty()) {
        LOG(INFO) << "[TestOutPut]ReadCache wait for all jobs done, key : "<< key << " fs.size : "<<fs.size();
        return collectAll(fs).via(executor_.get())
            .thenValue([key, start, len, readPageCnt, startTime](
                std::vector<folly::Try<int>, std::allocator<folly::Try<int>>>&& tups) {
@@ -124,7 +117,6 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
                for (const auto& t : tups) {
                    if (SUCCESS != t.value()) finalRes = t.value();
                }
                LOG(INFO) << "[TestOutPut]ReadCache DownLoad From Adaptor, key : "<<key;
                if (EnableLogging) {
                    double totalTime = std::chrono::duration<double, std::milli>(
                        std::chrono::steady_clock::now() - startTime).count();
@@ -63,8 +63,6 @@ void HybridCacheAccessor4S3fs::Init() {
        stopLinUCBThread = false;
        LinUCBThread = std::thread(&HybridCacheAccessor4S3fs::LinUCBClient, this);
    }
    LOG(WARNING) << "[TestOutPut]Init, EnableResize :" << cfg_.EnableResize;
    LOG(WARNING) << "[TestOutPut]Init, EnableLinUCB :" << cfg_.EnableLinUCB;
    LOG(WARNING) << "[Accessor]Init, useGlobalCache:" << cfg_.UseGlobalCache;
}
@@ -139,9 +137,6 @@ int HybridCacheAccessor4S3fs::InitCache()
            executor_, readPoolId_, comCache_);

        LOG(WARNING) << "[Accessor]Init Cache in Combined Way.";
        //test
        // LOG(WARNING) <<"[TestOutPut] writePoolId_ :"<<static_cast<int>(writePoolId_)
        // <<" readPoolId_"<<static_cast<int>(readPoolId_);
    }
    else // keep the original approach
    {
@@ -154,8 +149,7 @@ int HybridCacheAccessor4S3fs::InitCache()
    }
    writeCacheSize_ = cfg_.WriteCacheCfg.CacheCfg.MaxCacheSize;
    readCacheSize_ = cfg_.ReadCacheCfg.CacheCfg.MaxCacheSize;
    // LOG(WARNING) <<"[TestOutPut] writeCacheSize_ : "<<writeCacheSize_
    // <<" ; readCacheSize_ : "<<readCacheSize_;

    return SUCCESS;
@@ -690,17 +684,13 @@ void HybridCacheAccessor4S3fs::BackGroundFlush()
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
            continue;
        }
        LOG(WARNING) << "[Accessor]BackGroundFlush radically, write pool ratio:"
                     << WritePoolRatio();
        LOG(WARNING) << "[TestOutPut] Before Flush, Write Pool size : "
                     <<writeCache_->GetCacheSize();
        LOG(WARNING) << "[Accessor]BackGroundFlush radically, write pool ratio:"<< WritePoolRatio();
        // LOG(WARNING) << "[TestOutPut] Before Flush, Write Pool size : " <<writeCache_->GetCacheSize();
        FsSync();
        LOG(WARNING) << "[TestOutPut] After Flush, Write Pool size : "
                     <<writeCache_->GetCacheSize()<<" , Ratio : "<<WritePoolRatio();
        // LOG(WARNING) << "[TestOutPut] After Flush, Write Pool size : "<<writeCache_->GetCacheSize()<<" , Ratio : "<<WritePoolRatio();
    }
    if (0 < writeCache_->GetCacheSize())
    {
        LOG(WARNING) << "[TestOutPut] Final BackGroundFlush ";
        FsSync();
    }
}
@@ -713,17 +703,14 @@ void HybridCacheAccessor4S3fs::BackGroundFlush()
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
            continue;
        }
        LOG(WARNING) << "[Accessor]BackGroundFlush radically, write cache ratio:"
                     << WriteCacheRatio();
        LOG(WARNING) << "[TestOutPut] Before Flush, Write cache size : "
                     <<writeCache_->GetCacheSize();
        LOG(WARNING) << "[Accessor]BackGroundFlush radically, write cache ratio:"<< WriteCacheRatio();
        // LOG(WARNING) << "[TestOutPut] Before Flush, Write cache size : "<<writeCache_->GetCacheSize();
        FsSync();
        LOG(WARNING) << "[TestOutPut] After Flush, Write cache size : "
                     <<writeCache_->GetCacheSize()<<" , Ratio : "<<WriteCacheRatio();
        // LOG(WARNING) << "[TestOutPut] After Flush, Write cache size : "<<writeCache_->GetCacheSize()<<" , Ratio : "<<WriteCacheRatio();
    }
    if (0 < writeCache_->GetCacheSize()) // there are still files not yet persisted
    {
        LOG(WARNING) << "[TestOutPut] Final BackGroundFlush ";
        // LOG(WARNING) << "[TestOutPut] Final BackGroundFlush ";
        FsSync();
    }
}