This commit is contained in:
RistyTang 2024-10-10 14:37:54 +08:00
parent bd51a9c1f7
commit 225503039f
6 changed files with 13 additions and 33 deletions

BIN
doc/image/result_LinUCB.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 176 KiB

View File

@@ -52,7 +52,9 @@ JYCache是一款面向个人使用、大模型训练推理等多种场景
写入限流机制使用文件名称作为区分标识符即同一个文件在一定时间间隔内可写入的字节数目是有限的将令牌视同于带宽资源。当一个写入任务即客户端发起Put请求时会传递目标文件名称和该次写入字节长度等参数。WriteCache在接收到这些信息时运行流程如下 写入限流机制使用文件名称作为区分标识符即同一个文件在一定时间间隔内可写入的字节数目是有限的将令牌视同于带宽资源。当一个写入任务即客户端发起Put请求时会传递目标文件名称和该次写入字节长度等参数。WriteCache在接收到这些信息时运行流程如下
1. 该文件为首次写入:是->为其建立与令牌桶的映射,分配一定带宽资源;否->查找文件对应令牌桶。 1. 该文件为首次写入:
1. 是->为其建立与令牌桶的映射,分配一定带宽资源。
2. 否->查找文件对应令牌桶。
2. 从令牌桶中消耗该次写入长度的令牌数目记录因获取令牌而消耗的时间BlockTime 2. 从令牌桶中消耗该次写入长度的令牌数目记录因获取令牌而消耗的时间BlockTime
3. 执行真正的写入操作。 3. 执行真正的写入操作。
@@ -107,7 +109,7 @@ Server端使用强化学习模型 OLUCB进行单目标调优计算出下一
### 2.2 参数说明 ### 2.2 参数说明
- Server端IP 地址为 127.0.0.1,端口号为 2333 - Server端IP 地址为 127.0.0.1,端口号为 2333
模型说明:[OLUCB单目标调度算法](https://epr023ri66.feishu.cn/docx/KfsddCGbLoZjf0xgSOqcw0V8nZb) - 模型说明:[OLUCB单目标调度算法](https://epr023ri66.feishu.cn/docx/KfsddCGbLoZjf0xgSOqcw0V8nZb)
- fixSize资源分配单位= 1024 * 1024 * 256即256M - fixSize资源分配单位= 1024 * 1024 * 256即256M
- reserveSize保留单位用于防止Resize时出现某个Pool为0的情况= 1024 * 1024 * 512即512M - reserveSize保留单位用于防止Resize时出现某个Pool为0的情况= 1024 * 1024 * 512即512M
- resizeInterval_Resize的时间间隔设置为5s - resizeInterval_Resize的时间间隔设置为5s
@@ -119,3 +121,4 @@ Server端使用强化学习模型 OLUCB进行单目标调优计算出下一
### 2.4 效果展示 ### 2.4 效果展示
![](image/result_LinUCB.png)

View File

@@ -429,7 +429,6 @@ Cache::WriteHandle PageCacheImpl::FindOrCreateWriteHandle(const std::string &key
if (cache_->insert(writeHandle)) { if (cache_->insert(writeHandle)) {
pageNum_.fetch_add(1); pageNum_.fetch_add(1);
pagesList_.insert(key); pagesList_.insert(key);
// LOG(INFO) <<"[TestOutPut] cache"<< cache_ <<" pool "<< static_cast<int>(pool_)<<" Insert page "<<key;
} else { } else {
writeHandle = cache_->findToWrite(key); writeHandle = cache_->findToWrite(key);
} }

View File

@@ -104,7 +104,6 @@ class PageCacheImpl : public PageCache {
bitmapSize_ = cfg_.PageBodySize / BYTE_LEN; bitmapSize_ = cfg_.PageBodySize / BYTE_LEN;
cache_ = curr_cache_; cache_ = curr_cache_;
pool_ = curr_pool_id_; pool_ = curr_pool_id_;
// LOG(WARNING) << "[TestOutPut] PageCache Init with size : "<<GetCacheSize();
} }

View File

@@ -63,9 +63,6 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
if (remainLen > 0 && !dataAdaptor_) { if (remainLen > 0 && !dataAdaptor_) {
res = ADAPTOR_NOT_FOUND; res = ADAPTOR_NOT_FOUND;
} }
//如果不开LinUCB就不会有需要用fs的情况
if(remainLen > 0)
LOG(INFO) << "[TestOutPut]ReadCache key : "<<key<<" Readlen : " << realReadLen << " remainLen : "<<remainLen <<" res : "<<res;
// handle cache misses // handle cache misses
readLen = 0; readLen = 0;
@@ -94,12 +91,9 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
auto download = folly::via(executor_.get(), [this, readLen]() { auto download = folly::via(executor_.get(), [this, readLen]() {
// download flow control // download flow control
LOG(INFO) << "[TestOutPut]ReadCache try to consume "<<readLen<<" tokens";
while(!this->tokenBucket_->consume(readLen)); while(!this->tokenBucket_->consume(readLen));
LOG(INFO) << "[TestOutPut]ReadCache finished consume "<<readLen<<" tokens";
return SUCCESS; return SUCCESS;
}).thenValue([this, key, fileStartOff, readLen, stepBuffer](int i) { }).thenValue([this, key, fileStartOff, readLen, stepBuffer](int i) {
LOG(INFO) << "[TestOutPut]ReadCache Extra download: " << key << " " << readLen;
ByteBuffer tmpBuffer(stepBuffer.data, readLen); ByteBuffer tmpBuffer(stepBuffer.data, readLen);
return this->dataAdaptor_->DownLoad(key, fileStartOff, readLen, tmpBuffer).get(); return this->dataAdaptor_->DownLoad(key, fileStartOff, readLen, tmpBuffer).get();
}).thenValue([this, key, fileStartOff, readLen, stepBuffer](int downRes) { }).thenValue([this, key, fileStartOff, readLen, stepBuffer](int downRes) {
@@ -116,7 +110,6 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
} }
if (!fs.empty()) { if (!fs.empty()) {
LOG(INFO) << "[TestOutPut]ReadCache wait for all jobs done, key : "<< key << " fs.size : "<<fs.size();
return collectAll(fs).via(executor_.get()) return collectAll(fs).via(executor_.get())
.thenValue([key, start, len, readPageCnt, startTime]( .thenValue([key, start, len, readPageCnt, startTime](
std::vector<folly::Try<int>, std::allocator<folly::Try<int>>>&& tups) { std::vector<folly::Try<int>, std::allocator<folly::Try<int>>>&& tups) {
@@ -124,7 +117,6 @@ folly::Future<int> ReadCache::Get(const std::string &key, size_t start,
for (const auto& t : tups) { for (const auto& t : tups) {
if (SUCCESS != t.value()) finalRes = t.value(); if (SUCCESS != t.value()) finalRes = t.value();
} }
LOG(INFO) << "[TestOutPut]ReadCache DownLoad From Adaptor, key : "<<key;
if (EnableLogging) { if (EnableLogging) {
double totalTime = std::chrono::duration<double, std::milli>( double totalTime = std::chrono::duration<double, std::milli>(
std::chrono::steady_clock::now() - startTime).count(); std::chrono::steady_clock::now() - startTime).count();

View File

@@ -63,8 +63,6 @@ void HybridCacheAccessor4S3fs::Init() {
stopLinUCBThread = false; stopLinUCBThread = false;
LinUCBThread = std::thread(&HybridCacheAccessor4S3fs::LinUCBClient, this); LinUCBThread = std::thread(&HybridCacheAccessor4S3fs::LinUCBClient, this);
} }
LOG(WARNING) << "[TestOutPut]Init, EnableResize :" << cfg_.EnableResize;
LOG(WARNING) << "[TestOutPut]Init, EnableLinUCB :" << cfg_.EnableLinUCB;
LOG(WARNING) << "[Accessor]Init, useGlobalCache:" << cfg_.UseGlobalCache; LOG(WARNING) << "[Accessor]Init, useGlobalCache:" << cfg_.UseGlobalCache;
} }
@@ -139,9 +137,6 @@ int HybridCacheAccessor4S3fs::InitCache()
executor_, readPoolId_, comCache_); executor_, readPoolId_, comCache_);
LOG(WARNING) << "[Accessor]Init Cache in Combined Way."; LOG(WARNING) << "[Accessor]Init Cache in Combined Way.";
//test
// LOG(WARNING) <<"[TestOutPut] writePoolId_ :"<<static_cast<int>(writePoolId_)
// <<" readPoolId_"<<static_cast<int>(readPoolId_);
} }
else //沿用原来的方式 else //沿用原来的方式
{ {
@@ -154,8 +149,7 @@ int HybridCacheAccessor4S3fs::InitCache()
} }
writeCacheSize_ = cfg_.WriteCacheCfg.CacheCfg.MaxCacheSize; writeCacheSize_ = cfg_.WriteCacheCfg.CacheCfg.MaxCacheSize;
readCacheSize_ = cfg_.ReadCacheCfg.CacheCfg.MaxCacheSize; readCacheSize_ = cfg_.ReadCacheCfg.CacheCfg.MaxCacheSize;
// LOG(WARNING) <<"[TestOutPut] writeCacheSize_ : "<<writeCacheSize_
// <<" ; readCacheSize_ : "<<readCacheSize_;
return SUCCESS; return SUCCESS;
@@ -690,17 +684,13 @@ void HybridCacheAccessor4S3fs::BackGroundFlush()
std::this_thread::sleep_for(std::chrono::milliseconds(1)); std::this_thread::sleep_for(std::chrono::milliseconds(1));
continue; continue;
} }
LOG(WARNING) << "[Accessor]BackGroundFlush radically, write pool ratio:" LOG(WARNING) << "[Accessor]BackGroundFlush radically, write pool ratio:"<< WritePoolRatio();
<< WritePoolRatio(); // LOG(WARNING) << "[TestOutPut] Before Flush, Write Pool size : " <<writeCache_->GetCacheSize();
LOG(WARNING) << "[TestOutPut] Before Flush, Write Pool size : "
<<writeCache_->GetCacheSize();
FsSync(); FsSync();
LOG(WARNING) << "[TestOutPut] After Flush, Write Pool size : " // LOG(WARNING) << "[TestOutPut] After Flush, Write Pool size : "<<writeCache_->GetCacheSize()<<" , Ratio : "<<WritePoolRatio();
<<writeCache_->GetCacheSize()<<" , Ratio : "<<WritePoolRatio();
} }
if (0 < writeCache_->GetCacheSize()) if (0 < writeCache_->GetCacheSize())
{ {
LOG(WARNING) << "[TestOutPut] Final BackGroundFlush ";
FsSync(); FsSync();
} }
} }
@@ -713,17 +703,14 @@ void HybridCacheAccessor4S3fs::BackGroundFlush()
std::this_thread::sleep_for(std::chrono::milliseconds(1)); std::this_thread::sleep_for(std::chrono::milliseconds(1));
continue; continue;
} }
LOG(WARNING) << "[Accessor]BackGroundFlush radically, write cache ratio:" LOG(WARNING) << "[Accessor]BackGroundFlush radically, write cache ratio:"<< WriteCacheRatio();
<< WriteCacheRatio(); // LOG(WARNING) << "[TestOutPut] Before Flush, Write cache size : "<<writeCache_->GetCacheSize();
LOG(WARNING) << "[TestOutPut] Before Flush, Write cache size : "
<<writeCache_->GetCacheSize();
FsSync(); FsSync();
LOG(WARNING) << "[TestOutPut] After Flush, Write cache size : " // LOG(WARNING) << "[TestOutPut] After Flush, Write cache size : "<<writeCache_->GetCacheSize()<<" , Ratio : "<<WriteCacheRatio();
<<writeCache_->GetCacheSize()<<" , Ratio : "<<WriteCacheRatio();
} }
if (0 < writeCache_->GetCacheSize()) //仍有文件未存储 if (0 < writeCache_->GetCacheSize()) //仍有文件未存储
{ {
LOG(WARNING) << "[TestOutPut] Final BackGroundFlush "; // LOG(WARNING) << "[TestOutPut] Final BackGroundFlush ";
FsSync(); FsSync();
} }
} }