Commit c3bb41c4 authored by Dmitry I. Lyakh

Fixed a bug in the linear memory pool


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent e276dd5b
......
@@ -3791,7 +3791,7 @@ TEST(NumServerTester, CuTensorNet) {
  const int NUM_REPEATS = 10;
- exatn::resetLoggingLevel(2,2); //debug
+ //exatn::resetLoggingLevel(2,2); //debug
  bool success = true;
......
 /** ExaTN: Tensor Runtime: Tensor network executor: NVIDIA cuQuantum
-REVISION: 2022/01/10
+REVISION: 2022/01/11
 Copyright (C) 2018-2022 Dmitry Lyakh
 Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle)
......
@@ -413,6 +413,9 @@ void CuQuantumExecutor::loadTensors(std::shared_ptr<TensorNetworkReq> tn_req)
  //Initiate data transfers:
  HANDLE_CUDA_ERROR(cudaEventRecord(tn_req->data_in_start,tn_req->stream));
  for(auto & descr: tn_req->tensor_descriptors){
+  /*std::cout << "#DEBUG(exatn::CuQuantumExecutor): loadTensors: "
+             << descr.second.dst_ptr.back() << " " << descr.second.src_ptr << " "
+             << descr.second.size << std::endl << std::flush; //debug*/
   HANDLE_CUDA_ERROR(cudaMemcpyAsync(descr.second.dst_ptr.back(),descr.second.src_ptr,
                                     descr.second.size,cudaMemcpyDefault,tn_req->stream));
  }
......
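For reference, loadTensors() above stages tensor bodies to the GPU by recording a CUDA event on the request's stream and then issuing one cudaMemcpyAsync per tensor descriptor on that same stream; the added lines only introduce a commented-out debug print of each transfer. The following is a minimal, self-contained sketch of that event-record + async-copy pattern. The buffer names, counts, and sizes are hypothetical, only the CUDA runtime calls (cudaEventRecord, cudaMemcpyAsync, cudaEventElapsedTime, and so on) mirror the hunk, and CHECK_CUDA is a stand-in for the HANDLE_CUDA_ERROR macro used in the file.

// Sketch: asynchronous staging of several host buffers to the GPU on one
// stream, bracketed by CUDA events (hypothetical buffer names and sizes).
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <vector>

#define CHECK_CUDA(call) \
 do { cudaError_t err_ = (call); \
      if(err_ != cudaSuccess){ \
       std::fprintf(stderr,"CUDA error: %s\n",cudaGetErrorString(err_)); \
       std::exit(EXIT_FAILURE); } } while(0)

int main() {
 constexpr std::size_t kNumBuffers = 4;    // hypothetical tensor count
 constexpr std::size_t kBytes = 1UL << 20; // hypothetical tensor size (1 MiB)

 cudaStream_t stream;
 cudaEvent_t data_in_start, data_in_finish;
 CHECK_CUDA(cudaStreamCreate(&stream));
 CHECK_CUDA(cudaEventCreate(&data_in_start));
 CHECK_CUDA(cudaEventCreate(&data_in_finish));

 // Pinned host buffers let the copies proceed truly asynchronously.
 std::vector<void*> host(kNumBuffers,nullptr), dev(kNumBuffers,nullptr);
 for(std::size_t i = 0; i < kNumBuffers; ++i){
  CHECK_CUDA(cudaMallocHost(&host[i],kBytes));
  CHECK_CUDA(cudaMalloc(&dev[i],kBytes));
 }

 // Record the start event, enqueue one async copy per buffer, record the end.
 CHECK_CUDA(cudaEventRecord(data_in_start,stream));
 for(std::size_t i = 0; i < kNumBuffers; ++i){
  CHECK_CUDA(cudaMemcpyAsync(dev[i],host[i],kBytes,cudaMemcpyDefault,stream));
 }
 CHECK_CUDA(cudaEventRecord(data_in_finish,stream));

 // Wait for the transfers to complete and report the elapsed time.
 CHECK_CUDA(cudaEventSynchronize(data_in_finish));
 float ms = 0.0f;
 CHECK_CUDA(cudaEventElapsedTime(&ms,data_in_start,data_in_finish));
 std::printf("Staged %zu buffers in %.3f ms\n",kNumBuffers,ms);

 for(std::size_t i = 0; i < kNumBuffers; ++i){
  CHECK_CUDA(cudaFree(dev[i]));
  CHECK_CUDA(cudaFreeHost(host[i]));
 }
 CHECK_CUDA(cudaEventDestroy(data_in_finish));
 CHECK_CUDA(cudaEventDestroy(data_in_start));
 CHECK_CUDA(cudaStreamDestroy(stream));
 return 0;
}

Pinned (page-locked) host memory is used in the sketch because pageable memory would make the same cudaMemcpyAsync calls fall back to staged, effectively synchronous transfers.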
 /** ExaTN: Tensor Runtime: Tensor network executor: Linear memory allocator
-REVISION: 2022/01/03
+REVISION: 2022/01/11
-Copyright (C) 2018-2022 Dmitry Lyakh
+Copyright (C) 2018-2022 Dmitry I. Lyakh
 Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle)
 Rationale:
......
@@ -53,14 +53,35 @@ public:
 void * acquireMemory(std::size_t mem_size) {
   assert(mem_size > 0);
-  mem_size = (mem_size - (mem_size % alignment_)) + alignment_;
-  if(occupiedSize() + mem_size > total_size_) return nullptr;
-  void * mem_ptr = front_;
-  std::size_t left_forward = (total_size_ - reinterpret_cast<std::size_t>(front_));
-  if(left_forward > mem_size){
-   front_ = (void*)((char*)front_ + mem_size);
-  }else{
-   front_ = (void*)((char*)base_ptr_ + (mem_size - left_forward));
+  const auto unaligned = mem_size % alignment_;
+  if(unaligned > 0) mem_size += (alignment_ - unaligned);
+  void * mem_ptr = nullptr;
+  if(occupiedSize() + mem_size <= total_size_){
+   const std::size_t fptr = reinterpret_cast<std::size_t>(front_);
+   const std::size_t bptr = reinterpret_cast<std::size_t>(back_);
+   if(fptr >= bptr){
+    std::size_t left_forward = ((reinterpret_cast<std::size_t>(base_ptr_) + total_size_)
+                                - reinterpret_cast<std::size_t>(front_));
+    if(left_forward >= mem_size){
+     mem_ptr = front_;
+     if(left_forward == mem_size){
+      front_ = base_ptr_;
+     }else{
+      front_ = (void*)((char*)front_ + mem_size);
+     }
+    }else{
+     if((reinterpret_cast<std::size_t>(back_) - reinterpret_cast<std::size_t>(base_ptr_)) >= mem_size){
+      mem_ptr = base_ptr_;
+      front_ = (void*)((char*)base_ptr_ + mem_size);
+     }
+    }
+   }else{
+    std::size_t left_forward = (bptr - fptr);
+    if(left_forward >= mem_size){
+     mem_ptr = front_;
+     front_ = (void*)((char*)front_ + mem_size);
+    }
+   }
   }
   return mem_ptr;
 }
......
@@ -73,8 +94,9 @@ public:
   return;
  }
- void restorePreviousFront(void * front) {
-  front_ = front;
+ void restorePreviousFront(void * front_ptr) {
+  assert(reinterpret_cast<std::size_t>(front_ptr) % alignment_ == 0);
+  front_ = front_ptr;
   return;
  }
......
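The substance of the fix is in acquireMemory(). The old version always padded the request by a full alignment unit even when it was already aligned, computed the space remaining before the end of the arena from the raw front_ address rather than from front_'s offset past base_ptr_, and wrapped front_ around to base_ptr_ without consulting back_, so the block it handed out could run past the end of the arena or into memory still in use. The new version pads only unaligned requests, treats the free space as the circular gap between front_ and back_, wraps front_ cleanly to base_ptr_ on an exact fit, and otherwise falls back to the region below back_ only when it is large enough, returning nullptr if nothing fits. The companion change to restorePreviousFront() renames the parameter and asserts that the restored front is aligned. Below is a self-contained sketch of that ring-buffer discipline; RingArena, acquire, and releaseOldest are illustrative names, and the FIFO release path (including the explicit record of the tail wasted at a wrap) is an assumption, since the release side of the ExaTN pool is not part of this diff.

// Sketch of a circular (front/back) linear allocator in the spirit of the
// fixed acquireMemory() above; names and the release policy are illustrative.
#include <cassert>
#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

class RingArena {
public:
 RingArena(std::size_t total_size, std::size_t alignment)
  : buffer_(total_size), alignment_(alignment),
    base_(buffer_.data()), front_(base_), back_(base_) {}

 // Acquire mem_size bytes (rounded up to the alignment) from the front of the
 // ring; returns nullptr if no suitable contiguous gap exists.
 void * acquire(std::size_t mem_size) {
  assert(mem_size > 0);
  const std::size_t rem = mem_size % alignment_;
  if(rem > 0) mem_size += (alignment_ - rem);
  if(occupied_ + mem_size > buffer_.size()) return nullptr;
  char * ptr = nullptr;
  if(front_ >= back_){ // free space: [front_, end) and [base_, back_)
   const std::size_t tail = static_cast<std::size_t>((base_ + buffer_.size()) - front_);
   if(tail >= mem_size){ // fits before the end of the arena
    ptr = front_;
    front_ = (tail == mem_size) ? base_ : (front_ + mem_size);
   }else if(static_cast<std::size_t>(back_ - base_) >= mem_size){
    // Wrap around: record the unused tail as wasted space so the reserved
    // region [back_, front_) stays contiguous on the ring.
    records_.push_back({front_, tail, true});
    occupied_ += tail;
    ptr = base_;
    front_ = base_ + mem_size;
   }
  }else{ // free space is the single gap [front_, back_)
   if(static_cast<std::size_t>(back_ - front_) >= mem_size){
    ptr = front_;
    front_ += mem_size;
   }
  }
  if(ptr != nullptr){
   records_.push_back({ptr, mem_size, false});
   occupied_ += mem_size;
  }
  return ptr;
 }

 // Release the oldest live allocation, advancing back_ past it and past any
 // wasted wrap-around tail that precedes it.
 void releaseOldest() {
  while(!records_.empty() && records_.front().wasted) popOldest();
  assert(!records_.empty());
  popOldest();
 }

 std::size_t occupiedSize() const {return occupied_;}

private:
 struct Record {char * ptr; std::size_t size; bool wasted;};

 void popOldest() {
  const Record rec = records_.front();
  records_.pop_front();
  occupied_ -= rec.size;
  back_ = rec.ptr + rec.size;
  if(back_ == base_ + buffer_.size()) back_ = base_; // back_ wraps at the end
 }

 std::vector<char> buffer_;
 std::size_t alignment_;
 char * base_;
 char * front_;
 char * back_;
 std::size_t occupied_ = 0;
 std::deque<Record> records_;
};

int main() {
 RingArena arena(1024, 256);    // 1 KiB arena, 256-byte granularity
 void * a = arena.acquire(300); // rounded up to 512 bytes, placed at offset 0
 void * b = arena.acquire(200); // rounded up to 256 bytes, placed at offset 512
 assert(a && b);
 arena.releaseOldest();         // release a: back_ moves to offset 512
 void * c = arena.acquire(512); // tail (256 bytes) too small: wraps to the base
 assert(c == a);                // the wrapped allocation reuses the freed space
 std::cout << "Occupied after wrap: " << arena.occupiedSize()
           << " bytes (includes the 256-byte wasted tail)\n";
 return 0;
}

Recording the wasted tail at a wrap is the design choice that keeps the reserved region contiguous on the ring, so front_ == back_ unambiguously means either an empty pool (occupied size 0) or a completely full one (occupied size equal to the arena size).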