author     KunoiSayami <[email protected]>    2021-11-22 19:05:34 +0800
committer  KunoiSayami <[email protected]>    2021-11-22 19:05:34 +0800
commit     0e18a4800b0737dc8f1d3720bc79bf000529ddef
tree       228a9860c2347a9076a47edc00b6a61e9f4ec663
parent     d30033359c0b97b28b50f54eeea6e820156f7578
feat(skiplist): Convert skiplist to CUDA version
Signed-off-by: KunoiSayami <[email protected]>
-rw-r--r--   db/skiplist.cuh       | 39
-rw-r--r--   db/skiplist_test.cu   | 27
-rw-r--r--   util/arena.cu         |  2
-rw-r--r--   util/arena.cuh        |  6
-rw-r--r--   util/random.cuh       |  2
5 files changed, 41 insertions, 35 deletions
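
The core of the conversion is mechanical: methods that must run inside kernels gain a __device__ qualifier, and the std::atomic link words become cuda::atomic from libcu++. Below is a minimal sketch of that substitution in isolation, assuming the <cuda/atomic> header and the cuda::memory_order_* names the patch itself uses; the DemoNode type and kernel are illustrative, not part of the patch.

    // Minimal sketch of the std::atomic -> cuda::atomic substitution.
    // Assumes libcu++ (<cuda/atomic>); names here are illustrative only.
    #include <cstdio>
    #include <cuda/atomic>

    struct DemoNode {
      int value;
      // Same idea as SkipList::Node::next_: links are published with a
      // release store and read with an acquire load, so a reader that
      // follows a link observes a fully initialized node.
      cuda::atomic<DemoNode*> next;
    };

    __global__ void demo_kernel() {
      DemoNode a, b;
      b.value = 42;
      a.next.store(&b, cuda::memory_order_release);          // publish b
      DemoNode* n = a.next.load(cuda::memory_order_acquire);  // observe b
      printf("linked value = %d\n", n->value);
    }

    int main() {
      demo_kernel<<<1, 1>>>();
      cudaDeviceSynchronize();
      return 0;
    }

The acquire/release pairing preserves the original LevelDB invariant spelled out in the comments: anybody who reads through a link sees a fully initialized node.
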
diff --git a/db/skiplist.cuh b/db/skiplist.cuh
index 5310d9c..2520a13 100644
--- a/db/skiplist.cuh
+++ b/db/skiplist.cuh
@@ -48,7 +48,8 @@ class SkipList {
// Create a new SkipList object that will use "cmp" for comparing keys,
// and will allocate memory using "*arena". Objects allocated in the arena
// must remain allocated for the lifetime of the skiplist object.
- explicit SkipList(Comparator cmp, Arena* arena);
+ //explicit SkipList(Comparator cmp, Arena* arena);
+ __device__ explicit SkipList(Comparator cmp, Arena* arena);
SkipList(const SkipList&) = delete;
SkipList& operator=(const SkipList&) = delete;
@@ -111,7 +112,7 @@ class SkipList {
__device__ bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
// Return true if key is greater than the data stored in "n"
- bool KeyIsAfterNode(const Key& key, Node* n) const;
+ __device__ bool KeyIsAfterNode(const Key& key, Node* n) const;
// Return the earliest node that comes at or after key.
// Return nullptr if there is no such node.
@@ -145,38 +146,54 @@ class SkipList {
// Implementation details follow
template <typename Key, class Comparator>
struct SkipList<Key, Comparator>::Node {
- explicit Node(const Key& k) : key(k) {}
+ //explicit Node(const Key& k) : key(k) {}
Key const key;
+ explicit __device__ Node(const Key& k): key(k) {}
+
+ __device__ void* operator new(size_t bytes) {
+ Node * ptr = nullptr;
+ cudaMalloc((void**)&ptr, bytes);
+ return ptr;
+ }
+
+ __device__ void* operator new(size_t bytes, void * const ptr) {
+ return ptr;
+ }
+
+ __device__ void operator delete(void *ptr) {
+ cudaFree(ptr);
+ }
+
// Accessors/mutators for links. Wrapped in methods so we can
// add the appropriate barriers as necessary.
- Node* Next(int n) {
+ __device__ Node* Next(int n) {
assert(n >= 0);
// Use an 'acquire load' so that we observe a fully initialized
// version of the returned Node.
- return next_[n].load(std::memory_order_acquire);
+ return next_[n].load(cuda::memory_order_acquire);
}
__device__ void SetNext(int n, Node* x) {
assert(n >= 0);
// Use a 'release store' so that anybody who reads through this
// pointer observes a fully initialized version of the inserted node.
- next_[n].store(x, std::memory_order_release);
+ next_[n].store(x, cuda::memory_order_release);
}
// No-barrier variants that can be safely used in a few locations.
__device__ Node* NoBarrier_Next(int n) {
assert(n >= 0);
- return next_[n].load(std::memory_order_relaxed);
+ return next_[n].load(cuda::memory_order_relaxed);
}
__device__ void NoBarrier_SetNext(int n, Node* x) {
assert(n >= 0);
- next_[n].store(x, std::memory_order_relaxed);
+ next_[n].store(x, cuda::memory_order_relaxed);
}
private:
// Array of length equal to the node height. next_[0] is lowest level link.
- std::atomic<Node*> next_[1];
+ cuda::atomic<Node*> next_[1];
};
template <typename Key, class Comparator>
@@ -253,7 +270,7 @@ __device__ int SkipList<Key, Comparator>::RandomHeight() {
}
template <typename Key, class Comparator>
-bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
+__device__ bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
// null n is considered infinite
return (n != nullptr) && (compare_(n->key, key) < 0);
}
@@ -323,7 +340,7 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
}
template <typename Key, class Comparator>
-SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
+__device__ SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
head_(NewNode(0 /* any key will do */, kMaxHeight)),
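
Node also gains device-side allocation: the plain operator new asks the device runtime's cudaMalloc for heap bytes (which generally means building with relocatable device code, -rdc=true), while the placement form lets NewNode construct nodes into memory the arena already owns. A standalone sketch of that pattern under those assumptions; DeviceBox and box_kernel are illustrative names, not code from the patch.

    // Sketch of the device-side operator new/delete pattern used for
    // SkipList::Node. Assumes -rdc=true so device-side cudaMalloc/cudaFree
    // are available.
    #include <cstddef>
    #include <cstdio>

    struct DeviceBox {
      int payload;

      __device__ explicit DeviceBox(int p) : payload(p) {}

      // Plain new: pull bytes from the device heap via the device runtime.
      __device__ void* operator new(size_t bytes) {
        void* ptr = nullptr;
        cudaMalloc(&ptr, bytes);
        return ptr;
      }

      // Placement new: construct into memory someone else (e.g. an arena) owns.
      __device__ void* operator new(size_t, void* const ptr) { return ptr; }

      __device__ void operator delete(void* ptr) { cudaFree(ptr); }
    };

    __global__ void box_kernel() {
      // Heap-backed allocation + construction through the class operator new.
      DeviceBox* heap_box = new DeviceBox(1);

      // Arena-style path: suitably aligned raw bytes first, then placement-new.
      alignas(DeviceBox) char buf[sizeof(DeviceBox)];
      DeviceBox* arena_box = new (buf) DeviceBox(2);

      printf("boxes: %d %d\n", heap_box->payload, arena_box->payload);

      // DeviceBox is trivially destructible, so arena_box needs no cleanup.
      delete heap_box;  // runs operator delete -> cudaFree
    }

    int main() {
      box_kernel<<<1, 1>>>();
      cudaDeviceSynchronize();
      return 0;
    }
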
diff --git a/db/skiplist_test.cu b/db/skiplist_test.cu
index 53f60f8..9b7ec7e 100644
--- a/db/skiplist_test.cu
+++ b/db/skiplist_test.cu
@@ -22,7 +22,7 @@ namespace leveldb {
typedef uint64_t Key;
struct Comparator {
- int operator()(const Key& a, const Key& b) const {
+ __device__ int operator()(const Key& a, const Key& b) const {
if (a < b) {
return -1;
} else if (a > b) {
@@ -287,31 +287,22 @@ __device__ void update_list(SkipList<Key, Comparator> * l, Key key) {
l->Insert(key);
}
-__global__ void insert_skiplist(SkipList<Key, Comparator> * l, Random * rnd) {
+__global__ void insert_skiplist(int random_seed) {
+ Arena arena;
+ Comparator cmp;
+ auto * skipList = new SkipList<Key, Comparator>(cmp, &arena);
+ auto * device_rnd = new Random(random_seed);
unsigned row = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < 1000; i++ ) {
- update_list(l, rnd->Next());
+ update_list(skipList, device_rnd->Next());
}
}
__host__ void host_insert_test() {
- constexpr size_t sz_size = 1024;
- Arena arena;
- Comparator cmp;
Key * keys;
//SkipList<Key, Comparator> list(cmp, &arena);
-
- SkipList<Key, Comparator> * ptr_list;
- Random * device_rnd;
-
- //cudaMallocManaged((void**)&keys, sizeof(Key) * sz_size);
- cudaMallocManaged((void**)&device_rnd, sizeof(Random));
- cudaMallocManaged((void**)&ptr_list, sizeof(SkipList<Key, Comparator>));
-
- ptr_list = new SkipList<Key, Comparator>(cmp, &arena);
- device_rnd = new Random(test::RandomSeed());
/*
for (int i = 0; i < 1000; i++) {
keys[i] = .Next();
@@ -321,12 +312,10 @@ __host__ void host_insert_test() {
dim3 gridSize(32, 1);
- insert_skiplist<<<gridSize, blockSize>>>(ptr_list, device_rnd);
+ insert_skiplist<<<gridSize, blockSize>>>(test::RandomSeed());
cudaDeviceSynchronize();
- cudaFree(&device_rnd);
- cudaFree(ptr_list);
}
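
With the list, arena, and random generator now constructed inside the kernel, the host side reduces to a launch and a synchronize. A hypothetical harness around such a launch, with explicit error checks; the stub kernel, launch geometry, and seed below are placeholders rather than code from the test.

    // Hypothetical host-side harness for a kernel like insert_skiplist(int):
    // launch, then surface both launch-time and execution-time errors.
    #include <cstdio>

    __global__ void insert_skiplist_stub(int random_seed) {
      // Stand-in for the real insert_skiplist kernel in skiplist_test.cu.
      (void)random_seed;
    }

    int main() {
      dim3 gridSize(32, 1);
      dim3 blockSize(32, 1);

      insert_skiplist_stub<<<gridSize, blockSize>>>(301);

      // Launch errors (bad configuration, no device, ...) show up here.
      cudaError_t err = cudaGetLastError();
      if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
      }

      // Execution errors (asserts, illegal accesses, ...) show up after the sync.
      err = cudaDeviceSynchronize();
      if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
      }
      return 0;
    }
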
diff --git a/util/arena.cu b/util/arena.cu
index db5827d..a9556e5 100644
--- a/util/arena.cu
+++ b/util/arena.cu
@@ -8,7 +8,7 @@ namespace leveldb {
static const int kBlockSize = 4096;
-Arena::Arena()
+__device__ Arena::Arena()
: alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0),
head_(nullptr), blocks_(nullptr) {}
diff --git a/util/arena.cuh b/util/arena.cuh
index afbc575..950f286 100644
--- a/util/arena.cuh
+++ b/util/arena.cuh
@@ -17,12 +17,12 @@ namespace leveldb {
class Arena {
public:
- Arena();
+ __device__ Arena();
Arena(const Arena&) = delete;
Arena& operator=(const Arena&) = delete;
- ~Arena();
+ __device__ ~Arena();
// Return a pointer to a newly allocated memory block of "bytes" bytes.
__device__ char* Allocate(size_t bytes);
@@ -32,7 +32,7 @@ class Arena {
// Returns an estimate of the total memory usage of data allocated
// by the arena.
- size_t MemoryUsage() const {
+ __device__ size_t MemoryUsage() const {
return memory_usage_.load(cuda::memory_order_relaxed);
}
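
Because the constructor, destructor, Allocate, and MemoryUsage are all __device__ now, an Arena can live entirely in a kernel's local scope. A rough usage sketch under that assumption; it presumes util/arena.cuh is on the include path and that Arena remains in namespace leveldb.

    // Rough usage sketch for the __device__ Arena interface shown above
    // (Arena(), Allocate(size_t), MemoryUsage()). Kernel name is illustrative.
    #include <cstdio>
    #include "util/arena.cuh"

    __global__ void arena_demo() {
      leveldb::Arena arena;               // constructed on the device
      char* buf = arena.Allocate(128);    // device-side allocation
      buf[0] = 'x';
      printf("arena usage: %llu bytes\n",
             static_cast<unsigned long long>(arena.MemoryUsage()));
    }                                     // ~Arena() also runs on the device

    int main() {
      arena_demo<<<1, 1>>>();
      cudaDeviceSynchronize();
      return 0;
    }
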
diff --git a/util/random.cuh b/util/random.cuh
index 38eafe2..95845f1 100644
--- a/util/random.cuh
+++ b/util/random.cuh
@@ -17,7 +17,7 @@ class Random {
uint32_t seed_;
public:
- explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
+ __device__ explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
// Avoid bad seeds.
if (seed_ == 0 || seed_ == 2147483647L) {
seed_ = 1;
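
Finally, the __device__ Random constructor lets each thread own a generator, matching how insert_skiplist builds one from the seed and calls Next(). A small sketch under that assumption; the per-thread seed offset is illustrative only (the test gives every thread the same test::RandomSeed() value), and it presumes Random stays in namespace leveldb with a __device__ Next().

    // Sketch of per-thread Random usage enabled by the __device__ constructor.
    #include <cstdint>
    #include <cstdio>
    #include "util/random.cuh"

    __global__ void random_demo(uint32_t base_seed) {
      unsigned tid = threadIdx.x + blockIdx.x * blockDim.x;
      // The constructor masks the seed and, per the "Avoid bad seeds" check,
      // remaps 0 and 2147483647 to 1, leaving the state in [1, 2^31 - 2].
      leveldb::Random rnd(base_seed + tid);
      printf("thread %u -> %u\n", tid, rnd.Next());
    }

    int main() {
      random_demo<<<1, 4>>>(301u);
      cudaDeviceSynchronize();
      return 0;
    }
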