Skip to content

Commit

Permalink
pytorch 1.3 support
Browse files Browse the repository at this point in the history
  • Loading branch information
rusty1s committed Oct 14, 2019
1 parent bd3ae68 commit de0216d
Show file tree
Hide file tree
Showing 17 changed files with 121 additions and 70 deletions.
5 changes: 5 additions & 0 deletions cpu/compat.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#ifdef VERSION_GE_1_3
#define DATA_PTR data_ptr
#else
#define DATA_PTR data
#endif
17 changes: 9 additions & 8 deletions cpu/fps.cpp
Original file line number Diff line number Diff line change
@@ -1,40 +1,41 @@
#include <torch/extension.h>

#include "compat.h"
#include "utils.h"

// Returns the 2-norm, taken along dim 1, of each row of `x` after
// subtracting row `index` — i.e. the Euclidean distance from every point
// in `x` to the point at `index`.
at::Tensor get_dist(at::Tensor x, ptrdiff_t index) {
  auto diff = x - x[index];
  return diff.norm(2, 1);
}

at::Tensor fps(at::Tensor x, at::Tensor batch, float ratio, bool random) {
auto batch_size = batch[-1].data<int64_t>()[0] + 1;
auto batch_size = batch[-1].DATA_PTR<int64_t>()[0] + 1;

auto deg = degree(batch, batch_size);
auto cum_deg = at::cat({at::zeros(1, deg.options()), deg.cumsum(0)}, 0);
auto k = (deg.toType(at::kFloat) * ratio).ceil().toType(at::kLong);
auto cum_k = at::cat({at::zeros(1, k.options()), k.cumsum(0)}, 0);

auto out = at::empty(cum_k[-1].data<int64_t>()[0], batch.options());
auto out = at::empty(cum_k[-1].DATA_PTR<int64_t>()[0], batch.options());

auto cum_deg_d = cum_deg.data<int64_t>();
auto k_d = k.data<int64_t>();
auto cum_k_d = cum_k.data<int64_t>();
auto out_d = out.data<int64_t>();
auto cum_deg_d = cum_deg.DATA_PTR<int64_t>();
auto k_d = k.DATA_PTR<int64_t>();
auto cum_k_d = cum_k.DATA_PTR<int64_t>();
auto out_d = out.DATA_PTR<int64_t>();

for (ptrdiff_t b = 0; b < batch_size; b++) {
auto index = at::range(cum_deg_d[b], cum_deg_d[b + 1] - 1, out.options());
auto y = x.index_select(0, index);

ptrdiff_t start = 0;
if (random) {
start = at::randperm(y.size(0), batch.options()).data<int64_t>()[0];
start = at::randperm(y.size(0), batch.options()).DATA_PTR<int64_t>()[0];
}

out_d[cum_k_d[b]] = cum_deg_d[b] + start;
auto dist = get_dist(y, start);

for (ptrdiff_t i = 1; i < k_d[b]; i++) {
ptrdiff_t argmax = dist.argmax().data<int64_t>()[0];
ptrdiff_t argmax = dist.argmax().DATA_PTR<int64_t>()[0];
out_d[cum_k_d[b] + i] = cum_deg_d[b] + argmax;
dist = at::min(dist, get_dist(y, argmax));
}
Expand Down
15 changes: 8 additions & 7 deletions cpu/graclus.cpp
Original file line number Diff line number Diff line change
@@ -1,18 +1,19 @@
#include <torch/extension.h>

#include "compat.h"
#include "utils.h"

at::Tensor graclus(at::Tensor row, at::Tensor col, int64_t num_nodes) {
std::tie(row, col) = remove_self_loops(row, col);
std::tie(row, col) = rand(row, col);
std::tie(row, col) = to_csr(row, col, num_nodes);
auto row_data = row.data<int64_t>(), col_data = col.data<int64_t>();
auto row_data = row.DATA_PTR<int64_t>(), col_data = col.DATA_PTR<int64_t>();

auto perm = at::randperm(num_nodes, row.options());
auto perm_data = perm.data<int64_t>();
auto perm_data = perm.DATA_PTR<int64_t>();

auto cluster = at::full(num_nodes, -1, row.options());
auto cluster_data = cluster.data<int64_t>();
auto cluster_data = cluster.DATA_PTR<int64_t>();

for (int64_t i = 0; i < num_nodes; i++) {
auto u = perm_data[i];
Expand Down Expand Up @@ -41,16 +42,16 @@ at::Tensor weighted_graclus(at::Tensor row, at::Tensor col, at::Tensor weight,
int64_t num_nodes) {
std::tie(row, col, weight) = remove_self_loops(row, col, weight);
std::tie(row, col, weight) = to_csr(row, col, weight, num_nodes);
auto row_data = row.data<int64_t>(), col_data = col.data<int64_t>();
auto row_data = row.DATA_PTR<int64_t>(), col_data = col.DATA_PTR<int64_t>();

auto perm = at::randperm(num_nodes, row.options());
auto perm_data = perm.data<int64_t>();
auto perm_data = perm.DATA_PTR<int64_t>();

auto cluster = at::full(num_nodes, -1, row.options());
auto cluster_data = cluster.data<int64_t>();
auto cluster_data = cluster.DATA_PTR<int64_t>();

AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "weighted_graclus", [&] {
auto weight_data = weight.data<scalar_t>();
auto weight_data = weight.DATA_PTR<scalar_t>();

for (int64_t i = 0; i < num_nodes; i++) {
auto u = perm_data[i];
Expand Down
13 changes: 7 additions & 6 deletions cpu/rw.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#include <torch/extension.h>

#include "compat.h"
#include "utils.h"

at::Tensor rw(at::Tensor row, at::Tensor col, at::Tensor start,
Expand All @@ -12,12 +13,12 @@ at::Tensor rw(at::Tensor row, at::Tensor col, at::Tensor start,
auto out =
at::full({start.size(0), (int64_t)walk_length + 1}, -1, start.options());

auto deg_d = deg.data<int64_t>();
auto cum_deg_d = cum_deg.data<int64_t>();
auto col_d = col.data<int64_t>();
auto start_d = start.data<int64_t>();
auto rand_d = rand.data<float>();
auto out_d = out.data<int64_t>();
auto deg_d = deg.DATA_PTR<int64_t>();
auto cum_deg_d = cum_deg.DATA_PTR<int64_t>();
auto col_d = col.DATA_PTR<int64_t>();
auto start_d = start.DATA_PTR<int64_t>();
auto rand_d = rand.DATA_PTR<float>();
auto out_d = out.DATA_PTR<int64_t>();

for (ptrdiff_t n = 0; n < start.size(0); n++) {
int64_t cur = start_d[n];
Expand Down
8 changes: 5 additions & 3 deletions cpu/sampler.cpp
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
#include <torch/extension.h>

#include "compat.h"

at::Tensor neighbor_sampler(at::Tensor start, at::Tensor cumdeg, size_t size,
float factor) {

auto start_ptr = start.data<int64_t>();
auto cumdeg_ptr = cumdeg.data<int64_t>();
auto start_ptr = start.DATA_PTR<int64_t>();
auto cumdeg_ptr = cumdeg.DATA_PTR<int64_t>();

std::vector<int64_t> e_ids;
for (ptrdiff_t i = 0; i < start.size(0); i++) {
Expand All @@ -29,7 +31,7 @@ at::Tensor neighbor_sampler(at::Tensor start, at::Tensor cumdeg, size_t size,
e_ids.insert(e_ids.end(), v.begin(), v.end());
} else {
auto sample = at::randperm(num_neighbors, start.options());
auto sample_ptr = sample.data<int64_t>();
auto sample_ptr = sample.DATA_PTR<int64_t>();
for (size_t j = 0; j < size_i; j++) {
e_ids.push_back(sample_ptr[j] + low);
}
Expand Down
6 changes: 4 additions & 2 deletions cuda/coloring.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

#include <ATen/ATen.h>

#include "compat.cuh"

#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS

Expand Down Expand Up @@ -30,8 +32,8 @@ int64_t colorize(at::Tensor cluster) {
auto props = at::full(numel, BLUE_PROB, cluster.options().dtype(at::kFloat));
auto bernoulli = props.bernoulli();

colorize_kernel<<<BLOCKS(numel), THREADS>>>(cluster.data<int64_t>(),
bernoulli.data<float>(), numel);
colorize_kernel<<<BLOCKS(numel), THREADS>>>(
cluster.DATA_PTR<int64_t>(), bernoulli.DATA_PTR<float>(), numel);

int64_t out;
cudaMemcpyFromSymbol(&out, done, sizeof(out), 0, cudaMemcpyDeviceToHost);
Expand Down
5 changes: 5 additions & 0 deletions cuda/compat.cuh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#ifdef VERSION_GE_1_3
#define DATA_PTR data_ptr
#else
#define DATA_PTR data
#endif
13 changes: 7 additions & 6 deletions cuda/fps_kernel.cu
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#include <ATen/ATen.h>

#include "atomics.cuh"
#include "compat.cuh"
#include "utils.cuh"

#define THREADS 1024
Expand Down Expand Up @@ -164,7 +165,7 @@ fps_kernel(const scalar_t *__restrict__ x, const int64_t *__restrict__ cum_deg,
at::Tensor fps_cuda(at::Tensor x, at::Tensor batch, float ratio, bool random) {
cudaSetDevice(x.get_device());
auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
cudaMemcpy(batch_sizes, batch[-1].data<int64_t>(), sizeof(int64_t),
cudaMemcpy(batch_sizes, batch[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
cudaMemcpyDeviceToHost);
auto batch_size = batch_sizes[0] + 1;

Expand All @@ -185,15 +186,15 @@ at::Tensor fps_cuda(at::Tensor x, at::Tensor batch, float ratio, bool random) {
auto tmp_dist = at::empty(x.size(0), x.options());

auto k_sum = (int64_t *)malloc(sizeof(int64_t));
cudaMemcpy(k_sum, cum_k[-1].data<int64_t>(), sizeof(int64_t),
cudaMemcpy(k_sum, cum_k[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
cudaMemcpyDeviceToHost);
auto out = at::empty(k_sum[0], k.options());

AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "fps_kernel", [&] {
FPS_KERNEL(x.size(1), x.data<scalar_t>(), cum_deg.data<int64_t>(),
cum_k.data<int64_t>(), start.data<int64_t>(),
dist.data<scalar_t>(), tmp_dist.data<scalar_t>(),
out.data<int64_t>());
FPS_KERNEL(x.size(1), x.DATA_PTR<scalar_t>(), cum_deg.DATA_PTR<int64_t>(),
cum_k.DATA_PTR<int64_t>(), start.DATA_PTR<int64_t>(),
dist.DATA_PTR<scalar_t>(), tmp_dist.DATA_PTR<scalar_t>(),
out.DATA_PTR<int64_t>());
});

return out;
Expand Down
8 changes: 5 additions & 3 deletions cuda/grid_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>

#include "compat.cuh"

#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS

Expand Down Expand Up @@ -31,10 +33,10 @@ at::Tensor grid_cuda(at::Tensor pos, at::Tensor size, at::Tensor start,

AT_DISPATCH_ALL_TYPES(pos.scalar_type(), "grid_kernel", [&] {
grid_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
cluster.data<int64_t>(),
cluster.DATA_PTR<int64_t>(),
at::cuda::detail::getTensorInfo<scalar_t, int64_t>(pos),
size.data<scalar_t>(), start.data<scalar_t>(), end.data<scalar_t>(),
cluster.numel());
size.DATA_PTR<scalar_t>(), start.DATA_PTR<scalar_t>(),
end.DATA_PTR<scalar_t>(), cluster.numel());
});

return cluster;
Expand Down
10 changes: 6 additions & 4 deletions cuda/knn_kernel.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"

#define THREADS 1024
Expand Down Expand Up @@ -79,7 +80,7 @@ at::Tensor knn_cuda(at::Tensor x, at::Tensor y, size_t k, at::Tensor batch_x,
at::Tensor batch_y, bool cosine) {
cudaSetDevice(x.get_device());
auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
cudaMemcpy(batch_sizes, batch_x[-1].data<int64_t>(), sizeof(int64_t),
cudaMemcpy(batch_sizes, batch_x[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
cudaMemcpyDeviceToHost);
auto batch_size = batch_sizes[0] + 1;

Expand All @@ -94,9 +95,10 @@ at::Tensor knn_cuda(at::Tensor x, at::Tensor y, size_t k, at::Tensor batch_x,

AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "knn_kernel", [&] {
knn_kernel<scalar_t><<<batch_size, THREADS>>>(
x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
batch_y.data<int64_t>(), dist.data<scalar_t>(), row.data<int64_t>(),
col.data<int64_t>(), k, x.size(1), cosine);
x.DATA_PTR<scalar_t>(), y.DATA_PTR<scalar_t>(),
batch_x.DATA_PTR<int64_t>(), batch_y.DATA_PTR<int64_t>(),
dist.DATA_PTR<scalar_t>(), row.DATA_PTR<int64_t>(),
col.DATA_PTR<int64_t>(), k, x.size(1), cosine);
});

auto mask = col != -1;
Expand Down
8 changes: 5 additions & 3 deletions cuda/nearest_kernel.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"

#define THREADS 1024
Expand Down Expand Up @@ -62,7 +63,7 @@ at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,
at::Tensor batch_y) {
cudaSetDevice(x.get_device());
auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
cudaMemcpy(batch_sizes, batch_x[-1].data<int64_t>(), sizeof(int64_t),
cudaMemcpy(batch_sizes, batch_x[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
cudaMemcpyDeviceToHost);
auto batch_size = batch_sizes[0] + 1;

Expand All @@ -73,8 +74,9 @@ at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,

AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "nearest_kernel", [&] {
nearest_kernel<scalar_t><<<x.size(0), THREADS>>>(
x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
batch_y.data<int64_t>(), out.data<int64_t>(), x.size(1));
x.DATA_PTR<scalar_t>(), y.DATA_PTR<scalar_t>(),
batch_x.DATA_PTR<int64_t>(), batch_y.DATA_PTR<int64_t>(),
out.DATA_PTR<int64_t>(), x.size(1));
});

return out;
Expand Down
11 changes: 7 additions & 4 deletions cuda/proposal.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

#include <ATen/ATen.h>

#include "compat.cuh"

#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS

Expand Down Expand Up @@ -36,8 +38,8 @@ __global__ void propose_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
void propose(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
at::Tensor col) {
propose_kernel<<<BLOCKS(cluster.numel()), THREADS>>>(
cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
col.data<int64_t>(), cluster.numel());
cluster.DATA_PTR<int64_t>(), proposal.DATA_PTR<int64_t>(),
row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(), cluster.numel());
}

template <typename scalar_t>
Expand Down Expand Up @@ -79,7 +81,8 @@ void propose(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
at::Tensor col, at::Tensor weight) {
AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "propose_kernel", [&] {
propose_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
cluster.DATA_PTR<int64_t>(), proposal.DATA_PTR<int64_t>(),
row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(),
weight.DATA_PTR<scalar_t>(), cluster.numel());
});
}
10 changes: 6 additions & 4 deletions cuda/radius_kernel.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#include <ATen/ATen.h>

#include "compat.cuh"
#include "utils.cuh"

#define THREADS 1024
Expand Down Expand Up @@ -50,7 +51,7 @@ at::Tensor radius_cuda(at::Tensor x, at::Tensor y, float radius,
size_t max_num_neighbors) {
cudaSetDevice(x.get_device());
auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
cudaMemcpy(batch_sizes, batch_x[-1].data<int64_t>(), sizeof(int64_t),
cudaMemcpy(batch_sizes, batch_x[-1].DATA_PTR<int64_t>(), sizeof(int64_t),
cudaMemcpyDeviceToHost);
auto batch_size = batch_sizes[0] + 1;

Expand All @@ -64,9 +65,10 @@ at::Tensor radius_cuda(at::Tensor x, at::Tensor y, float radius,

AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "radius_kernel", [&] {
radius_kernel<scalar_t><<<batch_size, THREADS>>>(
x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
batch_y.data<int64_t>(), row.data<int64_t>(), col.data<int64_t>(),
radius, max_num_neighbors, x.size(1));
x.DATA_PTR<scalar_t>(), y.DATA_PTR<scalar_t>(),
batch_x.DATA_PTR<int64_t>(), batch_y.DATA_PTR<int64_t>(),
row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(), radius,
max_num_neighbors, x.size(1));
});

auto mask = row != -1;
Expand Down
11 changes: 7 additions & 4 deletions cuda/response.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

#include <ATen/ATen.h>

#include "compat.cuh"

#define THREADS 1024
#define BLOCKS(N) (N + THREADS - 1) / THREADS

Expand Down Expand Up @@ -38,8 +40,8 @@ __global__ void respond_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
void respond(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
at::Tensor col) {
respond_kernel<<<BLOCKS(cluster.numel()), THREADS>>>(
cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
col.data<int64_t>(), cluster.numel());
cluster.DATA_PTR<int64_t>(), proposal.DATA_PTR<int64_t>(),
row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(), cluster.numel());
}

template <typename scalar_t>
Expand Down Expand Up @@ -84,7 +86,8 @@ void respond(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
at::Tensor col, at::Tensor weight) {
AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "respond_kernel", [&] {
respond_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
cluster.DATA_PTR<int64_t>(), proposal.DATA_PTR<int64_t>(),
row.DATA_PTR<int64_t>(), col.DATA_PTR<int64_t>(),
weight.DATA_PTR<scalar_t>(), cluster.numel());
});
}
Loading

0 comments on commit de0216d

Please sign in to comment.