Couple fixes.
LaurentMazare committed Jul 15, 2024
1 parent 16c4bb6 commit c326289
Showing 10 changed files with 1 addition and 57 deletions.
1 change: 1 addition & 0 deletions gen/gen.ml
@@ -32,6 +32,7 @@ let excluded_functions =
     ; "_cummax_helper"
     ; "retain_grad"
     ; "_validate_sparse_coo_tensor_args"
+    ; "_sparse_semi_structured_addmm"
     ; "_backward"
     ; "size"
     ; "stride"
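For context, `excluded_functions` in gen/gen.ml is the generator's denylist: any ATen declaration whose name appears there is skipped before bindings are emitted, which is why this single added entry accounts for all of the generated code deleted below. A minimal Rust sketch of that filtering idea (`Declaration` and the printed action are illustrative, not the actual OCaml generator logic):

```rust
use std::collections::HashSet;

/// Illustrative stand-in for a parsed ATen declaration.
struct Declaration {
    name: String,
}

fn main() {
    // Denylist mirroring excluded_functions, including the entry this commit adds.
    let excluded: HashSet<&str> =
        ["_cummax_helper", "retain_grad", "_sparse_semi_structured_addmm"]
            .into_iter()
            .collect();

    let decls = vec![
        Declaration { name: "add".to_string() },
        Declaration { name: "_sparse_semi_structured_addmm".to_string() },
    ];

    // Keep only declarations whose names are not denylisted.
    for d in decls.iter().filter(|d| !excluded.contains(d.name.as_str())) {
        println!("would generate a binding for {}", d.name);
    }
}
```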
19 changes: 0 additions & 19 deletions src/wrappers/tensor_fallible_generated.rs
@@ -6489,25 +6489,6 @@ impl Tensor {
         Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
     }
 
-    pub fn f_internal_sparse_semi_structured_addmm(
-        &self,
-        mat1: &Tensor,
-        mat1_meta: &Tensor,
-        mat2: &Tensor,
-        out_dtype: impl Into<Option<Kind>>,
-    ) -> Result<Tensor, TchError> {
-        let mut c_tensors = [std::ptr::null_mut(); 1];
-        unsafe_torch_err!(atg__sparse_semi_structured_addmm(
-            c_tensors.as_mut_ptr(),
-            self.c_tensor,
-            mat1.c_tensor,
-            mat1_meta.c_tensor,
-            mat2.c_tensor,
-            out_dtype.into().map_or(-1, |s| s.c_int())
-        ));
-        Ok(Tensor { c_tensor: c_tensors[0] })
-    }
-
     pub fn f_internal_sparse_semi_structured_apply(
         &self,
         thread_masks: &Tensor,
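The deleted method follows the pattern shared by all generated `f_*` wrappers: allocate a fixed-size array for output tensor pointers, let the C binding fill it, convert any recorded C++ exception into a `TchError`, and flatten the optional `Kind` into a `c_int` with -1 meaning unset. A self-contained sketch of that pattern with the FFI layer mocked out (`atg_example_op` and `f_example_op` are hypothetical names, not tch API):

```rust
use std::ptr;

type CTensorPtr = *mut std::ffi::c_void;

#[derive(Debug)]
struct TchError(String);

// Stand-in for a generated C binding: on success it writes one output
// tensor pointer into the caller-provided array; the real bindings
// record C++ exceptions through PROTECT instead of returning a flag.
unsafe fn atg_example_op(out: *mut CTensorPtr, input: CTensorPtr, out_dtype: i32) -> bool {
    let _ = (input, out_dtype);
    unsafe { *out = 0x1 as CTensorPtr };
    false // no error
}

// Fallible wrapper in the style of the generated f_* methods.
fn f_example_op(input: CTensorPtr, out_dtype: Option<i32>) -> Result<CTensorPtr, TchError> {
    let mut c_tensors = [ptr::null_mut(); 1];
    // An optional dtype crosses the FFI boundary as a c_int sentinel,
    // matching out_dtype.into().map_or(-1, |s| s.c_int()) in the diff.
    let failed =
        unsafe { atg_example_op(c_tensors.as_mut_ptr(), input, out_dtype.unwrap_or(-1)) };
    if failed {
        return Err(TchError("example op failed".to_string()));
    }
    Ok(c_tensors[0])
}

fn main() {
    let handle = f_example_op(ptr::null_mut(), None).expect("op should succeed");
    println!("got tensor handle {:?}", handle);
}
```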
10 changes: 0 additions & 10 deletions src/wrappers/tensor_generated.rs
@@ -3893,16 +3893,6 @@ impl Tensor {
         self.f_internal_sparse_mm_reduce_impl(other, reduce).unwrap()
     }
 
-    pub fn internal_sparse_semi_structured_addmm(
-        &self,
-        mat1: &Tensor,
-        mat1_meta: &Tensor,
-        mat2: &Tensor,
-        out_dtype: impl Into<Option<Kind>>,
-    ) -> Tensor {
-        self.f_internal_sparse_semi_structured_addmm(mat1, mat1_meta, mat2, out_dtype).unwrap()
-    }
-
     pub fn internal_sparse_semi_structured_apply(&self, thread_masks: &Tensor) -> (Tensor, Tensor) {
         self.f_internal_sparse_semi_structured_apply(thread_masks).unwrap()
     }
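tensor_generated.rs holds the infallible twins: each one forwards to its `f_*` counterpart and unwraps, so errors become panics, which is why both halves of the pair are removed together here. A tiny illustrative sketch of the convention (names hypothetical, not tch API):

```rust
// Fallible form: surfaces the error to the caller.
fn f_example_op(x: i64) -> Result<i64, String> {
    if x >= 0 { Ok(x + 1) } else { Err("negative input".to_string()) }
}

// Generated infallible twin: same arguments, panics on error.
fn example_op(x: i64) -> i64 {
    f_example_op(x).unwrap()
}

fn main() {
    assert_eq!(example_op(41), 42);
    // Callers who want to handle failures use the f_ form instead:
    assert!(f_example_op(-1).is_err());
}
```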
3 changes: 0 additions & 3 deletions src/wrappers/utils.rs
@@ -120,9 +120,6 @@ pub fn has_lazy() -> bool {
 pub fn has_mps() -> bool {
     unsafe_torch!(torch_sys::at_context_has_mps())
 }
-pub fn has_ort() -> bool {
-    unsafe_torch!(torch_sys::at_context_has_ort())
-}
 pub fn version_cudnn() -> i64 {
     unsafe_torch!(torch_sys::at_context_version_cudnn())
 }
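These helpers are thin wrappers over libtorch's global context queries; a typical use is gating device selection at startup. A hedged usage sketch, assuming the helpers are exposed as `tch::utils::*` and that `tch::Device` carries `Cpu`/`Cuda`/`Mps` variants (verify the exact paths against your tch version):

```rust
// Hedged sketch: pick the best available device using the
// capability checks defined in wrappers/utils.rs. The exact
// re-export paths are assumptions, not confirmed by this diff.
use tch::Device;

fn pick_device() -> Device {
    if tch::utils::has_mps() {
        Device::Mps
    } else if tch::Cuda::is_available() {
        Device::Cuda(0)
    } else {
        Device::Cpu
    }
}

fn main() {
    println!("selected device: {:?}", pick_device());
}
```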
7 changes: 0 additions & 7 deletions torch-sys/libtch/torch_api.cpp
@@ -1109,13 +1109,6 @@ bool at_context_has_mps() {
   return 0;
 }
 
-bool at_context_has_ort() {
-  PROTECT(
-    return at::globalContext().hasORT();
-  )
-  return 0;
-}
-
 module atm_load(char *filename) {
   PROTECT(
     return new torch::jit::script::Module(torch::jit::load(filename));
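`PROTECT` wraps each C++ body so exceptions are caught and recorded rather than unwinding across the FFI boundary; the Rust macros seen earlier (`unsafe_torch!`, `unsafe_torch_err!`) then inspect that record after every call. A self-contained Rust sketch of the consuming side, with the error slot mocked out (torch-sys's real plumbing differs in detail):

```rust
use std::cell::RefCell;

thread_local! {
    // Mock of the "last error" slot a PROTECT-style guard writes into.
    static LAST_ERR: RefCell<Option<String>> = RefCell::new(None);
}

// Stand-in for a C function whose body is wrapped in PROTECT: on
// failure it stores a message and returns a default value instead
// of letting the exception escape.
fn c_call_that_may_fail(fail: bool) -> bool {
    if fail {
        LAST_ERR.with(|e| *e.borrow_mut() = Some("backend unavailable".into()));
        return false;
    }
    true
}

// Mirror of the unsafe_torch_err! pattern: call, then check and
// clear the recorded error before trusting the return value.
fn checked_call(fail: bool) -> Result<bool, String> {
    let value = c_call_that_may_fail(fail);
    if let Some(err) = LAST_ERR.with(|e| e.borrow_mut().take()) {
        return Err(err);
    }
    Ok(value)
}

fn main() {
    assert!(checked_call(false).is_ok());
    assert!(checked_call(true).is_err());
    println!("error slot checked after every call");
}
```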
1 change: 0 additions & 1 deletion torch-sys/libtch/torch_api.h
@@ -171,7 +171,6 @@ bool at_context_has_ipu();
 bool at_context_has_xla();
 bool at_context_has_lazy();
 bool at_context_has_mps();
-bool at_context_has_ort();
 
 
 /// Returns the number of CUDA devices available.
7 changes: 0 additions & 7 deletions torch-sys/libtch/torch_api_generated.cpp
@@ -2645,13 +2645,6 @@ void atg__sparse_mm_reduce_impl(tensor *out__, tensor self, tensor other, char*
   )
 }
 
-void atg__sparse_semi_structured_addmm(tensor *out__, tensor input, tensor mat1, tensor mat1_meta, tensor mat2, int out_dtype) {
-  PROTECT(
-    auto outputs__ = torch::_sparse_semi_structured_addmm(*input, *mat1, *mat1_meta, *mat2, out_dtype < 0 ? c10::nullopt : c10::optional<at::ScalarType>(at::ScalarType(out_dtype)));
-    out__[0] = new torch::Tensor(outputs__);
-  )
-}
-
 void atg__sparse_semi_structured_apply(tensor *out__, tensor input, tensor thread_masks) {
   PROTECT(
     auto outputs__ = torch::_sparse_semi_structured_apply(*input, *thread_masks);
1 change: 0 additions & 1 deletion torch-sys/libtch/torch_api_generated.h
@@ -359,7 +359,6 @@ void atg__sparse_mask_projection_out(tensor *, tensor out, tensor self, tensor m
 void atg__sparse_mm(tensor *, tensor sparse, tensor dense);
 void atg__sparse_mm_reduce(tensor *, tensor sparse, tensor dense, char* reduce_ptr, int reduce_len);
 void atg__sparse_mm_reduce_impl(tensor *, tensor self, tensor other, char* reduce_ptr, int reduce_len);
-void atg__sparse_semi_structured_addmm(tensor *, tensor input, tensor mat1, tensor mat1_meta, tensor mat2, int out_dtype);
 void atg__sparse_semi_structured_apply(tensor *, tensor input, tensor thread_masks);
 void atg__sparse_semi_structured_apply_dense(tensor *, tensor input, tensor thread_masks);
 void atg__sparse_semi_structured_linear(tensor *, tensor input, tensor weight, tensor meta, tensor bias, char* activation_ptr, int activation_len, int out_dtype);
8 changes: 0 additions & 8 deletions torch-sys/src/c_generated.rs
@@ -2663,14 +2663,6 @@ extern "C" {
         reduce_ptr: *const u8,
         reduce_len: c_int,
     );
-    pub fn atg__sparse_semi_structured_addmm(
-        out__: *mut *mut C_tensor,
-        input_: *mut C_tensor,
-        mat1_: *mut C_tensor,
-        mat1_meta_: *mut C_tensor,
-        mat2_: *mut C_tensor,
-        out_dtype_: c_int,
-    );
     pub fn atg__sparse_semi_structured_apply(
         out__: *mut *mut C_tensor,
         input_: *mut C_tensor,
1 change: 0 additions & 1 deletion torch-sys/src/lib.rs
@@ -156,7 +156,6 @@ extern "C" {
     pub fn at_context_has_xla() -> bool;
     pub fn at_context_has_lazy() -> bool;
     pub fn at_context_has_mps() -> bool;
-    pub fn at_context_has_ort() -> bool;
     pub fn at_context_version_cudnn() -> i64;
     pub fn at_context_version_cudart() -> i64;
 }
