#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

#include <ATen/ops/masked_scatter_backward_ops.h>

namespace at {

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
inline at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) {
    return at::_ops::masked_scatter_backward::call(grad_output, mask, c10::fromIntArrayRefSlow(sizes));
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, at::IntArrayRef sizes) {
    return at::_ops::masked_scatter_backward::call(grad_output, mask, c10::fromIntArrayRefSlow(sizes));
  }
}

// aten::masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
inline at::Tensor masked_scatter_backward_symint(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
    return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor masked_scatter_backward(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
    return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
  }
}

} // namespace at
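// Usage sketch (a minimal, illustrative example; the tensor values are
// hypothetical, not part of the generated header). This op is the backward of
// masked_scatter with respect to its `source` argument: `grad_output` and
// `mask` share the shape of `self`, and `sizes` is the shape of `source`.
// The result packs grad_output.masked_select(mask) into the leading elements
// of a zero tensor, viewed as shape `sizes`:
//
//   at::Tensor grad_output = at::randn({2, 2});
//   at::Tensor mask = at::rand({2, 2}) > 0.5;  // bool mask over `self`
//   at::Tensor grad_source =
//       at::masked_scatter_backward(grad_output, mask, /*sizes=*/{4});
//
//   // The at::symint overloads above select the concrete- vs. symbolic-shape
//   // variant via the template parameter (SFINAE on int64_t vs. c10::SymInt):
//   at::Tensor g2 =
//       at::symint::masked_scatter_backward<int64_t>(grad_output, mask, {4});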