Skip to content

Commit

Permalink
Revert "[Inductor] Use sleef implementation for CPP backend asinh codegen (pytorch#142360)"
Browse files Browse the repository at this point in the history

This reverts commit 79cf8fa.

Reverted pytorch#142360 on behalf of https://github.com/jeanschmidt due to seems to have broken macos tests ([comment](pytorch#142360 (comment)))
  • Loading branch information
pytorchmergebot committed Dec 12, 2024
1 parent 30e2b32 commit cd1b592
Show file tree
Hide file tree
Showing 10 changed files with 3 additions and 43 deletions.
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h
Original file line number Diff line number Diff line change
Expand Up @@ -359,9 +359,6 @@ static_assert(
// Lane-wise arcsine via SLEEF's single-precision vector routine
// (the `_u10` suffix is SLEEF's <=1.0-ULP accuracy variant).
Vectorized<T> asin() const {
return map(Sleef_asinf8_u10);
}
// Lane-wise inverse hyperbolic sine via SLEEF (<=1.0-ULP `_u10` variant).
// NOTE(review): this is the method the commit deletes.
Vectorized<T> asinh() const {
return map(Sleef_asinhf8_u10);
}
// Lane-wise arctangent via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<T> atan() const {
return map(Sleef_atanf8_u10);
}
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec256/vec256_double.h
Original file line number Diff line number Diff line change
Expand Up @@ -147,9 +147,6 @@ template <> class Vectorized<double> {
// Lane-wise arcsine over 4 doubles via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<double> asin() const {
return Vectorized<double>(Sleef_asind4_u10(values));
}
// Lane-wise inverse hyperbolic sine over 4 doubles via SLEEF.
// NOTE(review): this is the method the commit deletes.
Vectorized<double> asinh() const {
return Vectorized<double>(Sleef_asinhd4_u10(values));
}
// Lane-wise arctangent over 4 doubles via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<double> atan() const {
return Vectorized<double>(Sleef_atand4_u10(values));
}
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec256/vec256_float.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,9 +157,6 @@ template <> class Vectorized<float> {
// Lane-wise arcsine over 8 floats via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<float> asin() const {
return Vectorized<float>(Sleef_asinf8_u10(values));
}
// Lane-wise inverse hyperbolic sine over 8 floats via SLEEF.
// NOTE(review): this is the method the commit deletes.
Vectorized<float> asinh() const {
return Vectorized<float>(Sleef_asinhf8_u10(values));
}
// Lane-wise arctangent over 8 floats via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<float> atan() const {
return Vectorized<float>(Sleef_atanf8_u10(values));
}
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec512/vec512_bfloat16.h
Original file line number Diff line number Diff line change
Expand Up @@ -377,9 +377,6 @@ static_assert(
// Lane-wise arcsine via SLEEF's 16-wide single-precision routine
// (<=1.0-ULP `_u10` variant), applied through map().
Vectorized<T> asin() const {
return map(Sleef_asinf16_u10);
}
// Lane-wise inverse hyperbolic sine via SLEEF's 16-wide routine.
// NOTE(review): this is the method the commit deletes.
Vectorized<T> asinh() const {
return map(Sleef_asinhf16_u10);
}
// Lane-wise arctangent via SLEEF's 16-wide routine (<=1.0-ULP `_u10`).
Vectorized<T> atan() const {
return map(Sleef_atanf16_u10);
}
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec512/vec512_double.h
Original file line number Diff line number Diff line change
Expand Up @@ -156,9 +156,6 @@ template <> class Vectorized<double> {
// Lane-wise arcsine over 8 doubles via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<double> asin() const {
return Vectorized<double>(Sleef_asind8_u10(values));
}
// Lane-wise inverse hyperbolic sine over 8 doubles via SLEEF.
// NOTE(review): this is the method the commit deletes.
Vectorized<double> asinh() const {
return Vectorized<double>(Sleef_asinhd8_u10(values));
}
// Lane-wise arctangent over 8 doubles via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<double> atan() const {
return Vectorized<double>(Sleef_atand8_u10(values));
}
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec512/vec512_float.h
Original file line number Diff line number Diff line change
Expand Up @@ -178,9 +178,6 @@ template <> class Vectorized<float> {
// Lane-wise arcsine over 16 floats via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<float> asin() const {
return Vectorized<float>(Sleef_asinf16_u10(values));
}
// Lane-wise inverse hyperbolic sine over 16 floats via SLEEF.
// NOTE(review): this is the method the commit deletes.
Vectorized<float> asinh() const {
return Vectorized<float>(Sleef_asinhf16_u10(values));
}
// Lane-wise arctangent over 16 floats via SLEEF (<=1.0-ULP `_u10` variant).
Vectorized<float> atan() const {
return Vectorized<float>(Sleef_atanf16_u10(values));
}
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/cpu/vec/vec_base.h
Original file line number Diff line number Diff line change
Expand Up @@ -406,9 +406,6 @@ struct Vectorized {
// Scalar fallback: applies std::asin to each element via map().
Vectorized<T> asin() const {
return map(std::asin);
}
// Scalar fallback: applies std::asinh to each element via map().
// NOTE(review): this is the method the commit deletes.
Vectorized<T> asinh() const {
return map(std::asinh);
}
// Scalar fallback: applies std::atan to each element via map().
Vectorized<T> atan() const {
return map(std::atan);
}
Expand Down
1 change: 0 additions & 1 deletion aten/src/ATen/cpu/vec/vec_n.h
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,6 @@ class VectorizedN {
// Elementwise math ops for VectorizedN. Each macro invocation presumably
// defines the named op by forwarding to the constituent Vectorized<T>
// members (macros defined earlier in vec_n.h — confirm there); UNARY takes
// no extra operand, BINARY takes one (atan2's second argument).
VECTORIZEDN_DEFINE_UNARY_OP(acos)
VECTORIZEDN_DEFINE_UNARY_OP(acosh)
VECTORIZEDN_DEFINE_UNARY_OP(asin)
VECTORIZEDN_DEFINE_UNARY_OP(asinh)
VECTORIZEDN_DEFINE_UNARY_OP(atan)
VECTORIZEDN_DEFINE_UNARY_OP(atanh)
VECTORIZEDN_DEFINE_BINARY_OP(atan2)
Expand Down
20 changes: 0 additions & 20 deletions test/inductor/test_cpu_repro.py
Original file line number Diff line number Diff line change
Expand Up @@ -845,26 +845,6 @@ def fn(input):
(_x,),
)

@requires_vectorization
def test_asinh_with_corner_inputs(self):
    # Regression test for https://github.com/pytorch/pytorch/issues/142345:
    # vectorized asinh produced wrong results for corner inputs such as a
    # large-magnitude negative value (-10000.1).
    # NOTE(review): this is the test the commit deletes.

    def fn(input):
        out = torch.asinh(input)
        return out

    # Zeros plus one large negative value, tiled so the tensor is big
    # enough for the vectorized kernel to be selected.
    x = torch.tensor([0, 0, 0, -10000.1]).repeat(3, 4)

    bit_widths = [isa._bit_width for isa in cpu_vec_isa.valid_vec_isa_list()]
    for dtype in [torch.float32, torch.bfloat16, torch.float16, torch.double]:
        for simdlen in bit_widths:
            # Pin the SIMD width and recompile from scratch for every
            # (dtype, width) combination.
            with torch.no_grad(), config.patch({"cpp.simdlen": simdlen}):
                torch._dynamo.reset()
                metrics.reset()
                _x = x.to(dtype)
                self.common(fn, (_x,))
                # Exactly one vectorized kernel must have been emitted.
                check_metrics_vec_kernel_count(1)

@config.patch(implicit_fallbacks=True)
def test_repeat_interleave(self):
def fn(y):
Expand Down
4 changes: 3 additions & 1 deletion torch/_inductor/codegen/cpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -1401,7 +1401,9 @@ def atanh(x):

@staticmethod
def asinh(x):
return f"{x}.asinh()"
# For real x, asinh(x) = log(x + sqrt(1 + x**2))
vec_one = f"decltype({x})(1)"
return f"({x} + ({vec_one} + {x}*{x}).sqrt()).log()"

@staticmethod
def acosh(x):
Expand Down

0 comments on commit cd1b592

Please sign in to comment.