[PT FE] Added support for aten::__lshift__, aten::__rshift__, aten::bitwise_left_shift and aten::bitwise_right_shift #28939

Closed
22 commits
cb108c3  Added support for aten::__lshift__, aten::__rshift__, aten::bitwise_l… (Mohamed-Ashraf273, Feb 8, 2025)
78be01f  Merge branch 'master' of https://github.com/Mohamed-Ashraf273/openvin… (Mohamed-Ashraf273, Feb 8, 2025)
1035cbb  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 9, 2025)
250bab8  Merge branch 'master' of https://github.com/Mohamed-Ashraf273/openvin… (Mohamed-Ashraf273, Feb 10, 2025)
eb0e0fa  Merge branch 'shiftOperations' of https://github.com/Mohamed-Ashraf27… (Mohamed-Ashraf273, Feb 10, 2025)
c912906  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 10, 2025)
31c86b4  Merge branch 'shiftOperations' of https://github.com/Mohamed-Ashraf27… (Mohamed-Ashraf273, Feb 10, 2025)
1ca9736  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 10, 2025)
ee4f6fa  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 11, 2025)
17fc133  aten shift operations are tested successfully (Mohamed-Ashraf273, Feb 11, 2025)
3172a5d  Merge branch 'master' into shiftOperations (Mohamed-Ashraf273, Feb 12, 2025)
c327640  removing unnecessary includes (Mohamed-Ashraf273, Feb 12, 2025)
1aa2c3d  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 13, 2025)
e8c0147  resolving separate source problem (Mohamed-Ashraf273, Feb 13, 2025)
a77069c  shift operations tested (Mohamed-Ashraf273, Feb 13, 2025)
8613a87  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 13, 2025)
7d3eff1  Merge branch 'master' into shiftOperations (Mohamed-Ashraf273, Feb 13, 2025)
4fbc673  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 14, 2025)
f34f53c  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 16, 2025)
80ff052  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 17, 2025)
8cbdd91  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 18, 2025)
d111388  Merge branch 'openvinotoolkit:master' into shiftOperations (Mohamed-Ashraf273, Feb 18, 2025)
29 changes: 29 additions & 0 deletions src/frontends/pytorch/src/op/lshift.cpp
@@ -0,0 +1,29 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/op/bitwise_left_shift.hpp"
#include "openvino/frontend/pytorch/node_context.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

using namespace ov::op;

OutputVector translate_lshift(const NodeContext& context) {
    num_inputs_check(context, 2, 2);

    auto [input_tensor, shift_amount] = get_inputs_with_promoted_types(context, 0, 1);

    auto lshift_node = context.mark_node(std::make_shared<v15::BitwiseLeftShift>(input_tensor, shift_amount));

Review thread on this line:

Member: These two operations are essentially binary operations, for which we do not create separate source files and translators. Please check how other binary operations are supported and support yours in the same way.

Author (Mohamed-Ashraf273): Thank you for your review. I have started working on resolving the problem.

Author (Mohamed-Ashraf273): @rkazants I've resolved the problem.

Author (Mohamed-Ashraf273): Hi @rkazants, can you review my changes in this PR?

    return {lshift_node};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
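
The review thread above suggests treating these as ordinary binary operations, registered directly in op_table.cpp rather than through dedicated lshift.cpp/rshift.cpp translators. A minimal sketch of what that could look like, assuming the frontend's generic two-input helper translate_1to1_match_2_inputs_align_types (used for other binary ops) also applies here; the entries in the finally merged version of this PR may differ:

// Hypothetical op_table.cpp fragment illustrating the reviewer's suggestion;
// the helper name and the v15 qualification are assumptions, not taken from this diff.
#include "openvino/op/bitwise_left_shift.hpp"
#include "openvino/op/bitwise_right_shift.hpp"

// ... inside get_supported_ops_ts():
        {"aten::__lshift__", op::translate_1to1_match_2_inputs_align_types<ov::op::v15::BitwiseLeftShift>},
        {"aten::__rshift__", op::translate_1to1_match_2_inputs_align_types<ov::op::v15::BitwiseRightShift>},
        {"aten::bitwise_left_shift", op::translate_1to1_match_2_inputs_align_types<ov::op::v15::BitwiseLeftShift>},
        {"aten::bitwise_right_shift", op::translate_1to1_match_2_inputs_align_types<ov::op::v15::BitwiseRightShift>},

With a generic helper like this, the dedicated translate_lshift/translate_rshift functions and their OP_CONVERTER declarations would no longer be needed.
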
29 changes: 29 additions & 0 deletions src/frontends/pytorch/src/op/rshift.cpp
@@ -0,0 +1,29 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/op/bitwise_right_shift.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

using namespace ov::op;

OutputVector translate_rshift(const NodeContext& context) {
    num_inputs_check(context, 2, 2);

    auto [input_tensor, shift_amount] = get_inputs_with_promoted_types(context, 0, 1);

    auto rshift_node = context.mark_node(std::make_shared<v15::BitwiseRightShift>(input_tensor, shift_amount));

    return {rshift_node};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
6 changes: 6 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
@@ -52,6 +52,8 @@ OP_CONVERTER(translate_bernoulli);
OP_CONVERTER(translate_bitwise_and);
OP_CONVERTER(translate_bitwise_not);
OP_CONVERTER(translate_bitwise_or);
OP_CONVERTER(translate_lshift);
OP_CONVERTER(translate_rshift);
OP_CONVERTER(translate_bitwise_xor);
OP_CONVERTER(translate_bucketize);
OP_CONVERTER(translate_cat);
@@ -343,6 +345,10 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
    return {
        {"aten::__and__", op::translate_bitwise_and},
        {"aten::__iand__", op::inplace_op<op::translate_bitwise_and>},
        {"aten::__lshift__", op::translate_lshift},
        {"aten::__rshift__", op::translate_rshift},
        {"aten::bitwise_left_shift", op::translate_lshift},
        {"aten::bitwise_right_shift", op::translate_rshift},
        {"aten::__derive_index", op::translate_derive_index},
        {"aten::__getitem__", op::translate_getitem},
        {"aten::__not__", op::translate_1to1_match_1_inputs<opset10::LogicalNot>},
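
The new table entries map all four aten operations onto OpenVINO's opset-15 bitwise shift nodes. For reference, a self-contained sketch (not part of the PR) that builds the same graph directly through the OpenVINO C++ API and compiles it; the shapes, element type, and "CPU" device below are illustrative assumptions:

// Standalone illustration: the graph the translators produce for integer
// "lhs << rhs" / "lhs >> rhs". Shapes, element type, and device are assumptions.
#include <memory>

#include "openvino/op/bitwise_left_shift.hpp"
#include "openvino/op/bitwise_right_shift.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/openvino.hpp"

int main() {
    auto lhs = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{2, 3});
    auto amount = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{2, 3});

    auto lshift = std::make_shared<ov::op::v15::BitwiseLeftShift>(lhs, amount);
    auto rshift = std::make_shared<ov::op::v15::BitwiseRightShift>(lhs, amount);

    auto model = std::make_shared<ov::Model>(ov::OutputVector{lshift, rshift},
                                             ov::ParameterVector{lhs, amount});

    ov::Core core;
    auto compiled = core.compile_model(model, "CPU");
    (void)compiled;  // creating an infer request is omitted for brevity
    return 0;
}
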
108 changes: 108 additions & 0 deletions tests/layer_tests/pytorch_tests/test_shift_operations.py
@@ -0,0 +1,108 @@
# Copyright (C) 2018-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest
import torch
from pytorch_layer_test_class import PytorchLayerTest, skip_if_export


class TestShiftOperators(PytorchLayerTest):
    def _prepare_input(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape):
        choices = np.array([1, 2, 4, 8, 16, 32])
        shifts = np.array([0, 1, 2, 3, 4, 5])

        x = np.random.choice(choices, lhs_shape).astype(lhs_dtype)
        y = np.random.choice(shifts, rhs_shape).astype(rhs_dtype)
        return x, y

    def create_model(self):
        class aten_shift(torch.nn.Module):
            def forward(self, lhs, rhs):
                return lhs << rhs, lhs >> rhs

        ref_net = None
        return aten_shift(), ref_net, ("aten::__lshift__", "aten::__rshift__")

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.precommit_torch_export
    @pytest.mark.precommit_fx_backend
    @pytest.mark.parametrize("lhs_dtype", ["int32", "int64"])
    @pytest.mark.parametrize("rhs_dtype", ["int32", "int64"])
    @pytest.mark.parametrize(
        ("lhs_shape", "rhs_shape"),
        [
            ([2, 3], [2, 3]),
            ([2, 3], []),
            ([], [2, 3]),
            ([], []),
        ],
    )
    def test_shift_operators(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version):
        self._test(
            *self.create_model(),
            ie_device,
            precision,
            ir_version,
            kwargs_to_prepare_input={
                "lhs_dtype": lhs_dtype,
                "rhs_dtype": rhs_dtype,
                "lhs_shape": lhs_shape,
                "rhs_shape": rhs_shape,
            },
            trace_model=True,
            freeze_model=False,
        )


class TestBitwiseShiftFunctions(PytorchLayerTest):
    def _prepare_input(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape):
        choices = np.array([1, 2, 4, 8, 16, 32])
        shifts = np.array([0, 1, 2, 3, 4, 5])

        x = np.random.choice(choices, lhs_shape).astype(lhs_dtype)
        y = np.random.choice(shifts, rhs_shape).astype(rhs_dtype)
        return x, y

    def create_model(self):
        class aten_bitwise_shift(torch.nn.Module):
            def forward(self, lhs, rhs):
                return (
                    torch.bitwise_left_shift(lhs, rhs),
                    torch.bitwise_right_shift(lhs, rhs)
                )

        ref_net = None
        return aten_bitwise_shift(), ref_net, ("aten::bitwise_left_shift", "aten::bitwise_right_shift")

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.precommit_torch_export
    @pytest.mark.precommit_fx_backend
    @pytest.mark.parametrize("lhs_dtype", ["int32", "int64"])
    @pytest.mark.parametrize("rhs_dtype", ["int32", "int64"])
    @pytest.mark.parametrize(
        ("lhs_shape", "rhs_shape"),
        [
            ([2, 3], [2, 3]),
            ([2, 3], []),
            ([], [2, 3]),
            ([], []),
        ],
    )
    def test_bitwise_shift_functions(self, lhs_dtype, rhs_dtype, lhs_shape, rhs_shape, ie_device, precision, ir_version):
        self._test(
            *self.create_model(),
            ie_device,
            precision,
            ir_version,
            kwargs_to_prepare_input={
                "lhs_dtype": lhs_dtype,
                "rhs_dtype": rhs_dtype,
                "lhs_shape": lhs_shape,
                "rhs_shape": rhs_shape,
            },
            trace_model=True,
            freeze_model=False,
        )