[GPU] Integer abs support for activation
kelvinchoi-intel committed Feb 21, 2025
1 parent 11cacc9 commit 8d96c5e
Showing 3 changed files with 82 additions and 2 deletions.
src/plugins/intel_gpu/src/graph/activation.cpp (3 changes: 2 additions & 1 deletion)
@@ -26,7 +26,8 @@ layout activation_inst::calc_output_layout(activation_node const& node, kernel_i
     activation_func::negation,
     activation_func::relu,
     activation_func::floor,
-    activation_func::clamp };
+    activation_func::clamp,
+    activation_func::abs };
 
     if (input_node_layout.data_type == data_types::i8 || input_node_layout.data_type == data_types::u8 ||
         input_node_layout.data_type == data_types::i32) {
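For context: calc_output_layout keeps an allow-list of activation functions that are valid on integer (i8/u8/i32) inputs, and this commit appends abs to that list. Below is a minimal, self-contained sketch of that kind of allow-list check; the enum and helper are illustrative stand-ins, not the verbatim OpenVINO source.

    // Sketch (hypothetical, simplified): accept integer inputs only for
    // activations on the allow-list shown in the diff above.
    #include <algorithm>
    #include <vector>

    enum class activation_func { none, negation, relu, floor, clamp, abs, exp };

    bool integer_activation_supported(activation_func func) {
        static const std::vector<activation_func> allowed = {
            activation_func::none,  activation_func::negation,
            activation_func::relu,  activation_func::floor,
            activation_func::clamp, activation_func::abs };  // abs: added by this commit
        return std::find(allowed.begin(), allowed.end(), func) != allowed.end();
    }

For example, integer_activation_supported(activation_func::abs) now holds, while activation_func::exp would still be rejected for integer inputs.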
@@ -90,6 +90,14 @@ bool ActivationKernelOpt::Validate(const Params& p) const {
         (params.outputs[0].GetLayout() != DataLayout::bfyx && params.outputs[0].GetLayout() != DataLayout::bfzyx))
         return false;
 
+    auto input_dt = params.inputs[0].GetDType();
+    if (input_dt == Datatype::INT8 || input_dt == Datatype::INT32) {
+        for (auto act : params.activations) {
+            if (act.function == ActivationFunction::ABS)
+                return false;
+        }
+    }
+
     return true;
 }

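Validate acts as the optimized kernel's admission test: by returning false for abs on INT8/INT32 inputs, ActivationKernelOpt takes itself out of consideration and kernel selection falls through to another (e.g. reference) activation implementation. A minimal sketch of that first-fit selection pattern, with hypothetical names rather than the actual kernel_selector API:

    // Sketch: each candidate kernel vetoes parameter combinations it
    // cannot handle; the selector takes the first kernel that accepts.
    #include <functional>
    #include <string>
    #include <vector>

    struct Params { /* data types, layouts, fused activations, ... */ };

    struct KernelCandidate {
        std::string name;
        std::function<bool(const Params&)> validate;  // e.g. ActivationKernelOpt::Validate
    };

    std::string select_kernel(const std::vector<KernelCandidate>& candidates,
                              const Params& p) {
        for (const auto& k : candidates)
            if (k.validate(p))   // the opt kernel now rejects ABS on INT8/INT32,
                return k.name;   // so selection moves on to the next candidate
        return "reference";      // assumed fallback kernel
    }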
@@ -1579,7 +1579,8 @@ TEST(activation_i32_fw_gpu, basic_yxfb_i32_funcs) {
         activation_func::negation,
         activation_func::relu,
         activation_func::clamp,
-        activation_func::floor
+        activation_func::floor,
+        activation_func::abs
     };

for (auto func : funcs) {
@@ -1620,6 +1621,76 @@ TEST(activation_i32_fw_gpu, basic_yxfb_i32_funcs)
             case activation_func::floor:
                 ASSERT_EQ((int32_t)std::floor(input_ptr[i]), output_ptr[i]);
                 break;
+            case activation_func::abs:
+                ASSERT_EQ(std::abs(static_cast<int32_t>(input_ptr[i])), output_ptr[i]);
+                break;
             default:
                 break;
             }
         }
     }
 }

+TEST(activation_i32_fw_gpu, basic_yxfb_u8_funcs) {
+    auto& engine = get_test_engine();
+    auto input = engine.allocate_memory({ data_types::u8, format::yxfb,{ 2, 2, 2, 2 } });
+
+    std::vector<uint8_t> input_vec = {
+        1, 0, 5, 1,
+        2, 0, 6, 5,
+        3, 0, 7, 12,
+        4, 0, 8, 8
+    };
+    set_values(input, input_vec);
+
+    // functions valid for uint8 type input
+    std::vector<activation_func> funcs = {
+        activation_func::none,
+        activation_func::negation,
+        activation_func::relu,
+        activation_func::clamp,
+        activation_func::floor,
+        activation_func::abs
+    };
+
+    for (auto func : funcs) {
+        topology topology;
+        activation_additional_params params = {0.0, 1.0};
+        topology.add(input_layout("input", input->get_layout()));
+        topology.add(activation("activation", input_info("input"), func, params));
+
+        network network(engine, topology, get_test_default_config(engine));
+        network.set_input_data("input", input);
+        auto outputs = network.execute();
+
+        ASSERT_EQ(outputs.size(), size_t(1));
+        ASSERT_EQ(outputs.begin()->first, "activation");
+
+        auto output_memory = outputs.at("activation").get_memory();
+        auto output_layout = output_memory->get_layout();
+        cldnn::mem_lock<uint8_t> output_ptr(output_memory, get_test_stream());
+        cldnn::mem_lock<uint8_t> input_ptr(input, get_test_stream());
+
+        for (size_t i = 0; i < output_layout.get_linear_size(); ++i) {
+            switch (func) {
+            case activation_func::none:
+                ASSERT_EQ((uint8_t)input_ptr[i], output_ptr[i]);
+                break;
+            case activation_func::negation:
+                ASSERT_EQ(!((uint8_t)input_ptr[i]), output_ptr[i]);
+                break;
+            case activation_func::relu:
+                ASSERT_EQ((uint8_t)(std::max(static_cast<int32_t>(input_ptr[i]), 0)), output_ptr[i]);
+                break;
+            case activation_func::clamp:
+                ASSERT_EQ(std::min(std::max(input_ptr[i], static_cast<uint8_t>(params.a)), static_cast<uint8_t>(params.b)), output_ptr[i]);
+                break;
+            case activation_func::floor:
+                ASSERT_EQ((uint8_t)std::floor(input_ptr[i]), output_ptr[i]);
+                break;
+            case activation_func::abs:
+                ASSERT_EQ(std::abs(static_cast<uint8_t>(input_ptr[i])), output_ptr[i]);
+                break;
+            default:
+                break;
+            }
+        }
+    }
+}
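Together the tests pin down the intended integer semantics: for i32 inputs the expected value follows std::abs, and for u8 the cast-then-abs reduces to the identity, since unsigned values are their own absolute value. A standalone restatement of those reference semantics, assuming cldnn's integer abs matches std::abs element-wise:

    // Reference semantics the new tests assert (assumption: element-wise).
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int32_t ref_abs_i32(int32_t x) { return std::abs(x); }  // e.g. -7 -> 7
    uint8_t ref_abs_u8(uint8_t x)  { return x; }            // unsigned: identity

    int main() {
        assert(ref_abs_i32(-12) == 12);
        assert(ref_abs_u8(12) == 12);   // matches std::abs(static_cast<uint8_t>(x))
        return 0;
    }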
