From 8fee51fa9f48c88f7145eb8b0cf4f1bf0624aafe Mon Sep 17 00:00:00 2001 From: Brian McGinn Date: Fri, 14 Apr 2023 14:16:50 -0700 Subject: [PATCH 1/2] Release-candidate --- .gitignore | 4 + Dockerfile.dgpu | 74 ++ Dockerfile.dgpu-dev | 107 +++ Dockerfile.igt | 34 + Dockerfile.soc | 69 ++ LICENSE | 29 + README.md | 14 +- benchmark-scripts/README.md | 115 +++ benchmark-scripts/benchmark.sh | 309 +++++++ benchmark-scripts/camera-simulator.sh | 47 + benchmark-scripts/cleanup_gpu_metrics.sh | 54 ++ benchmark-scripts/collect_memory_usage.sh | 151 ++++ benchmark-scripts/collect_platform_metrics.sh | 146 ++++ benchmark-scripts/collect_video_metrics.sh | 188 ++++ .../collect_video_metrics_flex.sh | 181 ++++ benchmark-scripts/collect_xpum_csv.sh | 11 + .../consolidate_multiple_run_of_metrics.py | 539 ++++++++++++ benchmark-scripts/copy-platform-metrics.sh | 23 + benchmark-scripts/download_sample_videos.sh | 11 + benchmark-scripts/format_avc_mp4.sh | 122 +++ benchmark-scripts/log_time_monitor.sh | 69 ++ benchmark-scripts/requirements.txt | 3 + benchmark-scripts/results_parser.py | 217 +++++ benchmark-scripts/run.sh | 371 ++++++++ benchmark-scripts/run_server.sh | 65 ++ .../start_emulated_camera_pipelines.sh | 64 ++ benchmark-scripts/stop_platform_collection.sh | 17 + benchmark-scripts/stop_server.sh | 8 + benchmark-scripts/stream_density.sh | 48 + benchmark-scripts/stream_density_testcases.sh | 63 ++ benchmark-scripts/utility_install.sh | 98 +++ camera-simulator/camera-simulator.sh | 117 +++ configs/extensions/OCR_post_processing.py | 66 ++ .../extensions/OCR_post_processing_0012.py | 64 ++ configs/extensions/barcode.py | 178 ++++ configs/extensions/barcode_nv12_to_gray.py | 234 +++++ configs/extensions/object_removal_by_label.py | 24 + configs/extensions/remote_classify.py | 211 +++++ configs/extensions/tracked_object_filter.py | 66 ++ configs/framework-pipelines/arc/yolov5s.sh | 8 + .../arc/yolov5s_effnetb0.sh | 29 + .../framework-pipelines/arc/yolov5s_full.sh | 29 + configs/framework-pipelines/core/yolov5s.sh | 14 + .../core/yolov5s_effnetb0.sh | 30 + .../framework-pipelines/core/yolov5s_full.sh | 22 + .../core/yolov5s_realsense.sh | 10 + configs/framework-pipelines/dgpu/yolov5s.sh | 20 + .../dgpu/yolov5s_effnetb0.sh | 38 + .../framework-pipelines/dgpu/yolov5s_full.sh | 28 + configs/framework-pipelines/stream_density.sh | 99 +++ configs/framework-pipelines/xeon/yolov5s.sh | 18 + .../xeon/yolov5s_effnetb0.sh | 30 + .../framework-pipelines/xeon/yolov5s_full.sh | 20 + .../Horizontal-text-detection-0002_fix.json | 10 + configs/models/2022/models.list.yml | 12 + .../2022/yolov5s/1/FP16-INT8/yolov5s.mapping | 819 ++++++++++++++++++ .../2022/yolov5s/1/FP16/yolov5s.mapping | 819 ++++++++++++++++++ .../2022/yolov5s/1/FP32-INT8/yolov5s.mapping | 819 ++++++++++++++++++ .../2022/yolov5s/1/FP32/yolov5s.mapping | 819 ++++++++++++++++++ .../models/licenses/APACHE-2.0-TF-Models.txt | 203 +++++ configs/models/licenses/APACHE-2.0-TF-TPU.txt | 203 +++++ .../pipeline.json | 139 +++ configs/results/.gitignore | 4 + docker-build-igt.sh | 27 + docker-build.sh | 40 + docker-run-dev.sh | 233 +++++ docker-run-igt.sh | 14 + docker-run.sh | 151 ++++ docs_src/benchmark.md | 101 +++ docs_src/camera_serial_number.md | 24 + docs_src/hardwaresetup.md | 71 ++ docs_src/images/vision-checkout-1.0.png | Bin 0 -> 45734 bytes docs_src/index.md | 34 + docs_src/mkdocs.yml | 26 + docs_src/pipelinebenchmarking.md | 137 +++ docs_src/pipelinerun.md | 146 ++++ docs_src/pipelinesetup.md | 79 ++ docs_src/references.md | 15 + 
docs_src/releasenotes.md | 16 + docs_src/toubleshooting.md | 5 + get-gpu-info.sh | 46 + get-options.sh | 185 ++++ modelDownload.sh | 143 +++ patch/libusb.h | 18 + requirements.txt | 6 + run.sh | 371 ++++++++ sample-media/README.md | 1 + security.md | 7 + stop_all_docker_containers.sh | 8 + testModelDownload.sh | 127 +++ test_barcode_docker_run.sh | 17 + test_realsense_params_docker_run.sh | 101 +++ 92 files changed, 10600 insertions(+), 2 deletions(-) create mode 100644 .gitignore create mode 100644 Dockerfile.dgpu create mode 100644 Dockerfile.dgpu-dev create mode 100644 Dockerfile.igt create mode 100644 Dockerfile.soc create mode 100644 LICENSE create mode 100644 benchmark-scripts/README.md create mode 100755 benchmark-scripts/benchmark.sh create mode 100755 benchmark-scripts/camera-simulator.sh create mode 100755 benchmark-scripts/cleanup_gpu_metrics.sh create mode 100755 benchmark-scripts/collect_memory_usage.sh create mode 100755 benchmark-scripts/collect_platform_metrics.sh create mode 100755 benchmark-scripts/collect_video_metrics.sh create mode 100755 benchmark-scripts/collect_video_metrics_flex.sh create mode 100755 benchmark-scripts/collect_xpum_csv.sh create mode 100644 benchmark-scripts/consolidate_multiple_run_of_metrics.py create mode 100755 benchmark-scripts/copy-platform-metrics.sh create mode 100755 benchmark-scripts/download_sample_videos.sh create mode 100755 benchmark-scripts/format_avc_mp4.sh create mode 100755 benchmark-scripts/log_time_monitor.sh create mode 100644 benchmark-scripts/requirements.txt create mode 100644 benchmark-scripts/results_parser.py create mode 100755 benchmark-scripts/run.sh create mode 100755 benchmark-scripts/run_server.sh create mode 100755 benchmark-scripts/start_emulated_camera_pipelines.sh create mode 100755 benchmark-scripts/stop_platform_collection.sh create mode 100755 benchmark-scripts/stop_server.sh create mode 100755 benchmark-scripts/stream_density.sh create mode 100755 benchmark-scripts/stream_density_testcases.sh create mode 100755 benchmark-scripts/utility_install.sh create mode 100755 camera-simulator/camera-simulator.sh create mode 100644 configs/extensions/OCR_post_processing.py create mode 100644 configs/extensions/OCR_post_processing_0012.py create mode 100644 configs/extensions/barcode.py create mode 100644 configs/extensions/barcode_nv12_to_gray.py create mode 100644 configs/extensions/object_removal_by_label.py create mode 100644 configs/extensions/remote_classify.py create mode 100644 configs/extensions/tracked_object_filter.py create mode 100755 configs/framework-pipelines/arc/yolov5s.sh create mode 100755 configs/framework-pipelines/arc/yolov5s_effnetb0.sh create mode 100755 configs/framework-pipelines/arc/yolov5s_full.sh create mode 100755 configs/framework-pipelines/core/yolov5s.sh create mode 100755 configs/framework-pipelines/core/yolov5s_effnetb0.sh create mode 100755 configs/framework-pipelines/core/yolov5s_full.sh create mode 100755 configs/framework-pipelines/core/yolov5s_realsense.sh create mode 100755 configs/framework-pipelines/dgpu/yolov5s.sh create mode 100755 configs/framework-pipelines/dgpu/yolov5s_effnetb0.sh create mode 100755 configs/framework-pipelines/dgpu/yolov5s_full.sh create mode 100755 configs/framework-pipelines/stream_density.sh create mode 100755 configs/framework-pipelines/xeon/yolov5s.sh create mode 100755 configs/framework-pipelines/xeon/yolov5s_effnetb0.sh create mode 100755 configs/framework-pipelines/xeon/yolov5s_full.sh create mode 100644 
configs/models/2022/Horizontal-text-detection-0002_fix.json create mode 100755 configs/models/2022/models.list.yml create mode 100644 configs/models/2022/yolov5s/1/FP16-INT8/yolov5s.mapping create mode 100644 configs/models/2022/yolov5s/1/FP16/yolov5s.mapping create mode 100644 configs/models/2022/yolov5s/1/FP32-INT8/yolov5s.mapping create mode 100644 configs/models/2022/yolov5s/1/FP32/yolov5s.mapping create mode 100644 configs/models/licenses/APACHE-2.0-TF-Models.txt create mode 100644 configs/models/licenses/APACHE-2.0-TF-TPU.txt create mode 100644 configs/pipelines/xeon/yolov5s_tracking_mixed_cpu_full/pipeline.json create mode 100755 configs/results/.gitignore create mode 100755 docker-build-igt.sh create mode 100755 docker-build.sh create mode 100755 docker-run-dev.sh create mode 100755 docker-run-igt.sh create mode 100755 docker-run.sh create mode 100644 docs_src/benchmark.md create mode 100644 docs_src/camera_serial_number.md create mode 100644 docs_src/hardwaresetup.md create mode 100644 docs_src/images/vision-checkout-1.0.png create mode 100644 docs_src/index.md create mode 100644 docs_src/mkdocs.yml create mode 100644 docs_src/pipelinebenchmarking.md create mode 100644 docs_src/pipelinerun.md create mode 100644 docs_src/pipelinesetup.md create mode 100644 docs_src/references.md create mode 100644 docs_src/releasenotes.md create mode 100644 docs_src/toubleshooting.md create mode 100755 get-gpu-info.sh create mode 100755 get-options.sh create mode 100755 modelDownload.sh create mode 100644 patch/libusb.h create mode 100644 requirements.txt create mode 100755 run.sh create mode 100644 sample-media/README.md create mode 100644 security.md create mode 100755 stop_all_docker_containers.sh create mode 100755 testModelDownload.sh create mode 100755 test_barcode_docker_run.sh create mode 100755 test_realsense_params_docker_run.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..ceef3f57 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.cl-cache +**/__pycache__ +.vscode +results/* \ No newline at end of file diff --git a/Dockerfile.dgpu b/Dockerfile.dgpu new file mode 100644 index 00000000..43629a17 --- /dev/null +++ b/Dockerfile.dgpu @@ -0,0 +1,74 @@ +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +FROM intel/dlstreamer:2022.3.0-ubuntu22-gpu555 + +USER root + +RUN if [ -n "$HTTP_PROXY" ] ; then echo "Acquire::http::Proxy \"$HTTP_PROXY\";" > /etc/apt/apt.conf; fi +RUN apt-get update -y || true; DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + autoconf \ + git \ + libssl-dev \ + libusb-1.0-0-dev \ + libudev-dev \ + pkg-config \ + libgtk-3-dev \ + libglfw3-dev \ + libgl1-mesa-dev \ + libglu1-mesa-dev \ + nasm \ + ninja-build \ + cmake \ + python3 \ + python3-pip \ + meson \ + flex \ + bison && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Upgrade meson as the gstreamer needs later version +RUN echo "upgrading meson to the latest version..." 
&& pip3 install --user meson --upgrade + +# Install realsense +RUN mkdir -p /rs && cd /rs && git clone https://github.com/gwen2018/librealsense.git +RUN cd /rs/librealsense && \ + git checkout stream_d436_b +COPY ./patch/libusb.h /rs/librealsense/src/libusb/libusb.h + #./scripts/setup_udev_rules.sh && \ +RUN cd /rs/librealsense && mkdir build && \ + cd build/ && \ + cmake ../ \ + -DBUILD_SHARED_LIBS=true \ + -DBUILD_WITH_JPEGTURBO=true \ + -DBUILD_PYTHON_BINDINGS:bool=true \ + -DBUILD_WITH_CUDA=false \ + -DFORCE_RSUSB_BACKEND=false \ + -DPYTHON_EXECUTABLE=/usr/bin/python3 \ + -DBUILD_GLSL_EXTENSIONS=false \ + -DBUILD_WITH_CPU_EXTENSIONS=true \ + -DBUILD_UNIT_TESTS=false \ + -DBUILD_GRAPHICAL_EXAMPLES=false \ + -DCMAKE_BUILD_TYPE=Release && \ + make -j$(cat /proc/cpuinfo |grep -c proc) && \ + make install && \ + export PYTHONPATH="$PYTHONPATH":/usr/lib/python3/dist-packages/pyrealsense2 && \ + python3 -c "import pyrealsense2 as rs; print(rs)" +RUN mv /rs/librealsense/build/libjpeg-turbo/lib/libturbojpeg.so* /usr/local/lib +# # Build gst realsense element. Use github version once pull request is accepted with bug fixes +RUN cd /rs && git clone https://github.com/brian-intel/realsense-gstreamer +RUN cd /rs/realsense-gstreamer && \ + /usr/bin/meson setup build && ninja -C build +RUN cd /rs/realsense-gstreamer && /usr/bin/meson . build && ninja -C build +RUN cp /rs/realsense-gstreamer/build/src/libgstrealsense_meta.so /opt/intel/dlstreamer/gstreamer/lib/ && \ + cp /rs/realsense-gstreamer/build/src/libgstrealsensesrc.so /opt/intel/dlstreamer/gstreamer/lib/gstreamer-1.0 && \ + cp /usr/local/lib/libturbojpeg.so* /opt/intel/dlstreamer/gstreamer/lib/ +#RUN gst-inspect-1.0 realsensesrc + +COPY ./requirements.txt /requirements.txt +RUN pip3 install --upgrade pip --no-cache-dir -r /requirements.txt diff --git a/Dockerfile.dgpu-dev b/Dockerfile.dgpu-dev new file mode 100644 index 00000000..4555d978 --- /dev/null +++ b/Dockerfile.dgpu-dev @@ -0,0 +1,107 @@ +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +FROM intel/dlstreamer:2022.3.0-ubuntu22-gpu555-dpcpp-devel as dls_gpu419-43-RC2_build +USER root +WORKDIR /dlstreamersrc +COPY frameworks.ai.dlstreamer.pipeline-framework-2022.2.1-gpu419.43-RC2/ . +RUN mkdir -p build; cd build; cmake -DCMAKE_INSTALL_PREFIX=/opt/intel/dlstreamer ..; make --jobs=$(nproc --all) install + +FROM intel/dlstreamer:2022.3.0-ubuntu22-gpu555-dpcpp-devel as optional_fw_build +COPY --from=dls_gpu419-43-RC2_build /opt/intel/dlstreamer /opt/intel/dlstreamer +USER root +RUN if [ ! -z "$HTTP_PROXY" ] ; then echo "Acquire::http::Proxy \"$HTTP_PROXY\";" > /etc/apt/apt.conf; fi +RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \ + build-essential \ + autoconf \ + git \ + libssl-dev \ + libusb-1.0-0-dev \ + libudev-dev \ + pkg-config \ + libgtk-3-dev \ + libglfw3-dev \ + libgl1-mesa-dev \ + libglu1-mesa-dev \ + nasm \ + ninja-build \ + cmake \ + python3 \ + python3-pip \ + meson && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + + +# Install latest OpenVINO - 2022.3.0.dev20221125 pre-release +# Note about pre-release +# NOTE: This version is pre-release software and has not undergone full release validation or qualification. No support is offered on pre-release software and APIs/behavior are subject to change. It should NOT be incorporated into any production software/solution and instead should be used only for early testing and integration while awaiting a final release version of this software. 
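+# Build note: the steps below fetch OpenVINO at the pinned pre-release tag and compile it from source on all available cores (nproc); expect a long build.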
+# +WORKDIR /tmp +#RUN wget https://github.com/openvinotoolkit/openvino/archive/refs/tags/2022.3.0.dev20221125.zip; unzip 2022.3.0.dev20221125.zip +RUN git clone https://github.com/openvinotoolkit/openvino.git; cd openvino; git checkout 2022.3.0.dev20221125; git submodule update --init --recursive +RUN cd openvino; chmod +x install_build_dependencies.sh; ./install_build_dependencies.sh +RUN cd openvino; mkdir build && cd build; cmake -DCMAKE_INSTALL_PREFIX:PATH=/opt/intel/openvino_2022 -DCMAKE_BUILD_TYPE=Release ..; make --jobs=$(nproc --all); make install + + +#FROM intel/dlstreamer:2022.2.0-ubuntu20-gpu419.40 +USER root +RUN if [ ! -z "$HTTP_PROXY" ] ; then echo "Acquire::http::Proxy \"$HTTP_PROXY\";" > /etc/apt/apt.conf; fi +RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \ + build-essential \ + autoconf \ + git \ + libssl-dev \ + libusb-1.0-0-dev \ + libudev-dev \ + pkg-config \ + libgtk-3-dev \ + libglfw3-dev \ + libgl1-mesa-dev \ + libglu1-mesa-dev \ + nasm \ + ninja-build \ + cmake \ + python3 \ + python3-pip \ + meson && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Install realsense +RUN mkdir -p /rs && cd /rs && git clone https://github.com/gwen2018/librealsense.git +RUN cd /rs/librealsense && \ + git checkout stream_d436_b && \ + #./scripts/setup_udev_rules.sh && \ + mkdir build && \ + cd build/ && \ + cmake ../ \ + -DBUILD_SHARED_LIBS=true \ + -DBUILD_WITH_JPEGTURBO=true \ + -DBUILD_PYTHON_BINDINGS:bool=true \ + -DBUILD_WITH_CUDA=false \ + -DFORCE_RSUSB_BACKEND=false \ + -DPYTHON_EXECUTABLE=/usr/bin/python3 \ + -DBUILD_GLSL_EXTENSIONS=false \ + -DBUILD_WITH_CPU_EXTENSIONS=true \ + -DBUILD_UNIT_TESTS=false \ + -DBUILD_GRAPHICAL_EXAMPLES=false \ + -DCMAKE_BUILD_TYPE=Release && \ + make -j$(cat /proc/cpuinfo |grep proc |wc -l) && \ + make install + #export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.6/pyrealsense2 && \ + #python3 -c "import pyrealsense2 as rs; print(rs)" +RUN mv /rs/librealsense/build/libjpeg-turbo/lib/libturbojpeg.so* /usr/local/lib +# Build gst realsense element. Use github version once pull request is accepted with bug fixes +RUN cd /rs && git clone https://github.com/brian-intel/realsense-gstreamer +RUN cd /rs/realsense-gstreamer && /usr/bin/meson . build && ninja -C build +RUN cp /rs/realsense-gstreamer/build/src/libgstrealsense_meta.so /opt/intel/dlstreamer/gstreamer/lib/ +RUN cp /rs/realsense-gstreamer/build/src/libgstrealsensesrc.so /opt/intel/dlstreamer/gstreamer/lib/gstreamer-1.0 +RUN cp /usr/local/lib/libturbojpeg.so* /opt/intel/dlstreamer/gstreamer/lib/ +#RUN gst-inspect-1.0 realsensesrc + +COPY ./requirements.txt /requirements.txt +RUN pip3 install --upgrade pip --no-cache-dir -r /requirements.txt diff --git a/Dockerfile.igt b/Dockerfile.igt new file mode 100644 index 00000000..caaebac3 --- /dev/null +++ b/Dockerfile.igt @@ -0,0 +1,34 @@ +# +# Copyright (C) 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + + +FROM ubuntu:20.04 +RUN if [ -n "$HTTP_PROXY" ] ; then echo "Acquire::http::Proxy \"$HTTP_PROXY\";" > /etc/apt/apt.conf; fi +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + git \ + cmake \ + libunwind-dev \ + libgsl-dev \ + libasound2-dev \ + libxmlrpc-core-c3-dev \ + libjson-c-dev \ + libcurl4-openssl-dev \ + python-docutils \ + valgrind \ + peg \ + libdrm-intel1 \ + pkg-config libdrm-dev libkmod-dev libprocps-dev libdw-dev libpixman-1-dev libcairo-dev libudev-dev flex bison \ + meson && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Install igt +WORKDIR /igt +RUN git config --global http.proxy $HTTP_PROXY; git clone https://github.com/freedesktop/xorg-intel-gpu-tools.git; cd xorg-intel-gpu-tools; git checkout igt-gpu-tools-1.26 + +RUN cd xorg-intel-gpu-tools; meson build; ninja -C build; cd build; ninja install diff --git a/Dockerfile.soc b/Dockerfile.soc new file mode 100644 index 00000000..cbee0786 --- /dev/null +++ b/Dockerfile.soc @@ -0,0 +1,69 @@ +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +FROM intel/dlstreamer:2022.2.0-ubuntu20-gpu815 as base + +USER root + +RUN if [ -n "$HTTP_PROXY" ] ; then echo "Acquire::http::Proxy \"$HTTP_PROXY\";" > /etc/apt/apt.conf; fi +RUN apt-get update -y || true; DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + autoconf \ + git \ + libssl-dev \ + libusb-1.0-0-dev \ + libudev-dev \ + pkg-config \ + libgtk-3-dev \ + libglfw3-dev \ + libgl1-mesa-dev \ + libglu1-mesa-dev \ + nasm \ + ninja-build \ + cmake \ + python3 \ + python3-pip \ + meson && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Install realsense +RUN mkdir -p /rs && cd /rs && git clone https://github.com/gwen2018/librealsense.git +RUN cd /rs/librealsense && \ + git checkout stream_d436_b +COPY ./patch/libusb.h /rs/librealsense/src/libusb/libusb.h + #./scripts/setup_udev_rules.sh && \ +RUN cd /rs/librealsense && mkdir build && \ + cd build/ && \ + cmake ../ \ + -DBUILD_SHARED_LIBS=true \ + -DBUILD_WITH_JPEGTURBO=true \ + -DBUILD_PYTHON_BINDINGS:bool=true \ + -DBUILD_WITH_CUDA=false \ + -DFORCE_RSUSB_BACKEND=false \ + -DPYTHON_EXECUTABLE=/usr/bin/python3 \ + -DBUILD_GLSL_EXTENSIONS=false \ + -DBUILD_WITH_CPU_EXTENSIONS=true \ + -DBUILD_UNIT_TESTS=false \ + -DBUILD_GRAPHICAL_EXAMPLES=false \ + -DCMAKE_BUILD_TYPE=Release && \ + make -j$(cat /proc/cpuinfo |grep -c proc) && \ + make install && \ + export PYTHONPATH="$PYTHONPATH":/usr/lib/python3/dist-packages/pyrealsense2 && \ + python3 -c "import pyrealsense2 as rs; print(rs)" +RUN mv /rs/librealsense/build/libjpeg-turbo/lib/libturbojpeg.so* /usr/local/lib +# Build gst realsense element. Use github version once pull request is accepted with bug fixes +RUN cd /rs && git clone https://github.com/brian-intel/realsense-gstreamer +RUN cd /rs/realsense-gstreamer && /usr/bin/meson . 
build && ninja -C build +RUN cp /rs/realsense-gstreamer/build/src/libgstrealsense_meta.so /opt/intel/dlstreamer/gstreamer/lib/ && \ + cp /rs/realsense-gstreamer/build/src/libgstrealsensesrc.so /opt/intel/dlstreamer/gstreamer/lib/gstreamer-1.0 && \ + cp /usr/local/lib/libturbojpeg.so* /opt/intel/dlstreamer/gstreamer/lib/ +#RUN gst-inspect-1.0 realsensesrc + + +#FROM base as build-default +COPY ./requirements.txt /requirements.txt +RUN pip3 install --upgrade pip --no-cache-dir -r /requirements.txt diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..26a8c62f --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2023, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md index 6629d6b2..c46cdf38 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,12 @@ -# vision-self-checkout - +# Self-checkout Rapid Prototype + +[Documentation](./docs_src/index.md) + +## Known issues + +- Once a barcode is detected and decoded, the barcode label text is displayed inside the object even if the barcode is no longer visible. +- Overlapping object detection labels (gvatrack adds its own labels) + +## Disclaimer + +GStreamer is an open source framework licensed under LGPL. See https://gstreamer.freedesktop.org/documentation/frequently-asked-questions/licensing.html?gi-language=c. You are solely responsible for determining if your use of GStreamer requires any additional licenses. Intel is not responsible for obtaining any such licenses, nor liable for any licensing fees due, in connection with your use of GStreamer. \ No newline at end of file diff --git a/benchmark-scripts/README.md b/benchmark-scripts/README.md new file mode 100644 index 00000000..c3870fdc --- /dev/null +++ b/benchmark-scripts/README.md @@ -0,0 +1,115 @@ +# Benchmarking + +## Installation +Install the required benchmark utilities: + +chmod +x *.sh + +sudo ./utility_install.sh + + +## Benchmark Data Collection (NEW) +This section will replace the Benchmark Data Collection section below once it has been validated.
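+ +As a quick orientation, a typical end-to-end run chains the two scripts documented below: benchmark.sh to collect data, then consolidate_multiple_run_of_metrics.py to roll the results up. The following is a sketch: mytest is a placeholder log directory, and it assumes the benchmark video is already in ../sample-media (see download_sample_videos.sh): + +sudo ./benchmark.sh --pipelines 2 --logdir mytest/data --duration 60 --init_duration 30 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0 +sudo python3 ./consolidate_multiple_run_of_metrics.py --root_directory mytest/ --output mytest/consolidated.csv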
+ +**benchmark.sh** + +Before starting this script, ensure the ../sample-media folder has the video(s) to benchmark against. + +Depending on its parameters, this script benchmarks a specific number of pipelines or performs stream density benchmarking. + +Inputs: The parameters are nearly the same as docker-run and docker-run-dev. There are four new parameters to add first: + +--pipelines NUMBER_OF_PIPELINES_TO_START or --stream_density TARGET_FPS +--logdir PATH_TO_LOG_DIR/data +--duration NUMBER_OF_SECONDS_TO_BENCHMARK +--init_duration NUMBER_OF_SECONDS_TO_WAIT_BEFORE_STARTING_DATA_COLLECTION + +For the remaining parameters, e.g. --platform, --inputsrc, etc., see docker-run.sh. + +Example for running the product detection use case for 30 seconds, after waiting 60 seconds for initialization: +sudo ./benchmark.sh --pipelines 2 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 30 --init_duration 60 --platform dgpu.1 --inputsrc rtsp://127.0.0.1:8554/camera_0 --classification_disabled --ocr_disabled --barcode_disabled + +Additional sample command lines: + +1. sudo ./benchmark.sh --pipelines 4 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform dgpu --inputsrc rtsp://127.0.0.1:8554/camera_0 +2. sudo ./benchmark.sh --pipelines 4 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0 +3. sudo ./benchmark.sh --pipelines 4 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform xeon --inputsrc rtsp://127.0.0.1:8554/camera_0 +4. sudo ./benchmark.sh --stream_density 15 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform xeon --inputsrc rtsp://127.0.0.1:8554/camera_0 + +**consolidate_multiple_run_of_metrics.py** + +Use this script once all testing is complete. consolidate_multiple_run_of_metrics.py consolidates the benchmark runs into one .csv summary file. + +Inputs to the script are: + +1. --root_directory: the top level directory where the results are stored +2. --output: the location to put the output file + +Sample command line: +sudo python3 ./consolidate_multiple_run_of_metrics.py --root_directory yolov5s_6330N/ --output yolov5s_6330N/consolidated.csv + + +## Benchmark Data Collection (REMOVE ME) + +**collect_video_metrics.sh** + +Use this script to start benchmarking a specific workload. Note that this script depends on camera-simulator.sh, docker-run.sh and stop_server.sh. + +Before starting this script, ensure the ../sample-media folder has the video(s) to benchmark against. + + +Inputs to the collect_video_metrics.sh script are: + +1. CAMERA_ID: the video stream to be run for the workload. Needs to be the full path, i.e.: rtsp://127.0.0.1:8554/camera_0 +2. PIPELINE_NUMBER: the number of pipelines to start +3. LOG_DIRECTORY: the location to store all the log files. The consolidation script will look for directories within the top level directory and process the results in each one, so keep this structure in mind when creating the log directory. For example, for multiple videos with different numbers of objects, a log directory would look like: yolov5s_6330N/object1_mixed. Whatever is meaningful for the test run. +4. DURATION: the amount of time to run the data collection +5. COMPLETE_INIT_DURATION: the amount of time to allow the system to settle prior to starting the data collection. +6.
DEVICE: Use soc if testing for CPU/iGPU, or dgpu if testing for Flex or Arc GPUs +7. SYSTEM: core or xeon + +Sample command lines: +1. sudo ./collect_video_metrics.sh rtsp://127.0.0.1:8554/camera_0 4 yolov5s_6330N/object5_mixed 120 30 dgpu xeon +2. sudo ./collect_video_metrics.sh rtsp://127.0.0.1:8554/camera_0 4 yolov5s_6330N/object5_mixed 120 30 soc core +3. sudo ./collect_video_metrics.sh rtsp://127.0.0.1:8554/camera_0 4 yolov5s_6330N/object5_mixed 120 30 soc xeon + +**consolidate_multiple_run_of_metrics.py** + +Use this script once all testing is complete. consolidate_multiple_run_of_metrics.py consolidates the benchmark runs into one .csv summary file. + +Inputs to the script are: + +1. --root_directory: the top level directory where the results are stored +2. --output: the location to put the output file + +Sample command line: +sudo python3 ./consolidate_multiple_run_of_metrics.py --root_directory yolov5s_6330N/ --output yolov5s_6330N/consolidated.csv + + +**stop_server.sh** + +Stops the Docker containers, closing the pipelines. + +**stream_density.sh** + +Use this script to test the maximum number of streams that can be run on your system. The output is the maximum number of pipelines run, along with the last FPS recorded. + +Inputs to the script are: + +1. CAMERA_ID: the video stream to be run for the workload. Needs to be the full path, i.e.: rtsp://127.0.0.1:8554/camera_0 +2. PLATFORM: core, xeon, or dgpu.x + - dgpu.x should be replaced with the targeted GPUs, such as dgpu (for all GPUs), dgpu.0, dgpu.1, etc. +3. TARGET_FPS: the minimum target frames per second for pipelines to reach + +Sample command lines: +1. sudo ./stream_density.sh rtsp://127.0.0.1:8554/camera_0 core 15 + +## Benchmark Helper Scripts + +**camera-simulator.sh** + +Starts the camera simulator. To use, place the script in a folder named camera-simulator. At the same directory level as the camera-simulator folder, create a folder called sample-media. The camera-simulator.sh script starts a simulator for each .mp4 video that it finds in the sample-media folder and enumerates them as camera_0, camera_1, etc. Be sure the path to the camera-simulator.sh script is correct in the start_emulated_camera_pipelines.sh script. + + + + diff --git a/benchmark-scripts/benchmark.sh b/benchmark-scripts/benchmark.sh new file mode 100755 index 00000000..4b2c8859 --- /dev/null +++ b/benchmark-scripts/benchmark.sh @@ -0,0 +1,309 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +error() { + printf '%s\n' "$1" >&2 + exit 1 +} + +show_help() { + echo " + usage: $0 + --pipelines NUMBER_OF_PIPELINES | --stream_density TARGET_FPS + --logdir FULL_PATH_TO_DIRECTORY + --duration SECONDS (not needed when --stream_density is specified) + --init_duration SECONDS + --platform core|xeon|dgpu.x + --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 + [--classification_disabled] + [ --ocr_disabled | --ocr [OCR_INTERVAL OCR_DEVICE] ] + [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] + [realsense_enabled] + + Note: + 1. dgpu.x should be replaced with the targeted GPUs, such as dgpu (for all GPUs), dgpu.0, dgpu.1, etc. + 2. filesrc will utilize videos stored in the sample-media folder + 3. Set environment variable STREAM_DENSITY_MODE=1 for starting single container stream density testing + 4. Set environment variable RENDER_MODE=1 for displaying pipeline and overlay CV metadata + " +} + +OPTIONS_TO_SKIP=0 + +get_options() { + while :; do + case $1 in + -h | -\?
| --help) + show_help + exit + ;; + --pipelines) + if [ -z "$2" ]; then + error 'ERROR: "--pipelines" requires an integer.' + fi + + PIPELINE_COUNT=$2 + echo "pipelines: $PIPELINE_COUNT" + OPTIONS_TO_SKIP=$(( $OPTIONS_TO_SKIP + 1 )) + shift + ;; + --stream_density) + if [ -z "$2" ]; then + error 'ERROR: "--stream_density" requires an integer.' + fi + + PIPELINE_COUNT=1 + STREAM_DENSITY_FPS=$2 + echo "stream_density: $STREAM_DENSITY_FPS" + OPTIONS_TO_SKIP=$(( $OPTIONS_TO_SKIP + 1 )) + shift + ;; + --logdir) + if [ -z "$2" ]; then + error 'ERROR: "--logdir" requires a path to a directory.' + fi + + LOG_DIRECTORY=$2 + echo "logdir: $LOG_DIRECTORY" + OPTIONS_TO_SKIP=$(( $OPTIONS_TO_SKIP + 1 )) + shift + ;; + --duration) + if [ -z "$2" ]; then + error 'ERROR: "--duration" requires an integer.' + fi + + DURATION=$2 + echo "duration: $DURATION" + OPTIONS_TO_SKIP=$(( $OPTIONS_TO_SKIP + 1 )) + shift + ;; + --init_duration) + if [ -z "$2" ]; then + error 'ERROR: "--init_duration" requires an integer.' + fi + + OPTIONS_TO_SKIP=$(( $OPTIONS_TO_SKIP + 1 )) + COMPLETE_INIT_DURATION=$2 + echo "init_duration: $COMPLETE_INIT_DURATION" + shift + ;; + *) + break + ;; + esac + + OPTIONS_TO_SKIP=$(( $OPTIONS_TO_SKIP + 1 )) + shift + + done +} + + +# USAGE: +# 1. PLATFORM: core|xeon|dgpu.x +# 2. INPUT SOURCE: RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 +# 3. CLASSIFICATION: enabled|disabled +# 4. OCR: disabled|OCR_INTERVAL OCR_DEVICE +# 5. BARCODE: disabled|BARCODE_INTERVAL +# 6. REALSENSE: enabled|disabled +# 7. PIPELINE_NUMBER: the number of pipelines to start, or specify MAX and a stream density benchmark will be performed with a 15 fps target per pipeline +# 8. LOG_DIRECTORY: the location to store all the log files. The consolidation script will look for directories within the top level directory and process the results in each one, so keep this structure in mind when creating the log directory. For example, for multiple videos with different numbers of objects, a log directory would look like: yolov5s_6330N/object1_mixed. Whatever is meaningful for the test run. +# 9. DURATION: the amount of time to run the data collection +# 10. COMPLETE_INIT_DURATION: the amount of time to allow the system to settle prior to starting the data collection. + +# load benchmark params +if [ -z "$1" ] +then + show_help + exit 1 +fi +get_options "$@" + +# load docker-run params +shift $OPTIONS_TO_SKIP +source ../get-options.sh "$@" + +# set performance mode +echo "Setting scaling_governor to perf mode" +echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor + +# clean log directory that is being reused +if [ -d $LOG_DIRECTORY ]; then rm -Rf $LOG_DIRECTORY; fi + +if [ ! -d $LOG_DIRECTORY ]; then mkdir -p $LOG_DIRECTORY; fi + +# clean previous meta data +rm -f results/* +rm -f ../results/* + +# NOTE: pcm-memory and pcm-power only support xeon platform.
Need to use pcm for core platform +# NOTE: need to separate into 2 run for xeon as pcm-power and pcm-memory cannot run in parallel +is_xeon=`lscpu | grep -i xeon | wc -l` +if [ "$is_xeon" == "1" ] +then + run_index=2 +else + run_index=1 +fi + +distributed=0 +echo "RunIndex $run_index" +for test_run in $( seq 0 $(($run_index - 1)) ) +do + echo "Entered loop" + # Start camera-simulator if rtsp is requested + if grep -q "rtsp" <<< "$INPUTSRC"; then + echo "Starting RTSP stream" + ./camera-simulator.sh + sleep 5 + fi + echo "Starting workload(s)" + + # docker-run needs to run in it's directory for the file paths to work + cd ../ +# pwd + + source get-gpu-info.sh + NUM_GPU=0 + if [ "$HAS_FLEX_140" == 1 ] + then + NUM_GPU=$GPU_NUM_140 + elif [ "$HAS_FLEX_170" == 1 ] + then + NUM_GPU=$GPU_NUM_170 + fi + + echo "DEBUG: docker-run.sh $@" + + for i in $( seq 0 $(($PIPELINE_COUNT - 1)) ) + do + if [ -z "$STREAM_DENSITY_FPS" ]; then + #pushd .. + echo "Starting pipeline$i" + if [ "$CPU_ONLY" != 1 ] && ([ "$HAS_FLEX_140" == 1 ] || [ "$HAS_FLEX_170" == 1 ]) + then + if [ "$NUM_GPU" != 0 ] + then + gpu_index=$(expr $i % $NUM_GPU) + # replacing the value of --platform with dgpu.$gpu_index for flex case + orig_args=("$@") + for ((i=0; i < $#; i++)) + do + if [ "${orig_args[i]}" == "--platform" ] + then + arrgpu=(${orig_args[i+1]//./ }) + TARGET_GPU_NUMBER=${arrgpu[1]} + if [ -z "$TARGET_GPU_NUMBER" ] || [ "$distributed" == 1 ]; then + set -- "${@:1:i+1}" "dgpu.$gpu_index" "${@:i+3}" + distributed=1 + fi + break + fi + done + LOW_POWER=$LOW_POWER ./docker-run.sh "$@" + else + echo "Error: NUM_GPU is 0, cannot run" + exit 1 + fi + else + CPU_ONLY=$CPU_ONLY LOW_POWER=$LOW_POWER ./docker-run.sh "$@" + fi + sleep 1 + #popd + else + echo "Starting stream density benchmarking" + #cleanup any residual containers + sids=($(docker ps --filter="name=vision-self-checkout" -q -a)) + if [ -z "$sids" ] + then + echo "no dangling docker containers to clean up" + else + for sid in "${sids[@]}" + do + echo "cleaning up dangling container $sid" + docker rm $sid -f + done + fi + + DURATION=0 + #pushd .. + #echo "Cur dir: `pwd`" + # Sync sleep in stream density script and platform metrics data collection script + CPU_ONLY=$CPU_ONLY LOW_POWER=$LOW_POWER COMPLETE_INIT_DURATION=$COMPLETE_INIT_DURATION STREAM_DENSITY_FPS=$STREAM_DENSITY_FPS STREAM_DENSITY_MODE=1 ./docker-run.sh "$@" + #popd + fi + done + cd - + + echo "Waiting for init duration to complete..." + sleep $COMPLETE_INIT_DURATION + + # launch log file monitor to detect if any pipeline stall happening + POLLING_INTERVAL=2 + ./log_time_monitor.sh ../results/ $POLLING_INTERVAL $PIPELINE_COUNT > $LOG_DIRECTORY/log_time_monitor$test_run.log & + log_time_monitor_pid=$! + + if [ $test_run -eq 0 ] + then + ./collect_platform_metrics.sh $DURATION $LOG_DIRECTORY $PLATFORM + else + ./collect_platform_metrics.sh $DURATION $LOG_DIRECTORY $PLATFORM --xeon-memory-only + fi + + if [ -z "$STREAM_DENSITY_FPS" ] + then + echo "Waiting $DURATION seconds for workload to finish" + else + echo "Waiting for workload(s) to finish..." + sids=$(docker ps --filter="name=vision-self-checkout" -q -a) + stream_workload_running=`echo "$sids" | wc -w` + + while [ 1 == 1 ] + do + sleep 1 + sids=$(docker ps --filter="name=vision-self-checkout" -q -a) + #echo "sids: $sids" + stream_workload_running=`echo "$sids" | wc -w` + #echo "stream workload_running: $stream_workload_running" + if (( $(echo $stream_workload_running 0 | awk '{if ($1 == $2) print 1;}') )) + then + #echo "DEBUG: quitting.." 
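+                # no vision-self-checkout containers remain, so the stream density workload has finished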
+ break + fi + done + fi + ./stop_platform_collection.sh + + echo "workloads finished..." + if [ -e ../results/r0.jsonl ] + then + sudo ./copy-platform-metrics.sh $LOG_DIRECTORY + sudo python3 ./results_parser.py >> meta_summary.txt + sudo mv meta_summary.txt $LOG_DIRECTORY + fi + + echo "test_run is: $test_run" + if [ $test_run -eq 0 ] + then + ./cleanup_gpu_metrics.sh $LOG_DIRECTORY + fi + + sleep 2 + ./stop_server.sh + sleep 5 + + # clean up the background process of log_time_monitor + kill $log_time_monitor_pid + +done # loop for test runs + diff --git a/benchmark-scripts/camera-simulator.sh b/benchmark-scripts/camera-simulator.sh new file mode 100755 index 00000000..67959829 --- /dev/null +++ b/benchmark-scripts/camera-simulator.sh @@ -0,0 +1,47 @@ +#!/bin/bash -e +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +COMMAND=$1 +SOURCE_DIR=$(dirname $(dirname "$(readlink -f "$0")")) +CAMERAS=$2 + +if [ -z "$COMMAND" ]; then + COMMAND="START" +fi + +if [ "${COMMAND,,}" = "start" ]; then + + cd $SOURCE_DIR/sample-media + FILES=( *.mp4 ) + + if [ -z "$CAMERAS" ]; then + CAMERAS=${#FILES[@]} + fi + + cd $SOURCE_DIR/camera-simulator + + docker run --rm -t --network=host --name camera-simulator aler9/rtsp-simple-server >rtsp_simple_server.log.txt 2>&1 & + index=0 + echo $CAMERAS + while [ $index -lt $CAMERAS ] + do + for file in "${FILES[@]}" + do + echo "Starting camera: rtsp://127.0.0.1:8554/camera_$index from $file" + docker run -t --rm --entrypoint ffmpeg --network host -v$SOURCE_DIR/sample-media:/home/pipeline-server/sample-media openvino/ubuntu20_data_runtime:2021.4.2 -nostdin -re -stream_loop -1 -i /home/pipeline-server/sample-media/$file -c copy -f rtsp -rtsp_transport tcp rtsp://localhost:8554/camera_$index >/dev/null 2>&1 & + ((index+=1)) + if [ $CAMERAS -le $index ]; then + break + fi + sleep 1 + done + done + +elif [ "${COMMAND,,}" = "stop" ]; then + docker kill camera-simulator +fi + diff --git a/benchmark-scripts/cleanup_gpu_metrics.sh b/benchmark-scripts/cleanup_gpu_metrics.sh new file mode 100755 index 00000000..31afb1c3 --- /dev/null +++ b/benchmark-scripts/cleanup_gpu_metrics.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# USAGE LOG_DIRECTORY +# +# Copyright (C) 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +fix_igt_json() { + echo "Fix IGT JSON called" + sed -i -e s/^}$/},/ $1 + sed -i '$ s/.$//' $1 + tmp_file=/tmp/tmp.json + sudo echo '[' > $tmp_file + sudo cat $1 >> $tmp_file + sudo echo ']' >> $tmp_file + mv $tmp_file $1 +} + +if [ -z "$1" ] +then + echo "Missing log_directory" + exit 1 +fi + +LOG_DIRECTORY=$1 + +echo "fixing igt and xpum for files in $LOG_DIRECTORY" +if [ -e ${LOG_DIRECTORY}/igt0.json ]; then + echo "fixing igt0.json" + fix_igt_json ${LOG_DIRECTORY}/igt0.json + #./fix_json.sh ${LOG_DIRECTORY} +fi +if [ -e ${LOG_DIRECTORY}/igt1.json ]; then + echo "fixing igt1.json" + fix_igt_json ${LOG_DIRECTORY}/igt1.json +fi + +#move the xpumanager dump files +devices=(0 1 2) +for device in ${devices[@]}; do + xpum_file=${LOG_DIRECTORY}/xpum${device}.json + if [ -e $xpum_file ]; then + echo "==== Stopping xpumanager collection (device ${device}) ====" + task_id=$(jq '.task_id' $xpum_file) + xpumcli dump --rawdata --stop $task_id + sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) ${LOG_DIRECTORY}/xpum${device}.csv + #sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) j_xpum${device}.csv + sudo rm ${LOG_DIRECTORY}/xpum${device}.json + cat ${LOG_DIRECTORY}/xpum${device}.csv | \ + python3 -c 'import csv, json, sys; print(json.dumps([dict(r) for r in csv.DictReader(sys.stdin)]))' > xpum${device}.json + sudo mv xpum${device}.json ${LOG_DIRECTORY}/xpum${device}.json + fi +done diff --git a/benchmark-scripts/collect_memory_usage.sh b/benchmark-scripts/collect_memory_usage.sh new file mode 100755 index 00000000..d247ff9f --- /dev/null +++ b/benchmark-scripts/collect_memory_usage.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +CAMERA_ID=$1 +PIPELINE_NUMBER=$2 +LOG_DIRECTORY=$3 +DURATION=$4 +COMPLETE_INIT_DURATION=$5 +PCM_DIRECTORY=/opt/intel/pcm/build/bin +STARTING_PORT=8080 +SOURCE_DIR=$(dirname "$(readlink -f "$0")") +GPU_DEVICE=$6 +#system options: +#core, xeon +SYSTEM=$7 + +#todo: need to add options to customize pipeline with ocr, barcode and classification options +#usage: ./docker-run.sh --platform core|xeon|dgpu.x --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 [--classification_disabled] [ --ocr_disabled | --ocr [OCR_INTERVAL OCR_DEVICE] ] [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] + +#GPU_DEVICE options are +#dgpu (for flex and arc), soc for CPU/iGPU + +if grep -q "dgpu" <<< "$GPU_DEVICE"; then +#if [ $GPU_DEVICE == "dgpu" ] +#then + echo "device set to dgpu" + DEVICE=$GPU_DEVICE +else + DEVICE=$SYSTEM +fi +#echo "device set to $DEVICE" + +# Handle bugs and "features" in intel_gpu_top JSON output +# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/100 +# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/blob/master/man/intel_gpu_top.rst?plain=1#L84 +fix_igt_json() { + sed -i -e s/^}$/},/ $1 + sed -i '$ s/.$//' $1 + tmp_file=/tmp/tmp.json + echo '[' > $tmp_file + cat $1 >> $tmp_file + echo ']' >> $tmp_file + mv $tmp_file $1 +} + +# NOTE: clean up log directory that is being reused +if [ -d $LOG_DIRECTORY ]; then rm -Rf $LOG_DIRECTORY; fi +if [ ! -d $LOG_DIRECTORY ]; then mkdir -p $LOG_DIRECTORY; fi + +#remove previous meta data +rm -f results/* +#rm -f ../results/* + + + #Add the video to test to the sample_media folder + echo "Starting RTSP stream" + ./camera-simulator.sh + sleep 5 + echo "Starting pipelines. 
Device: $DEVICE" + #docker-run needs to run in it's directory for the file paths to work + cd ../ + for i in $( seq 0 $(($PIPELINE_NUMBER - 1)) ) + do + if grep -q "dgpu" <<< "$GPU_DEVICE"; then + echo " ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID --ocr 5 GPU" + ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID --ocr 5 GPU + else + echo "./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID " + ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID + fi + done + cd - + + sleep $COMPLETE_INIT_DURATION + echo "Starting data collection" + timeout "$DURATION"s sar 60 >& $LOG_DIRECTORY/cpu_usage.log & + timeout "$DURATION"s free -s 60 >& $LOG_DIRECTORY/memory_usage.log & + timeout "$DURATION"s sudo iotop -o -P -b -d 60 >& $LOG_DIRECTORY/disk_bandwidth.log & + + if [ $SYSTEM = "xeon" ] + then + timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm-memory 60 -silent -nc -csv=$LOG_DIRECTORY/memory_bandwidth.csv & + elif [ $SYSTEM = "core" ] + then + #echo "Add pcm for core here" + timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm 60 -silent -nc -nsys -csv=$LOG_DIRECTORY/pcm.csv & + fi + + if grep -q "dgpu" <<< "$GPU_DEVICE" && [ $SYSTEM != "core" ]; then + metrics=0,5,22,24,25 + if [ -e /dev/dri/renderD128 ]; then + echo "==== Starting xpumanager capture (card 0) ====" + timeout "$DURATION"s sudo xpumcli dump --rawdata --start -d 0 -m $metrics -j > ${LOG_DIRECTORY}/xpum0.json & + fi + if [ -e /dev/dri/renderD129 ]; then + echo "==== Starting xpumanager capture (card 1) ====" + timeout "$DURATION"s sudo xpumcli dump --rawdata --start -d 1 -m $metrics -j > ${LOG_DIRECTORY}/xpum1.json & + fi + else + if [ -e /dev/dri/renderD128 ]; then + echo "==== Starting intel_gpu_top ====" + timeout "$DURATION"s sudo intel_gpu_top -s 60000 -J > ${LOG_DIRECTORY}/igt1.json & + fi + fi + + + sleep $DURATION + + echo "stopping server" + #./stop_server.sh + sleep 10 + + if [ -e ../results/r0.jsonl ] + then + sudo cp -r ../results . + sudo mv results/pipeline* $LOG_DIRECTORY + fi + + echo "fixing igt and xpum" + if [ -e ${LOG_DIRECTORY}/igt1.json ]; then + echo "fixing igt1.json" + fix_igt_json ${LOG_DIRECTORY}/igt1.json + #./fix_json.sh ${LOG_DIRECTORY} + fi + if [ -e ${LOG_DIRECTORY}/igt2.json ]; then + echo "fixing igt2.json" + fix_igt_json ${LOG_DIRECTORY}/igt2.json + fi + + #move the xpumanager dump files + devices=(0 1) + for device in ${devices[@]}; do + xpum_file=${LOG_DIRECTORY}/xpum${device}.json + if [ -e $xpum_file ]; then + echo "==== Stopping xpumanager collection (device ${device}) ====" + task_id=$(jq '.task_id' $xpum_file) + xpumcli dump --rawdata --stop $task_id + sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) ${LOG_DIRECTORY}/xpum${device}.csv + #sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) j_xpum${device}.csv + sudo rm ${LOG_DIRECTORY}/xpum${device}.json + cat ${LOG_DIRECTORY}/xpum${device}.csv | \ + python3 -c 'import csv, json, sys; print(json.dumps([dict(r) for r in csv.DictReader(sys.stdin)]))' > xpum${device}.json + sudo mv xpum${device}.json ${LOG_DIRECTORY}/xpum${device}.json + fi + done + + diff --git a/benchmark-scripts/collect_platform_metrics.sh b/benchmark-scripts/collect_platform_metrics.sh new file mode 100755 index 00000000..bc207552 --- /dev/null +++ b/benchmark-scripts/collect_platform_metrics.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +show_help() { + echo " + usage: $0 DURATION LOG_DIRECTORY PLATFORM [--xeon-memory-only] + " +} + +#echo "DEBUG: Params $@" + +DURATION=$1 +LOG_DIRECTORY=$2 +PLATFORM=$3 +#SOURCE_DIR=$(dirname "$(readlink -f "$0")") +PCM_DIRECTORY=/opt/intel/pcm/build/bin +source ../get-gpu-info.sh + +test_run=0 +if [ "$4" == "--xeon-memory-only" ] +then + test_run=1 +fi + +is_xeon=`lscpu | grep -i xeon | wc -l` + +echo "Starting platform data collection" +#if this is the first run, collect all the metrics +if [ $test_run -eq 0 ] +then + echo "Starting main data collection" + timeout "$DURATION" sar 1 >& $LOG_DIRECTORY/cpu_usage.log & + timeout "$DURATION" free -s 1 >& $LOG_DIRECTORY/memory_usage.log & + timeout "$DURATION" sudo iotop -o -P -b >& $LOG_DIRECTORY/disk_bandwidth.log & + + if [ "$is_xeon" == "1" ] + then + echo "Starting xeon pcm-power collection" + timeout "$DURATION" sudo $PCM_DIRECTORY/pcm-power >& $LOG_DIRECTORY/power_usage.log & + else + #/opt/intel/pcm/build/bin/pcm 1 -silent -nc -nsys -csv=$LOG_DIRECTORY/pcm.csv & + #pcm_has_data=`wc -l yolov5s_efficientnet_i7-12700H_4objs_igpu_streamdensity/data/pcm.csv | cut -d ' ' -f 1` + echo "Starting non-xeon pcm collection" + modprobe msr + # process list to see if any dangling pcm background processes to kill + pcm_pids=($(ps aux | grep pcm | grep -v grep | awk '{print $2}')) + if [ -z "$pcm_pids" ] + then + echo "no dangling pcm background processes to clean up" + else + for pid in "${pcm_pids[@]}" + do + echo "cleaning up dangling pcm $pid" + sudo kill -9 "$pid" + done + fi + timeout "$DURATION" sudo $PCM_DIRECTORY/pcm 1 -silent -nc -nsys -csv=$LOG_DIRECTORY/pcm.csv & + echo "DEBUG: pcm started collecting" + fi + + # DGPU pipeline and Flex GPU Metrics + if [ "$PLATFORM" == "dgpu" ] && [ $HAS_ARC == 0 ] + then + metrics=0,5,22,24,25 + # Check for up to 4 GPUs e.g. 
300W max + if [ -e /dev/dri/renderD128 ]; then + echo "==== Starting xpumanager capture (gpu 0) ====" + timeout "$DURATION" sudo xpumcli dump --rawdata --start -d 0 -m $metrics -j > ${LOG_DIRECTORY}/xpum0.json & + fi + if [ -e /dev/dri/renderD129 ]; then + echo "==== Starting xpumanager capture (gpu 1) ====" + timeout "$DURATION" sudo xpumcli dump --rawdata --start -d 1 -m $metrics -j > ${LOG_DIRECTORY}/xpum1.json & + fi + if [ -e /dev/dri/renderD130 ]; then + echo "==== Starting xpumanager capture (gpu 2) ====" + timeout "$DURATION" sudo xpumcli dump --rawdata --start -d 2 -m $metrics -j > ${LOG_DIRECTORY}/xpum2.json & + fi + if [ -e /dev/dri/renderD131 ]; then + echo "==== Starting xpumanager capture (gpu 4) ====" + timeout "$DURATION" sudo xpumcli dump --rawdata --start -d 3 -m $metrics -j > ${LOG_DIRECTORY}/xpum3.json & + fi + # DGPU pipeline and Arc GPU Metrics + elif [ "$PLATFORM" == "dgpu" ] && [ $HAS_ARC == 1 ] + then + echo "==== Starting igt arc ====" + # Arc is always on Core platform and although its GPU.1, the IGT device is actually 0 + # Collecting both + timeout $DURATION ../docker-run-igt.sh 0 + timeout $DURATION ../docker-run-igt.sh 1 + + # CORE pipeline and iGPU/Arc GPU Metrics + elif [ "$PLATFORM" == "core" ] + then + if [ $HAS_ARC == 1 ] + then + # Core can only have at most 2 GPUs + timeout $DURATION ../docker-run-igt.sh 0 + timeout $DURATION ../docker-run-igt.sh 1 + else + timeout $DURATION ../docker-run-igt.sh 0 + fi + fi +#if this is the second run, collect memory bandwidth data only +else + if [ "$is_xeon" == "1" ] + then + timeout "$DURATION" sudo $PCM_DIRECTORY/pcm-memory 1 -silent -nc -csv=$LOG_DIRECTORY/memory_bandwidth.csv & + fi +fi + +if [ "$DURATION" == "0" ] +then + echo "Data collection running until max stream density is reached" +else + echo "Data collection will run for $DURATION seconds" +fi +sleep $DURATION + +#echo "stopping docker containers" +#./stop_server.sh +#echo "stopping data collection..." +#sudo pkill -f iotop +#sudo pkill -f free +#sudo pkill -f sar +#sudo pkill -f pcm-power +#sudo pkill -f pcm +#sudo pkill -f xpumcli +#sudo pkill -f intel_gpu_top +#sleep 2 + +#if [ -e ../results/r0.jsonl ] +#then +# echo "Copying data for collection scripts...`pwd`" + +# sudo cp -r ../results . +# sudo mv results/igt* $LOG_DIRECTORY +# sudo mv results/pipeline* $LOG_DIRECTORY +# sudo python3 ./results_parser.py >> meta_summary.txt +# sudo mv meta_summary.txt $LOG_DIRECTORY +#else +# echo "Warning no data found for collection!" +#fi diff --git a/benchmark-scripts/collect_video_metrics.sh b/benchmark-scripts/collect_video_metrics.sh new file mode 100755 index 00000000..17ed6809 --- /dev/null +++ b/benchmark-scripts/collect_video_metrics.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +CAMERA_ID=$1 +PIPELINE_NUMBER=$2 +LOG_DIRECTORY=$3 +DURATION=$4 +COMPLETE_INIT_DURATION=$5 +PCM_DIRECTORY=/opt/intel/pcm/build/bin +STARTING_PORT=8080 +SOURCE_DIR=$(dirname "$(readlink -f "$0")") +GPU_DEVICE=$6 +#system options: +#core, xeon +SYSTEM=$7 +REALSENSE_ENABLED=$8 + +#todo: need to add options to customize pipeline with ocr, barcode and classification options +#usage: ./docker-run.sh --platform core|xeon|dgpu.x --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 [--classification_disabled] [ --ocr_disabled | --ocr [OCR_INTERVAL OCR_DEVICE] ] [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] + +#GPU_DEVICE options are +#dgpu (for flex and arc), soc for CPU/iGPU + +if grep -q "dgpu" <<< "$GPU_DEVICE"; then +#if [ $GPU_DEVICE == "dgpu" ] +#then + echo "device set to dgpu" + DEVICE=$GPU_DEVICE +else + DEVICE=$SYSTEM +fi +#echo "device set to $DEVICE" + +# Handle bugs and "features" in intel_gpu_top JSON output +# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/100 +# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/blob/master/man/intel_gpu_top.rst?plain=1#L84 +fix_igt_json() { + sed -i -e s/^}$/},/ $1 + sed -i '$ s/.$//' $1 + tmp_file=/tmp/tmp.json + echo '[' > $tmp_file + cat $1 >> $tmp_file + echo ']' >> $tmp_file + mv $tmp_file $1 +} + +#NOTE: clean up log directory that is being reused +if [ -d $LOG_DIRECTORY ]; then rm -Rf $LOG_DIRECTORY; fi + +if [ ! -d $LOG_DIRECTORY ]; then mkdir -p $LOG_DIRECTORY; fi + +#remove previous meta data +rm -f results/* +rm -f ../results/* + +# NOTE: pcm-memory and pcm-power only support xeon platform. Need to use pcm for core platform +# NOTE: need to separate into 2 run for xeon as pcm-power and pcm-memory cannot run in parallel +if [ $SYSTEM = "xeon" ] ; then + run_index=2 +else + run_index=1 +fi + +for test_run in $( seq 0 $(($run_index - 1)) ) +do + + #Add the video to test to the sample_media folder + echo "Starting RTSP stream" + ./camera-simulator.sh + sleep 5 + echo "Starting pipelines. 
Device: $DEVICE" + #docker-run needs to run in it's directory for the file paths to work + cd ../ + for i in $( seq 0 $(($PIPELINE_NUMBER - 1)) ) + do + if grep -q "dgpu" <<< "$GPU_DEVICE"; then + echo " ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID --ocr 5 GPU" + ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID --ocr 5 GPU $REALSENSE_ENABLED + else + ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID $REALSENSE_ENABLED + fi + done + cd - + + sleep $COMPLETE_INIT_DURATION + echo "Starting data collection" + #if this is the first run, collect all the metrics + if [ $test_run -eq 0 ] + then + timeout "$DURATION"s sar 1 >& $LOG_DIRECTORY/cpu_usage.log & + timeout "$DURATION"s free -s 1 >& $LOG_DIRECTORY/memory_usage.log & + timeout "$DURATION"s sudo iotop -o -P -b >& $LOG_DIRECTORY/disk_bandwidth.log & + + if [ $SYSTEM = "xeon" ] + then + timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm-power >& $LOG_DIRECTORY/power_usage.log & + elif [ $SYSTEM = "core" ] + then + #echo "Add pcm for core here" + timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm 1 -silent -nc -nsys -csv=$LOG_DIRECTORY/pcm.csv & + fi + + #if [ $GPU_DEVICE = "dgpu" ] + if grep -q "dgpu" <<< "$GPU_DEVICE" && [ $SYSTEM != "core" ]; then + metrics=0,5,22,24,25 + if [ -e /dev/dri/renderD128 ]; then + echo "==== Starting xpumanager capture (card 0) ====" + timeout "$DURATION"s sudo xpumcli dump --rawdata --start -d 0 -m $metrics -j > ${LOG_DIRECTORY}/xpum0.json & + fi + if [ -e /dev/dri/renderD129 ]; then + echo "==== Starting xpumanager capture (card 1) ====" + timeout "$DURATION"s sudo xpumcli dump --rawdata --start -d 1 -m $metrics -j > ${LOG_DIRECTORY}/xpum1.json & + fi + elif [ $GPU_DEVICE = "soc" ] && [ $SYSTEM = "core" ] + then + if [ -e /dev/dri/renderD128 ]; then + echo "==== Starting intel_gpu_top soc ====" + timeout "$DURATION"s ../docker-run-igt.sh 0 + fi + #arc gpu + else + if [ -e /dev/dri/renderD129 ]; then + echo "==== Starting intel_gpu_top arc ====" + timeout "$DURATION"s ../docker-run-igt.sh 0 + fi + fi + + #if this is the second run, collect memory bandwidth data only + else + if [ $SYSTEM = "xeon" ] + then + timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm-memory 1 -silent -nc -csv=$LOG_DIRECTORY/memory_bandwidth.csv & + fi + fi + + + sleep $DURATION + echo "stopping server" + ./stop_server.sh + + if [ -e ../results/r0.jsonl ] + then + sudo cp -r ../results . 
+ sudo mv results/igt* $LOG_DIRECTORY + sudo mv results/pipeline* $LOG_DIRECTORY + sudo python3 ./results_parser.py >> meta_summary.txt + sudo mv meta_summary.txt $LOG_DIRECTORY + fi + + echo "test_run is: $test_run" + if [ $test_run -eq 0 ] + then + echo "fixing igt and xpum" + if [ -e ${LOG_DIRECTORY}/igt0.json ]; then + echo "fixing igt0.json" + fix_igt_json ${LOG_DIRECTORY}/igt0.json + #./fix_json.sh ${LOG_DIRECTORY} + fi + if [ -e ${LOG_DIRECTORY}/igt1.json ]; then + echo "fixing igt1.json" + fix_igt_json ${LOG_DIRECTORY}/igt1.json + fi + + #move the xpumanager dump files + devices=(0 1) + for device in ${devices[@]}; do + xpum_file=${LOG_DIRECTORY}/xpum${device}.json + if [ -e $xpum_file ]; then + echo "==== Stopping xpumanager collection (device ${device}) ====" + task_id=$(jq '.task_id' $xpum_file) + xpumcli dump --rawdata --stop $task_id + sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) ${LOG_DIRECTORY}/xpum${device}.csv + #sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) j_xpum${device}.csv + sudo rm ${LOG_DIRECTORY}/xpum${device}.json + cat ${LOG_DIRECTORY}/xpum${device}.csv | \ + python3 -c 'import csv, json, sys; print(json.dumps([dict(r) for r in csv.DictReader(sys.stdin)]))' > xpum${device}.json + sudo mv xpum${device}.json ${LOG_DIRECTORY}/xpum${device}.json + fi + done + fi + + sleep 10 + +done diff --git a/benchmark-scripts/collect_video_metrics_flex.sh b/benchmark-scripts/collect_video_metrics_flex.sh new file mode 100755 index 00000000..d8cc99a8 --- /dev/null +++ b/benchmark-scripts/collect_video_metrics_flex.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +get_gpu_devices() { + has_gpu=0 + has_any_intel_server_gpu=`dmesg | grep -i "class 0x038000" | grep "8086"` + has_flex_170=`echo "$has_any_intel_server_gpu" | grep -i "56C0"` + has_flex_140=`echo "$has_any_intel_server_gpu" | grep -i "56C1"` + + if [ -z "$has_any_intel_server_gpu" ] + then + echo "Error: No Intel GPUs found" + return + fi + echo "GPU exists!" + + if [ ! -z "$has_flex_140" ] + then + HAS_FLEX_140=1 + GPU_NUM_140=`echo "$has_flex_140" | wc -l` + fi + if [ ! -z "$has_flex_170" ] + then + HAS_FLEX_170=1 + GPU_NUM_170=`echo "$has_flex_170" | wc -l` + fi + + echo "HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, GPU_NUM_140=$GPU_NUM_140, GPU_NUM_170=$GPU_NUM_170" +} + +CAMERA_ID=$1 +PIPELINE_NUMBER=$2 +LOG_DIRECTORY=$3 +DURATION=$4 +COMPLETE_INIT_DURATION=$5 +PCM_DIRECTORY=/opt/intel/pcm/build/bin +STARTING_PORT=8080 +SOURCE_DIR=$(dirname "$(readlink -f "$0")") +GPU_DEVICE=dgpu +#system options: +#core, xeon +SYSTEM=xeon +REALSENSE_ENABLED=$6 + +#todo: need to add options to customize pipeline with ocr, barcode and classification options +#usage: ./docker-run.sh --platform core|xeon|dgpu.x --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 [--classification_disabled] [ --ocr_disabled | --ocr [OCR_INTERVAL OCR_DEVICE] ] [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] + +#GPU_DEVICE options are +#dgpu (for flex and arc), soc for CPU/iGPU + +if grep -q "dgpu" <<< "$GPU_DEVICE"; then + echo "device set to dgpu" + DEVICE=$GPU_DEVICE +else + echo "error: should be always dgpu for flex devices" + exit 1 +fi + +#NOTE: clean up log directory that is being reused +if [ -d $LOG_DIRECTORY ]; then rm -Rf $LOG_DIRECTORY; fi + +if [ ! 
-d $LOG_DIRECTORY ]; then mkdir -p $LOG_DIRECTORY; fi
+
+#remove previous meta data
+rm -f results/*
+rm -f ../results/*
+
+HAS_FLEX_140=0
+HAS_FLEX_170=0
+get_gpu_devices
+
+NUM_GPU=0
+if [ "$HAS_FLEX_140" == 0 ] && [ "$HAS_FLEX_170" == 0 ]
+then
+	echo "Error: could not find the flex device hardware"
+	exit 1
+elif [ "$HAS_FLEX_140" == 1 ]
+then
+	NUM_GPU=$GPU_NUM_140
+else
+	NUM_GPU=$GPU_NUM_170
+fi
+
+if [ "$NUM_GPU" == 0 ]
+then
+	echo "Error: NUM_GPU is 0"
+	exit 1
+fi
+
+# NOTE: pcm-memory and pcm-power only support the xeon platform; use pcm for the core platform
+# NOTE: xeon needs two separate runs because pcm-power and pcm-memory cannot run in parallel
+for test_run in $( seq 0 $((1)) )
+do
+
+	#Add the video to test to the sample-media folder
+	echo "Starting RTSP stream"
+	./camera-simulator.sh
+	sleep 5
+	echo "Starting pipelines. Device: $DEVICE"
+	#docker-run needs to run in its directory for the file paths to work
+	cd ../
+
+	for i in $( seq 0 $(($PIPELINE_NUMBER - 1)) )
+	do
+		# distribute the pipeline workloads across the GPUs in round-robin order
+		gpu_index=$(expr $i % $NUM_GPU)
+		echo " ./docker-run.sh --platform dgpu.$gpu_index --inputsrc $CAMERA_ID --ocr 5 GPU $REALSENSE_ENABLED"
+		./docker-run.sh --platform dgpu.$gpu_index --inputsrc $CAMERA_ID --ocr 5 GPU $REALSENSE_ENABLED
+		statusCode=$?
+		if [ "$statusCode" != 0 ]; then
+			echo "Error: failed to launch pipeline $i for dgpu.$gpu_index with exit code $statusCode"
+		fi
+	done
+	cd -
+
+	sleep $COMPLETE_INIT_DURATION
+	echo "Starting data collection"
+	#if this is the first run, collect all the metrics
+	if [ $test_run -eq 0 ]
+	then
+		timeout "$DURATION"s sar 1 >& $LOG_DIRECTORY/cpu_usage.log &
+		timeout "$DURATION"s free -s 1 >& $LOG_DIRECTORY/memory_usage.log &
+		timeout "$DURATION"s sudo iotop -o -P -b >& $LOG_DIRECTORY/disk_bandwidth.log &
+
+		timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm-power >& $LOG_DIRECTORY/power_usage.log &
+
+		metrics=0,5,22,24,25
+		if [ -e /dev/dri/renderD128 ]; then
+			echo "==== Starting xpumanager capture (card 0) ===="
+			timeout "$DURATION"s sudo xpumcli dump --rawdata --start -d 0 -m $metrics -j > ${LOG_DIRECTORY}/xpum0.json &
+		fi
+		if [ -e /dev/dri/renderD129 ]; then
+			echo "==== Starting xpumanager capture (card 1) ===="
+			timeout "$DURATION"s sudo xpumcli dump --rawdata --start -d 1 -m $metrics -j > ${LOG_DIRECTORY}/xpum1.json &
+		fi
+	#if this is the second run, collect memory bandwidth data only
+	else
+		timeout "$DURATION"s sudo $PCM_DIRECTORY/pcm-memory 1 -silent -nc -csv=$LOG_DIRECTORY/memory_bandwidth.csv &
+	fi
+
+
+	sleep $DURATION
+	echo "Stopping server"
+	./stop_server.sh
+
+	if [ -e ../results/r0.jsonl ]
+	then
+		sudo cp -r ../results .
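+		# The flex variant gathers GPU stats through xpumcli rather than
+		# intel_gpu_top, so only pipeline* logs (no igt* logs) exist here.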
+ sudo mv results/pipeline* $LOG_DIRECTORY + sudo python3 ./results_parser.py >> meta_summary.txt + sudo mv meta_summary.txt $LOG_DIRECTORY + fi + + echo "test_run is: $test_run" + if [ $test_run -eq 0 ] + then + echo "fixing xpum" + #move the xpumanager dump files + devices=(0 1) + for device in ${devices[@]}; do + xpum_file=${LOG_DIRECTORY}/xpum${device}.json + if [ -e $xpum_file ]; then + echo "==== Stopping xpumanager collection (device ${device}) ====" + task_id=$(jq '.task_id' $xpum_file) + xpumcli dump --rawdata --stop $task_id + sudo cp $(jq --raw-output '.dump_file_path' $xpum_file) ${LOG_DIRECTORY}/xpum${device}.csv + sudo rm ${LOG_DIRECTORY}/xpum${device}.json + cat ${LOG_DIRECTORY}/xpum${device}.csv | \ + python3 -c 'import csv, json, sys; print(json.dumps([dict(r) for r in csv.DictReader(sys.stdin)]))' > xpum${device}.json + sudo mv xpum${device}.json ${LOG_DIRECTORY}/xpum${device}.json + fi + done + fi + + sleep 10 + +done diff --git a/benchmark-scripts/collect_xpum_csv.sh b/benchmark-scripts/collect_xpum_csv.sh new file mode 100755 index 00000000..7dc9d22d --- /dev/null +++ b/benchmark-scripts/collect_xpum_csv.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# Measure temp, frequency , power usage +echo "Collecting Flex metrics in $1/flex-metrics.csv" +xpumcli dump -m 0,1,2,3,8 > $1/flex-metrics.csv + diff --git a/benchmark-scripts/consolidate_multiple_run_of_metrics.py b/benchmark-scripts/consolidate_multiple_run_of_metrics.py new file mode 100644 index 00000000..959e5c1e --- /dev/null +++ b/benchmark-scripts/consolidate_multiple_run_of_metrics.py @@ -0,0 +1,539 @@ +''' +* Copyright (C) 2023 Intel Corporation. +* +* SPDX-License-Identifier: BSD-3-Clause +''' + +import pathlib +import datetime +import argparse +from abc import ABC, abstractmethod +from statistics import mean +import os +import re +import fnmatch +import numpy as np +import pandas as pd +from collections import defaultdict +from natsort import natsorted +from operator import add +import json + +# constants +AVG_CPU_USAGE_CONSTANT = "CPU Utilization %" +AVG_GPU_USAGE_CONSTANT = "GPU Utilization %" +AVG_GPU_MEM_USAGE_CONSTANT = "Memory Utilization %" +AVG_GPU_COMPUTE_USAGE_CONSTANT = "Compute Utilization %" +AVG_GPU_VDBOX_USAGE_CONSTANT = "Utilization %" + +AVG_DISK_READ_BANDWIDTH_CONSTANT = "Disk Read MB/s" +AVG_DISK_WRITE_BANDWIDTH_CONSTANT = "Disk Write MB/s" +AVG_MEM_USAGE_CONSTANT = "Memory Utilization %" +AVG_POWER_USAGE_CONSTANT = "Power Draw W" +AVG_MEM_BANDWIDTH_CONSTANT = "Memory Bandwidth Usage MB/s" +AVG_FPS_CONSTANT = "FPS" +LAST_MODIFIED_LOG = "Last log update" +TEXT_COUNT_CONSTANT = "Total Text count" +BARCODE_COUNT_CONSTANT = "Total Barcode count" + +class KPIExtractor(ABC): + @abstractmethod + def extract_data(self, log_file_path): + pass + + @abstractmethod + def return_blank(self): + pass + +class CPUUsageExtractor(KPIExtractor): + _SAR_CPU_USAGE_PATTERN = "(\\d\\d:\\d\\d:\\d\\d.(AM|PM))\\s+(\\w+)\\s+(\\d+.\\d+)\\s+(\\d+.\\d+)\\s+(\\d+.\\d+)\\s+(\\d+.\\d+)\\s+(\\d+.\\d+)\\s+(\\d+.\\d+)" + _IDLE_CPU_PERCENT_GROUP = 9 + + #overriding abstract method + def extract_data(self, log_file_path): + if os.path.getsize(log_file_path) == 0: + return {AVG_CPU_USAGE_CONSTANT: "NA"} + + print("parsing CPU usages") + sar_cpu_usage_row_p = re.compile(self._SAR_CPU_USAGE_PATTERN) + cpu_usages = [] + with open(log_file_path) as f: + for line in f: + sar_cpu_usage_row_m= sar_cpu_usage_row_p.match(line) + if sar_cpu_usage_row_m: + 
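+                    # A matching sar row looks something like:
+                    #   12:00:01 AM  all  3.10  0.00  1.20  0.10  0.00  95.60
+                    # The 9th captured group is %idle, so utilization is 100 - %idle.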
idle_cpu_percentage = float(sar_cpu_usage_row_m.group(self._IDLE_CPU_PERCENT_GROUP)) + cpu_usages.append(float(100) - idle_cpu_percentage) + if len(cpu_usages) > 0: + return {AVG_CPU_USAGE_CONSTANT: mean(cpu_usages)} + else: + return {AVG_CPU_USAGE_CONSTANT: "NA"} + + def return_blank(self): + return {AVG_CPU_USAGE_CONSTANT: "NA"} + + +class GPUUsageExtractor(KPIExtractor): + _USAGE_PATTERN = "Render/3D/0" + #overriding abstract method + def extract_data(self, log_file_path): + print("parsing GPU usages") + #print("log file path: {}".format(log_file_path)) + device = re.findall(r'\d+', os.path.basename(log_file_path)) + #print("device number: {}".format(device)) + gpu_device_usage = {} + eu_total = 0 + device_usage_key = "GPU_{} {}".format(device[0], AVG_GPU_USAGE_CONSTANT) + device_vdbox0_usage_key = "GPU_{} VDBOX0 {}".format(device[0], AVG_GPU_VDBOX_USAGE_CONSTANT) + device_vdbox1_usage_key = "GPU_{} VDBOX1 {}".format(device[0], AVG_GPU_VDBOX_USAGE_CONSTANT) + with open(log_file_path) as f: + eu_samples = [] + vdbox0_samples = [] + vdbox1_samples = [] + data = json.load(f) + for entry in data: + #json data works for vdbox, but not for overall usage due to duplicate Render/3D/0 entries in the log file + #eu_samples.append(entry["engines"]["Render/3D/0"]["busy"]) + #print("usage: {}".format(entry["engines"]["Render/3D/0"]["busy"])) + vdbox0_samples.append(entry["engines"]["Video/0"]["busy"]) + try: + vdbox1_samples.append(entry["engines"]["Video/1"]["busy"]) + except KeyError: + pass + + if len(vdbox0_samples) > 0: + gpu_device_usage[device_vdbox0_usage_key] = mean(vdbox0_samples) + try: + gpu_device_usage[device_vdbox1_usage_key] = mean(vdbox1_samples) + except: + pass + + usage_samples = [] + with open(log_file_path) as f: + lines = f.readlines() + + for i, line in enumerate(lines): + if self._USAGE_PATTERN in line: + #print("found pattern {}".format(line)) + usage_line = lines[i+1] + #print("usage line {}".format(usage_line)) + #usage_line.strip() + #print("usage line {}".format(usage_line)) + junk, usage_percent = usage_line.split(":", 1) + usage_percent, junk = usage_percent.split(",", 1) + #print("usage percent after split {}".format(float(usage_percent))) + if float(usage_percent) > 0: + #print("usage percent {}".format(float(usage_percent))) + + usage_samples.append(float(usage_percent)) + if usage_samples: + #print("avg gpu usage: {}".format(mean(usage_samples))) + gpu_device_usage[device_usage_key] = mean(usage_samples) + else: + gpu_device_usage[device_usage_key] = 0.0 + + + if gpu_device_usage: + return gpu_device_usage + else: + return {AVG_GPU_USAGE_CONSTANT: "NA"} + + def return_blank(self): + return {AVG_GPU_USAGE_CONSTANT: "NA"} + +class XPUMUsageExtractor(KPIExtractor): + #overriding abstract method + def extract_data(self, log_file_path): + print("parsing GPU usages") + #print("log file path: {}".format(log_file_path)) + gpu_device_usage = {} + gpu_device_mem_usage = {} + gpu_device_compute_usage = {} + gpu_encode_usage = {} + gpu_decode_usage = {} + device = re.findall(r'\d+', os.path.basename(log_file_path)) + #print("Device: {}".format(device)) + device_usage_key = "GPU_{} {}".format(device[0], AVG_GPU_USAGE_CONSTANT) + device_mem_usage_key = "GPU_{} {}".format(device[0], AVG_GPU_MEM_USAGE_CONSTANT) + device_compute_usage_key = "GPU_{} {}".format(device[0], AVG_GPU_COMPUTE_USAGE_CONSTANT) + device_vdbox0_usage_key = "GPU_{} VDBOX0 {}".format(device[0], AVG_GPU_VDBOX_USAGE_CONSTANT) + device_vdbox1_usage_key = "GPU_{} VDBOX1 {}".format(device[0], 
AVG_GPU_VDBOX_USAGE_CONSTANT)
+        #print("{}".format(device_usage_key))
+        with open(log_file_path) as f:
+            gpu_samples = []
+            mem_samples = []
+            compute_samples = []
+            encode0_samples = []
+            encode1_samples = []
+            decode0_samples = []
+            decode1_samples = []
+            data = json.load(f)
+            for entry in data:
+                try:
+                    gpu_samples.append(float(entry[" GPU Utilization (%)"]))
+                    mem_samples.append(float(entry[" GPU Memory Utilization (%)"]))
+                    compute_samples.append(float(entry[" Compute Engine 0 (%)"]))
+                    encode0_samples.append(float(entry[" Encoder Engine 0 (%)"]))
+                    encode1_samples.append(float(entry[" Encoder Engine 1 (%)"]))
+                    decode0_samples.append(float(entry[" Decoder Engine 0 (%)"]))
+                    decode1_samples.append(float(entry[" Decoder Engine 1 (%)"]))
+                except Exception:
+                    # there might be some anomaly in xpu manager outputs when collecting metrics, e.g. empty strings
+                    # here we ignore that formatting issue
+                    pass
+            entries = len(gpu_samples)
+            #for index in range(entries):
+            #    print("sample: {}".format(gpu_samples[index]))
+
+            if len(gpu_samples) > 0:
+                gpu_device_usage[device_usage_key] = mean(gpu_samples)
+                gpu_device_usage[device_mem_usage_key] = mean(mem_samples)
+                gpu_device_usage[device_compute_usage_key] = mean(compute_samples)
+                gpu_device_usage[device_vdbox0_usage_key] = (mean(encode0_samples) + mean(decode0_samples))
+                gpu_device_usage[device_vdbox1_usage_key] = (mean(encode1_samples) + mean(decode1_samples))
+        if gpu_device_usage:
+            return gpu_device_usage
+        else:
+            return {AVG_GPU_USAGE_CONSTANT: "NA"}
+
+    def return_blank(self):
+        return {AVG_GPU_USAGE_CONSTANT: "NA"}
+
+
+class MetaExtractor(KPIExtractor):
+    _TEXT_PATTERN = "Total Text count:"
+    _BARCODE_PATTERN = "Total Barcode count:"
+    #overriding abstract method
+    def extract_data(self, log_file_path):
+        if os.path.getsize(log_file_path) == 0:
+            return {TEXT_COUNT_CONSTANT: "NA", BARCODE_COUNT_CONSTANT: "NA"}
+
+        print("parsing text and barcode data")
+        #text_count = 0
+        #barcode_count = 0
+        with open(log_file_path) as f:
+            for line in f:
+                if self._TEXT_PATTERN in line:
+                    print("got text pattern")
+                    text_count = line.split(":", 1)
+                    text_count = int(text_count[1])
+                    #print("text count: {}".format(text_count))
+                elif self._BARCODE_PATTERN in line:
+                    print("got barcode pattern")
+                    barcode_count = line.split(":", 1)
+                    barcode_count = int(barcode_count[1])
+                    #print("barcode count: {}".format(barcode_count))
+
+        if 'text_count' in locals() and 'barcode_count' in locals():
+            return {TEXT_COUNT_CONSTANT: text_count, BARCODE_COUNT_CONSTANT: barcode_count}
+        else:
+            return {TEXT_COUNT_CONSTANT: "NA", BARCODE_COUNT_CONSTANT: "NA"}
+
+    def return_blank(self):
+        return {TEXT_COUNT_CONSTANT: "NA", BARCODE_COUNT_CONSTANT: "NA"}
+
+class MemUsageExtractor(KPIExtractor):
+    _MEM_USAGE_PATTERN = "Mem:\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)"
+    _MEM_TOTAL_GROUP = 1
+    _MEM_USED_GROUP = 2
+
+    #overriding abstract method
+    def extract_data(self, log_file_path):
+        if os.path.getsize(log_file_path) == 0:
+            return {AVG_MEM_USAGE_CONSTANT: "NA"}
+
+        print("parsing memory usage")
+        mem_usages = []
+        mem_usage_p = re.compile(self._MEM_USAGE_PATTERN)
+        with open(log_file_path) as f:
+            for line in f:
+                mem_usage_m = mem_usage_p.match(line)
+                if mem_usage_m:
+                    mem_usage = float(mem_usage_m.group(self._MEM_USED_GROUP)) / float(mem_usage_m.group(self._MEM_TOTAL_GROUP))
+                    mem_usages.append(mem_usage)
+
+        if len(mem_usages) > 0:
+            return {AVG_MEM_USAGE_CONSTANT: mean(mem_usages) * 100}
+        else:
+            return {AVG_MEM_USAGE_CONSTANT: "NA"}
+
+    def return_blank(self):
+        return {AVG_MEM_USAGE_CONSTANT: "NA"}
+
+class PowerUsageExtractor(KPIExtractor):
+    #overriding abstract method
+    _POWER_USAGE_PATTERN = "(\\w+);.Consumed.energy.units:.(\\d+).+Joules: (\\d+.\\d+).+Watts:.(\\d+.\\d+).+TjMax:.(\\d+)"
+    _SOCKET_ID_GROUP = 1
+    _POWER_USAGE_GROUP = 4
+    def extract_data(self, log_file_path):
+        if os.path.getsize(log_file_path) == 0:
+            return {AVG_POWER_USAGE_CONSTANT: "NA"}
+
+        power_dict = defaultdict(list)
+        power_usage_p = re.compile(self._POWER_USAGE_PATTERN)
+        print("parsing power usage")
+        with open(log_file_path) as f:
+            for line in f:
+                power_usage_m = power_usage_p.match(line)
+                if power_usage_m:
+                    socket_id = power_usage_m.group(self._SOCKET_ID_GROUP)
+                    power_usage = float(power_usage_m.group(self._POWER_USAGE_GROUP))
+                    power_dict[socket_id].append(power_usage)
+
+        power_kpi_dict = {}
+        for socket_id, power_usages in power_dict.items():
+            socket_key = "{} {}".format(socket_id, AVG_POWER_USAGE_CONSTANT)
+            power_kpi_dict[socket_key] = mean(power_usages)
+
+        if power_kpi_dict:
+            return power_kpi_dict
+        else:
+            return {AVG_POWER_USAGE_CONSTANT: "NA"}
+
+    def return_blank(self):
+        return {AVG_POWER_USAGE_CONSTANT: "NA"}
+
+class DiskBandwidthExtractor(KPIExtractor):
+    _DISK_BANDWIDTH_PATTERN = "Total DISK READ:.+\\s(\\d+.\\d+).(B\\/s|K\\/s).+\\s(\\d+.\\d+).(B\\/s|K\\/s)"
+    _READ_BYTES_PER_SECOND_GROUP = 1
+    _READ_BYTES_UNITS_GROUP = 2
+    _WRITE_BYTES_PER_SECOND_GROUP = 3
+    _WRITE_BYTES_UNITS_GROUP = 4
+
+    #overriding abstract method
+    def extract_data(self, log_file_path):
+        print("parsing disk bandwidth")
+        disk_read_bytes_per_second = []
+        disk_write_bytes_per_second = []
+        disk_bandwidth_p = re.compile(self._DISK_BANDWIDTH_PATTERN)
+        with open(log_file_path) as f:
+            for line in f:
+                disk_bandwidth_m = disk_bandwidth_p.match(line)
+                if disk_bandwidth_m:
+                    # we want the data in Bytes first before finally converting to MegaBytes
+                    unit_multiplier = 1 if 'B' in disk_bandwidth_m.group(self._READ_BYTES_UNITS_GROUP) else 1000
+                    read_bytes_per_second = float(disk_bandwidth_m.group(self._READ_BYTES_PER_SECOND_GROUP)) * unit_multiplier
+
+                    unit_multiplier = 1 if 'B' in disk_bandwidth_m.group(self._WRITE_BYTES_UNITS_GROUP) else 1000
+                    write_bytes_per_second = float(disk_bandwidth_m.group(self._WRITE_BYTES_PER_SECOND_GROUP)) * unit_multiplier
+
+                    disk_read_bytes_per_second.append(read_bytes_per_second)
+                    disk_write_bytes_per_second.append(write_bytes_per_second)
+
+        bytes_per_megabyte = 1000000
+        if len(disk_read_bytes_per_second) > 0 and len(disk_write_bytes_per_second) > 0:
+            return {AVG_DISK_READ_BANDWIDTH_CONSTANT: mean(disk_read_bytes_per_second) / bytes_per_megabyte,
+                AVG_DISK_WRITE_BANDWIDTH_CONSTANT: mean(disk_write_bytes_per_second) / bytes_per_megabyte}
+        else:
+            return {AVG_DISK_READ_BANDWIDTH_CONSTANT: "NA", AVG_DISK_WRITE_BANDWIDTH_CONSTANT: "NA"}
+
+    def return_blank(self):
+        return {AVG_DISK_READ_BANDWIDTH_CONSTANT: "NA", AVG_DISK_WRITE_BANDWIDTH_CONSTANT: "NA"}
+
+class MemBandwidthExtractor(KPIExtractor):
+    #overriding abstract method
+    def extract_data(self, log_file_path):
+        if os.path.getsize(log_file_path) == 0:
+            return {AVG_MEM_BANDWIDTH_CONSTANT: "NA"}
+
+        print("parsing memory bandwidth")
+        socket_memory_bandwidth = {}
+        df = pd.read_csv(log_file_path, header=1)
+        socket_count = 0
+        for column in df.columns:
+            if 'Memory (MB/s)' in column:
+                socket_key = "S{} {}".format(socket_count, AVG_MEM_BANDWIDTH_CONSTANT)
+                mem_bandwidth = df[column].tolist()
+                socket_memory_bandwidth[socket_key] = mean([ x for x in mem_bandwidth
if pd.isna(x) == False ]) + socket_count = socket_count + 1 + + if socket_memory_bandwidth: + return socket_memory_bandwidth + else: + return {AVG_MEM_BANDWIDTH_CONSTANT: "NA"} + + def return_blank(self): + return {AVG_MEM_BANDWIDTH_CONSTANT: "NA"} + + +class PIPELINEFPSExtractor(KPIExtractor): + _FPS_KEYWORD = "avg_fps" + + #overriding abstract method + def extract_data(self, log_file_path): + print("parsing fps") + average_fps_list = [] + camera_fps = {} + cam = re.findall(r'\d+', os.path.basename(log_file_path)) + camera_key = "Camera_{} {}".format(cam[0], AVG_FPS_CONSTANT) + with open(log_file_path) as f: + for line in f: + average_fps_list.append(float(line)) + + if len(average_fps_list) > 0: + camera_fps[camera_key] = mean(average_fps_list) + else: + camera_fps[camera_key] = "NA" + + return camera_fps + + def return_blank(self): + return {AVG_FPS_CONSTANT: "NA"} + +class FPSExtractor(KPIExtractor): + _FPS_KEYWORD = "avg_fps" + + #overriding abstract method + def extract_data(self, log_file_path): + print("parsing fps") + average_fps_list = [] + camera_fps = {} + cam = re.findall(r'\d+', os.path.basename(log_file_path)) + camera_key = "Camera_{} {}".format(cam[0], AVG_FPS_CONSTANT) + with open(log_file_path) as f: + for line in f: + if self._FPS_KEYWORD in line: + average_fps_list.append(float((line.split(":"))[1].replace(",", ""))) + + if len(average_fps_list) > 0: + camera_fps[camera_key] = mean(average_fps_list) + + if camera_fps: + return camera_fps + else: + return {AVG_FPS_CONSTANT: "NA"} + + def return_blank(self): + return {AVG_FPS_CONSTANT: "NA"} + +class PIPELINLastModifiedExtractor(KPIExtractor): + #overriding abstract method + def extract_data(self, log_file_path): + print("parsing last modified log time") + average_fps_list = [] + last_modified = {} + cam = re.findall(r'\d+', os.path.basename(log_file_path)) + print(log_file_path) + print(cam) + camera_key = "Camera_{} {}".format(cam[0], LAST_MODIFIED_LOG) + + #get the last file modified time + print(log_file_path) + unix_date = os.path.getmtime(log_file_path) if os.path.exists(log_file_path) else None + print(unix_date) + #convert unix time to human readable date time + formatted_date = datetime.datetime.fromtimestamp(unix_date) if not (unix_date is None) else None + print(formatted_date) + #convert date format to string + last_modified[camera_key] = formatted_date.strftime('%m/%d/%Y %H:%M:%f') if not (formatted_date is None) else {LAST_MODIFIED_LOG: "NA"} + print(last_modified[camera_key]) + + return last_modified + + def return_blank(self): + return {LAST_MODIFIED_LOG: "NA"} + +class PCMExtractor(KPIExtractor): + #overriding abstract method + def extract_data(self, log_file_path): + if os.path.getsize(log_file_path) == 0: + return {AVG_POWER_USAGE_CONSTANT: "NA", AVG_MEM_BANDWIDTH_CONSTANT: "NA"} + + socket_memory_and_power = {} + print("parsing memory bandwidth") + df = pd.read_csv(log_file_path, header=1) + socket_count = 0 + for column in df.columns: + if 'READ' in column: + mem_read = df[column].tolist() + elif 'WRITE' in column: + mem_write = df[column].tolist() + mem_bandwidth = list(map(add, mem_read, mem_write)) + socket_key = "S{} {}".format(socket_count, AVG_MEM_BANDWIDTH_CONSTANT) + socket_memory_and_power[socket_key] = 1000 * mean([ x for x in mem_bandwidth if pd.isna(x) == False ]) + socket_count = socket_count + 1 + + print("parsing power usage") + df = pd.read_csv(log_file_path) + socket_power_usage = {} + socket_count = 0 + for column in df.columns: + if 'Proc Energy (Joules)' in column: + power_usage = 
df[column].tolist() + del power_usage[0] + socket_key = "S{} {}".format(socket_count, AVG_POWER_USAGE_CONSTANT) + socket_memory_and_power[socket_key] = mean([ float(x) for x in power_usage if pd.isna(x) == False ]) + socket_count = socket_count + 1 + + if socket_memory_and_power: + return socket_memory_and_power + else: + return {AVG_POWER_USAGE_CONSTANT: "NA", AVG_MEM_BANDWIDTH_CONSTANT: "NA"} + + def return_blank(self): + return {AVG_POWER_USAGE_CONSTANT: "-", AVG_MEM_BANDWIDTH_CONSTANT: "-"} + +KPIExtractor_OPTION = {"meta_summary.txt":MetaExtractor, + "camera":FPSExtractor, + "pipeline":PIPELINEFPSExtractor, + "(?:^r).*\.jsonl$":PIPELINLastModifiedExtractor, + "cpu_usage.log":CPUUsageExtractor, + "memory_usage.log":MemUsageExtractor, + "memory_bandwidth.csv":MemBandwidthExtractor, + "disk_bandwidth.log":DiskBandwidthExtractor, + "power_usage.log":PowerUsageExtractor, + "pcm.csv":PCMExtractor, + "(?:^xpum).*\.json$":XPUMUsageExtractor, + "igt":GPUUsageExtractor,} + +def add_parser(): + parser = argparse.ArgumentParser(description='Consolidate data') + parser.add_argument('--root_directory', nargs=1, help='Root directory that consists all log directory that store log file', required=True) + parser.add_argument('--output', nargs=1, help='Output file to store consolidate data', required=True) + return parser + +if __name__ == '__main__': + parser = add_parser() + args = vars(parser.parse_args()) + + root_directory = args['root_directory'][0] + output = args['output'][0] + + n = 0 + df = pd.DataFrame() + for log_directory_path in [ f.path for f in os.scandir(root_directory) if f.is_dir() ]: + folderName = os.path.basename(log_directory_path) + full_kpi_dict = {} + for kpiExtractor in KPIExtractor_OPTION: + fileFound = False + for dirpath, dirname, filename in os.walk(log_directory_path): + for file in filename: + if re.search(kpiExtractor, file): + #print("matched file: {}".format(file)) + fileFound = True + extractor = KPIExtractor_OPTION.get(kpiExtractor)() + kpi_dict = extractor.extract_data(os.path.join(log_directory_path, file)) + if kpi_dict: + full_kpi_dict.update(kpi_dict) + #if fileFound == False: + # extractor = KPIExtractor_OPTION.get(kpiExtractor)() + # kpi_dict = extractor.return_blank() + # if kpi_dict: + # full_kpi_dict.update(kpi_dict) + + list_of_metric = [] + list_of_value = [] + for kpi, value in full_kpi_dict.items(): + #print("kpi: {}, value: {}".format(kpi, value)) + #print("value list size: {}".format(len(list_of_value))) + list_of_metric.append(kpi) + if isinstance(value, str): + list_of_value.append(value) + else: + list_of_value.append(round(value, 3)) + + if n == 0: + df['Metric'] = list_of_metric + df[folderName] = list_of_value + n = -1 + + df.to_csv(output, header=True) diff --git a/benchmark-scripts/copy-platform-metrics.sh b/benchmark-scripts/copy-platform-metrics.sh new file mode 100755 index 00000000..212abf7b --- /dev/null +++ b/benchmark-scripts/copy-platform-metrics.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +LOG_DIRECTORY=$1 + +if [ -e ../results/r0.jsonl ] +then + echo "Copying data for collection scripts...`pwd`" + + sudo cp -r ../results . + sudo cp results/stream* $LOG_DIRECTORY || true + sudo mv results/igt* $LOG_DIRECTORY || true + sudo mv results/pipeline* $LOG_DIRECTORY + sudo cp results/r* $LOG_DIRECTORY + sudo python3 ./results_parser.py >> meta_summary.txt + sudo mv meta_summary.txt $LOG_DIRECTORY +else + echo "Warning no data found for collection!" 
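+	# r0.jsonl is written by the first pipeline; when it is absent the
+	# pipelines most likely never started, so check the server log files.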
+fi
diff --git a/benchmark-scripts/download_sample_videos.sh b/benchmark-scripts/download_sample_videos.sh
new file mode 100755
index 00000000..987156ad
--- /dev/null
+++ b/benchmark-scripts/download_sample_videos.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# up to 3 bottles and human hand
+./format_avc_mp4.sh coca-cola-4465029.mp4 https://www.pexels.com/video/4465029/download/ $1 $2 $3
+#./format_avc_mp4.sh grocery-items-on-the-kitchen-shelf-4983686.mp4 https://www.pexels.com/video/4983686/download/ $1 $2 $3
+#./format_avc_mp4.sh couple-paying-at-the-counter-in-the-grocery-4121754.mp4 https://www.pexels.com/video/4121754/download/
diff --git a/benchmark-scripts/format_avc_mp4.sh b/benchmark-scripts/format_avc_mp4.sh
new file mode 100755
index 00000000..3c41e5b4
--- /dev/null
+++ b/benchmark-scripts/format_avc_mp4.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+show_help() {
+	echo "
+	usage: $0 video_name.mp4 URL_TO_MP4 [width height fps]
+
+	Note:
+	1. This utility converts video_name.mp4 to 3840x2160@15fps AVC by default (override with width, height, and fps) and requires an Intel GPU.
+	2. The video_name.mp4 file must reside in the sample-media folder.
+	3. The video_name.mp4 file must already be in AVC format.
+	"
+}
+
+WIDTH=3840
+HEIGHT=2160
+FPS=15
+HAS_FLEX_140=0
+HAS_FLEX_170=0
+HAS_ARC=0
+#HAS_iGPU=0
+
+get_gpu_devices() {
+	has_gpu=0
+	has_any_intel_non_server_gpu=`dmesg | grep -i "class 0x030000" | grep "8086"`
+	has_any_intel_server_gpu=`dmesg | grep -i "class 0x038000" | grep "8086"`
+	has_flex_170=`echo "$has_any_intel_server_gpu" | grep -i "56C0"`
+	has_flex_140=`echo "$has_any_intel_server_gpu" | grep -i "56C1"`
+	has_arc=`echo "$has_any_intel_non_server_gpu" | grep -iE "5690|5691|5692|56A0|56A1|56A2|5693|5694|5695|5698|56A5|56A6|56B0|56B1|5696|5697|56A3|56A4|56B2|56B3"`
+
+	if [ -z "$has_any_intel_non_server_gpu" ] && [ -z "$has_any_intel_server_gpu" ]
+	then
+		echo "No Intel GPUs found"
+		return
+	fi
+	#echo "GPU exists!"
+	if [ ! -z "$has_flex_140" ]
+	then
+		HAS_FLEX_140=1
+	fi
+	if [ ! -z "$has_flex_170" ]
+	then
+		HAS_FLEX_170=1
+	fi
+	if [ ! -z "$has_arc" ]
+	then
+		HAS_ARC=1
+	fi
+
+	#echo "HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, HAS_ARC=$HAS_ARC"
+}
+
+
+get_gpu_devices
+
+if [ -z "$2" ]
+then
+	show_help
+	exit 1
+fi
+
+
+result=${1/.mp4/"-bench.mp4"}
+if [ -f ../sample-media/$result ]
+then
+	echo "Skipping...conversion was already done for ../sample-media/$result."
+	exit 0
+fi
+
+if [ ! -f ../sample-media/$1 ] && [ ! -f ../sample-media/$result ]
+then
+	wget -O ../sample-media/$1 $2
+fi
+
+if [ ! -f ../sample-media/$1 ]
+then
+	echo "ERROR: Cannot find the video file; the download may have failed."
+	show_help
+	exit 1
+fi
+
+if [ ! -z "$3" ]
+then
+	WIDTH=$3
+fi
+
+if [ ! -z "$4" ]
+then
+	HEIGHT=$4
+fi
+
+if [ ! -z "$5" ]
+then
+	FPS=$5
+fi
+
+if [ -z "$WIDTH" ] || [ -z "$HEIGHT" ] || [ -z "$FPS" ]
+then
+	echo "ERROR: Invalid width height fps"
+	exit 1
+fi
+
+
+if [ $HAS_FLEX_140 == 1 ] || [ $HAS_FLEX_170 == 1 ] || [ $HAS_ARC == 1 ]
+then
+	TAG=sco-dgpu:2.0
+
+else
+	echo "ERROR: Requires Intel Flex/Arc GPU"
+	exit 1
+fi
+
+echo "$WIDTH $HEIGHT $FPS"
+docker run --network host --privileged --user root --ipc=host -e VIDEO_FILE=$1 -e DISPLAY=:0 -v /tmp/.X11-unix:/tmp/.X11-unix -v `pwd`/../sample-media/:/vids -w /vids -it --rm $TAG bash -c "if [ -f /vids/$result ]; then exit 1; else gst-launch-1.0 filesrc location=/vids/$1 ! qtdemux ! h264parse !
vaapih264dec ! vaapipostproc width=$WIDTH height=$HEIGHT ! videorate ! 'video/x-raw, framerate=$FPS/1' ! vaapih264enc ! h264parse ! mp4mux ! filesink location=/vids/$result; fi" + +rm ../sample-media/$1 +echo "Result will be created in ../sample-media/$result" diff --git a/benchmark-scripts/log_time_monitor.sh b/benchmark-scripts/log_time_monitor.sh new file mode 100755 index 00000000..39b4a2f3 --- /dev/null +++ b/benchmark-scripts/log_time_monitor.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +WATCH_LOG_DIR=$1 # like ../results/ +WATCH_POLL_INTERVAL_IN_SECOND=$2 # usually per second polling +NUM_PIPELINES=$3 # number of pipelines: this determines how many pipeline log files to compare with + +if [ ! -d "$WATCH_LOG_DIR" ]; then + echo "ERROR: cannot find the log directory: $WATCH_LOG_DIR" + exit 1 +fi + +if [ $NUM_PIPELINES -lt 2 ]; then + echo "nothing to compare with, exiting" + exit 0 +fi + +PIPELINE_FILE_PREFIX=pipeline + +log_files=$(find "$WATCH_LOG_DIR" -name "$PIPELINE_FILE_PREFIX*.log" -printf '%p\n') +num_log_files=$(echo "$log_files" | wc -l) + +echo "INFO: find $num_log_files log files in $WATCH_LOG_DIR" + +if [ "$num_log_files" -lt "$NUM_PIPELINES" ]; then + echo "ERROR: expecting $NUM_PIPELINES log files but only found $num_log_files" + exit 1 +fi + +while true +do + echo "log file timestamp monitor running ..." + times=() + for log_file in $log_files + do + t=$(stat -c %Y "$log_file") + echo "timestamp for $log_file is $t" + times+=("$t") + done + + # calculate time difference and stall threshold + STALL_THRESHOLD=5 + i=0 + for log_file1 in $log_files + do + j=0 + for log_file2 in $log_files + do + # only compare to other files not itself and also compare the file later not repeat the previous already compared files + if [ "$log_file1" != "$log_file2" ] && [ "$j" -gt "$i" ]; then + t1="${times[$i]}" + t2="${times[$j]}" + time_diff=$(expr $t1 - $t2) + # removing -ve values if $t1 < $t2 + time_diff=${time_diff#-} + if [ "$time_diff" -ge "$STALL_THRESHOLD" ]; then + echo "WARNING: stalled pipelines detected, $log_file1 and $log_file2 time difference is $time_diff seconds, above stalled threshold $STALL_THRESHOLD seconds" + fi + fi + j=$((j+1)) + done + i=$((i+1)) + done + sleep $WATCH_POLL_INTERVAL_IN_SECOND +done diff --git a/benchmark-scripts/requirements.txt b/benchmark-scripts/requirements.txt new file mode 100644 index 00000000..9b9d222a --- /dev/null +++ b/benchmark-scripts/requirements.txt @@ -0,0 +1,3 @@ +natsort==8.2.0 +numpy==1.19.4 +pandas==1.3.5 diff --git a/benchmark-scripts/results_parser.py b/benchmark-scripts/results_parser.py new file mode 100644 index 00000000..8af354c8 --- /dev/null +++ b/benchmark-scripts/results_parser.py @@ -0,0 +1,217 @@ +''' +* Copyright (C) 2023 Intel Corporation. 
+* +* SPDX-License-Identifier: BSD-3-Clause +''' + +import time +import sys +import argparse +import os +import json +from collections import Counter +from dataclasses import dataclass +import traceback + +@dataclass +class InferenceCounts: + detection: int = 0 + classification: int = 0 + text_detection: int = 0 + text_recognition: int = 0 + barcode: int = 0 + + +tracked_objects = {} +frame_count = 0 +inferenceCounts = InferenceCounts() + +def parse_args(): + parser = argparse.ArgumentParser(prog="Results Parser", + fromfile_prefix_chars='@', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('--mode', default="file", help='Mode: file or mqtt') + parser.add_argument('--stream-index', default=0, help='Stream index') + parser.add_argument('--file', default="", help='file name') + parser.add_argument('--min-detections', default=15, help='Number of detections to define a valid object') + parser.add_argument('--reclassify-interval', default=1, help='Reclassify interval') + parser.add_argument('--broker-address', default="localhost", help='MQTT broker address') + parser.add_argument('--broker-port', default=1883, help='MQTT broker port') + return parser.parse_args() + + +def is_inside(inner, outer): + return inner["x_min"] >= outer["x_min"] and \ + inner["x_max"] <= outer["x_max"] and \ + inner["y_min"] >= outer["y_min"] and \ + inner["y_max"] <= outer["y_max"] + +def get_parent_id(detections, detection): + bbox = detection["bounding_box"] + for key in detections: + if is_inside(bbox, detections[key]["bounding_box"]): + return key + return 0 + +def print_object(obj): + print(" - Object {}: {}".format(obj["id"], obj["label"])) + print(" - Product: {}".format(obj["product"])) + print(" - Barcode: {}".format(obj.get("barcode"))) + print(" - Text: {} {}".format(len(obj["text"]),obj["text"])) + + +def process(results, reclassify_interval): + product_key = "classification_layer_name:efficientnet-b0/model/head/dense/BiasAdd/Add" + text_keys = ["inference_layer_name:logits", "inference_layer_name:shadow/LSTMLayers/transpose_time_major", + "inference_layer_name:shadow/LSTMLayers/Reshape_1"] + detections = {} + objects = {} + inferenceCounts.detection+=1 + # Needed for additional entries like non-inference results like {"resolution":{"height":2160,"width":3840},"timestamp":201018476} + if "objects" not in results: + return + for result in results["objects"]: + detection = result["detection"] + region_id = result["region_id"] + label = "EMPTY" + if "label" in detection: + label = detection["label"] + if "id" in result: + tracking_id = result["id"] + objects[region_id] = { + "id" : tracking_id, + "label" : label, + "text" : [], + "barcode": None, + "bounding_box": detection["bounding_box"] + } + detections[region_id] = detection + if product_key in result: + product = result[product_key]["label"][10:] + objects[region_id]["product"] = product + if label.startswith("barcode: "): + barcode = detection["label"][9:] + if barcode.endswith("_tracked"): + barcode = barcode[:-len("_tracked")] + else: + inferenceCounts.barcode+=1 + parent_id = get_parent_id(detections, detection) + if parent_id: + objects[parent_id]["barcode"] = barcode + for text_key in text_keys: + if text_key in result: + text = result[text_key]["label"] + inferenceCounts.text_detection+=1 + inferenceCounts.text_recognition+=1 + parent_id = get_parent_id(detections, detection) + if parent_id: + objects[parent_id]["text"].append(text) + + print("- Frame {}".format(frame_count)) + for obj in 
sorted(objects.values(),key=lambda obj: obj["bounding_box"]["x_min"]):
+        print_object(obj)
+        update_tracked_object(obj,tracked_objects)
+
+
+def update_tracked_object(obj, tracked_objects):
+    tracked_object = tracked_objects.setdefault(obj["id"],{})
+    tracked_keys = ["barcode","text","label","product"]
+    tracked_object["id"] = obj["id"]
+    for tracked_key in tracked_keys:
+        updates = obj[tracked_key]
+        if not isinstance(updates,list):
+            updates = [updates]
+        tracked_object.setdefault(tracked_key,Counter()).update(
+            updates)
+
+
+def process_file(args):
+    if args.file:
+        filename = args.file
+    else:
+        filename = "results/r{}.jsonl".format(args.stream_index)
+    file = open(filename, "r")
+    line = file.readline()
+    global frame_count
+    while line:
+        try:
+            results = json.loads(line)
+            process(results, args.reclassify_interval)
+            frame_count += 1
+        except Exception as e:
+            print("Error: {}".format(e))
+            print(traceback.format_exc())
+        line = file.readline()
+    file.close()
+
+
+def on_connect(client, user_data, _unused_flags, return_code):
+    if return_code == 0:
+        args = user_data
+        print("Connected to broker at {}:{}".format(args.broker_address, args.broker_port))
+        topic = "gulfstream/results_{}".format(args.stream_index)
+        print("Subscribing to topic {}".format(topic))
+        client.subscribe(topic)
+    else:
+        print("Error {} connecting to broker".format(return_code))
+        sys.exit(1)
+
+def on_message(_unused_client, user_data, msg):
+    # user_data is the parsed args object registered via userdata= below
+    results = json.loads(msg.payload)
+    process(results, user_data.reclassify_interval)
+
+def process_mqtt(args):
+    # import here so MQTT support stays optional for file mode
+    import paho.mqtt.client as mqtt
+    client = mqtt.Client("Gulfstream", userdata=args)
+    client.on_connect = on_connect
+    client.on_message = on_message
+    client.connect(args.broker_address, args.broker_port)
+    client.loop_forever()
+
+
+def main():
+    try:
+        args = parse_args()
+        if args.mode == "file":
+            process_file(args)
+        else:
+            process_mqtt(args)
+        text_count = 0
+        barcode_count = 0
+        print("-------")
+        print("Summary")
+        print("-------")
+        print("Frames {}".format(frame_count))
+        inferenceCounts.classification = inferenceCounts.detection
+        print(inferenceCounts)
+        summary = []
+        for obj in tracked_objects.values():
+            summary_obj = {}
+            id = obj["id"]
+            for key in obj:
+                if isinstance(obj[key],Counter):
+                    print("key is : {}".format(key))
+                    if key == "text":
+                        obj[key] = {k:v for k, v in obj[key].items() if v > args.min_detections}
+                    summary_obj[key] = list(obj[key].items())
+                    obj[key] = list(obj[key].items())
+                    if key == "barcode":
+                        barcode_count += 1
+                else:
+                    summary_obj[key] = obj[key]
+                    print("obj[key]: {}".format(obj[key]))
+            detections = obj["label"][0][1]
+            if detections >= args.min_detections:
+                print_object(obj)
+                text_count += len(obj["text"])
+            summary.append(summary_obj)
+        print(json.dumps(summary))
+        print("Total Objects: {}".format(len(tracked_objects)))
+        print("Total Text count: {}".format(text_count))
+        print("Total Barcode count: {}".format(barcode_count))
+    except Exception:
+        print(traceback.format_exc())
+
+if __name__ == "__main__":
+    main()
diff --git a/benchmark-scripts/run.sh b/benchmark-scripts/run.sh
new file mode 100755
index 00000000..267a5ae0
--- /dev/null
+++ b/benchmark-scripts/run.sh
@@ -0,0 +1,371 @@
+#!/bin/bash -e
+#
+# Copyright (C) 2023 Intel Corporation.
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +RUN_PREFIX= +MODELS= +PIPELINES= +FRAMEWORK= +IMAGE= +VOLUME_MOUNT= +MODE=SERVICE +PORTS= +DEVICES= +DEFAULT_GSTREAMER_IMAGE="dlstreamer-pipeline-server-gstreamer" +DEFAULT_FFMPEG_IMAGE="dlstreamer-pipeline-server-ffmpeg" +ENTRYPOINT= +ENTRYPOINT_ARGS= +PRIVILEGED= +NETWORK= +USER= +INTERACTIVE=-it +DEVICE_CGROUP_RULE= +USER_GROUPS= +ENABLE_RTSP=${ENABLE_RTSP:-"false"} +ENABLE_WEBRTC=${ENABLE_WEBRTC:-"false"} +RTSP_PORT=8554 + +SCRIPT_DIR=$(dirname "$(readlink -f "$0")") +SOURCE_DIR=$(dirname $SCRIPT_DIR) +ENVIRONMENT=$(env | cut -f1 -d= | grep -E '_(proxy)$' | sed 's/^/-e / ' | tr '\n' ' ') +IGNORE_INIT_ERRORS=false + +show_options() { + echo "" + echo "Running Pipeline Server Image: '${IMAGE}'" + echo " Models: '${MODELS}'" + echo " Pipelines: '${PIPELINES}'" + echo " Framework: '${FRAMEWORK}'" + echo " Environment: '${ENVIRONMENT}'" + echo " Volume Mounts: '${VOLUME_MOUNT}'" + echo " Mode: '${MODE}'" + echo " Ports: '${PORTS}'" + echo " Name: '${NAME}'" + echo " Network: '${NETWORK}'" + echo " Entrypoint: '${ENTRYPOINT}'" + echo " EntrypointArgs: '${ENTRYPOINT_ARGS}'" + echo " User: '${USER}'" + echo " User Groups: '${USER_GROUPS}'" + echo " Devices: '${DEVICES}'" + echo " Device CGroup Rule: '${DEVICE_CGROUP_RULE}'" + echo "" +} + +show_help() { + echo "usage: run.sh" + echo " [--image image]" + echo " [--framework ffmpeg || gstreamer]" + echo " [--models path to models directory]" + echo " [--pipelines path to pipelines directory]" + echo " [-v additional volume mount to pass to docker run]" + echo " [-e additional environment to pass to docker run]" + echo " [--entrypoint-args additional parameters to pass to entrypoint in docker run]" + echo " [-p additional ports to pass to docker run]" + echo " [--network name network to pass to docker run]" + echo " [--user name of user to pass to docker run]" + echo " [--group-add name of user group to pass to docker run]" + echo " [--name container name to pass to docker run]" + echo " [--device device to pass to docker run]" + echo " [--enable-rtsp To enable rtsp re-streaming]" + echo " [--rtsp-port Specify the port to use for rtsp re-streaming]" + echo " [--enable-webrtc To enable WebRTC frame destination]" + echo " [--dev run in developer mode]" + exit 0 +} + +error() { + printf '%s\n' "$1" >&2 + exit +} + +enable_hardware_access() { + # GPU + if ls /dev/dri/render* 1> /dev/null 2>&1; then + echo "Found /dev/dri/render entry - enabling for GPU" + DEVICES+='--device /dev/dri ' + RENDER_GROUPS=$(stat -c '%g' /dev/dri/render*) + for group in $RENDER_GROUPS + do + USER_GROUPS+="--group-add $group " + done + fi + + # Intel(R) NCS2 + if [ -d /dev/bus/usb ]; then + echo "Found /dev/bus/usb - enabling for Intel(R) NCS2" + DEVICE_CGROUP_RULE=--device-cgroup-rule=\'c\ 189:*\ rmw\' + VOLUME_MOUNT+="-v /dev/bus/usb:/dev/bus/usb " + fi + + # HDDL + if compgen -G /dev/myriad* > /dev/null ; then + echo "Found /dev/myriad devices - enabling for HDDL-R" + VOLUME_MOUNT+="-v /var/tmp:/var/tmp -v /dev/shm:/dev/shm " + fi + + # Webcam + for device in $(ls /dev | grep video); do + echo "Found /dev/$device - enabling webcam" + DEVICES+="--device /dev/$device " + done + + # Microphone + if [ -e /dev/snd ]; then + echo "Found /dev/snd - enabling microphone" + DEVICES+="--device /dev/snd " + fi +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + -h | -\? | --help) + show_help # Display a usage synopsis. + exit + ;; + --dry-run) + RUN_PREFIX=echo + ;; + --image) # Takes an option argument; ensure it has been specified. 
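+        # Every value-taking option below follows this same idiom:
+        # validate that $2 is non-empty, consume it, then shift past it.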
+ if [ "$2" ]; then + IMAGE=$2 + shift + else + error 'ERROR: "--image" requires a non-empty option argument.' + fi + ;; + --models) + if [ "$2" ]; then + MODELS=$(realpath $2) + shift + else + error 'ERROR: "--models" requires a non-empty option argument.' + fi + ;; + --user) + if [ "$2" ]; then + USER="--user $2" + shift + else + error 'ERROR: "--user" requires a non-empty option argument.' + fi + ;; + --group-add) + if [ "$2" ]; then + USER_GROUPS+="--group-add $2 " + shift + else + error 'ERROR: "--group-add" requires a non-empty option argument.' + fi + ;; + --device) + if [ "$2" ]; then + DEVICES+="--device $2 " + shift + else + error 'ERROR: "--device" requires a non-empty option argument.' + fi + ;; + --privileged) + PRIVILEGED="--privileged " + ;; + --device-cgroup-rule) + if [ "$2" ]; then + DEVICE_CGROUP_RULE="--device-cgroup-rule=$2 " + shift + else + error 'ERROR: "--device-cgroup-rule" requires a non-empty option argument.' + fi + ;; + --pipelines) + if [ "$2" ]; then + PIPELINES=$(realpath $2) + shift + else + error 'ERROR: "--pipelines" requires a non-empty option argument.' + fi + ;; + --framework) + if [ "$2" ]; then + FRAMEWORK=$2 + shift + else + error 'ERROR: "--framework" requires a non-empty option argument.' + fi + ;; + -e) + if [ "$2" ]; then + ENVIRONMENT+="-e $2 " + shift + else + error 'ERROR: "-e" requires a non-empty option argument.' + fi + ;; + --entrypoint-args) + if [ "$2" ]; then + ENTRYPOINT_ARGS+="$2 " + shift + else + error 'ERROR: "--entrypoint-args" requires a non-empty option argument.' + fi + ;; + -p) + if [ "$2" ]; then + PORTS+="-p $2 " + shift + else + error 'ERROR: "-p" requires a non-empty option argument.' + fi + ;; + -v) + if [ "$2" ]; then + VOLUME_MOUNT+="-v $2 " + shift + else + error 'ERROR: "-v" requires a non-empty option argument.' + fi + ;; + --dev) + MODE=DEV + ;; + --name) + if [ "$2" ]; then + NAME=$2 + shift + else + error 'ERROR: "--name" requires a non-empty option argument.' + fi + ;; + --network) + if [ "$2" ]; then + NETWORK="--network $2" + shift + else + error 'ERROR: "--network" requires a non-empty option argument.' + fi + ;; + --entrypoint) + if [ "$2" ]; then + ENTRYPOINT="--entrypoint $2" + shift + else + error 'ERROR: "--entrypoint" requires a non-empty option argument.' + fi + ;; + --rtsp-port) + if [ "$2" ]; then + RTSP_PORT=$2 + shift + else + error 'ERROR: "--rtsp-port" requires a non-empty option argument.' + fi + ;; + --enable-rtsp) + ENABLE_RTSP=true + ;; + --enable-webrtc) + ENABLE_WEBRTC=true + ;; + --non-interactive) + unset INTERACTIVE + ;; + --) # End of all options. + shift + break + ;; + -?*) + printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2 + ;; + *) # Default case: No more options, so break out of the loop. 
+ break ;; + esac + + shift +done + +if [ -z "$FRAMEWORK" ]; then + FRAMEWORK="gstreamer" +elif [ $FRAMEWORK != 'gstreamer' ] && [ $FRAMEWORK != 'ffmpeg' ]; then + echo "Invalid framework" + show_help +fi + +if [ -z "$IMAGE" ]; then + IMAGE=DEFAULT_${FRAMEWORK^^}_IMAGE + IMAGE=${!IMAGE} +fi + +if [ -z "$NAME" ]; then + # Convert tag separator if exists + NAME=${IMAGE//[\:\/]/_} +fi + +if [ "${MODE}" == "DEV" ]; then + VOLUME_MOUNT+="-v $SOURCE_DIR:/home/pipeline-server/ " + VOLUME_MOUNT+="-v /tmp:/tmp " + VOLUME_MOUNT+="-v /dev:/dev " + if [ -z "$NETWORK" ]; then + NETWORK="--network=host" + fi + if [ -z "$ENTRYPOINT" ]; then + ENTRYPOINT="--entrypoint /bin/bash" + fi + if [ -z "$MODELS" ]; then + MODELS=$SOURCE_DIR/models + fi + if [ -z "$PIPELINES" ]; then + PIPELINES=$SOURCE_DIR/pipelines/$FRAMEWORK + fi + PRIVILEGED="--privileged " +elif [ ! -z "$ENTRYPOINT" ]; then + MODE=CUSTOM_ENTRYPOINT +elif [ "${MODE}" == "SERVICE" ]; then + if [ -z "$PORTS" ]; then + PORTS+="-p 8080:8080 " + fi +else + echo "Invalid Mode" + show_help +fi + +enable_hardware_access + +if [ "$ENABLE_RTSP" != "false" ]; then + ENVIRONMENT+="-e ENABLE_RTSP=$ENABLE_RTSP -e RTSP_PORT=$RTSP_PORT " + PORTS+="-p $RTSP_PORT:$RTSP_PORT " +fi + +if [ "$ENABLE_WEBRTC" != "false" ]; then + ENVIRONMENT+="-e ENABLE_WEBRTC=$ENABLE_WEBRTC " +fi + +if [ ! -z "$MODELS" ]; then + VOLUME_MOUNT+="-v $MODELS:/home/pipeline-server/models " +fi + +if [ ! -z "$PIPELINES" ]; then + VOLUME_MOUNT+="-v $PIPELINES:/home/pipeline-server/pipelines " +fi + +if [ ! -z "$VOLUME_MOUNT" ]; then + if [ -z "$USER" ]; then + USER="--user $UID" + fi +fi + +if [ ! -z "$USER" ]; then + for group in "audio" "users" + do + USER_GROUPS+="--group-add $group " + done +fi + +show_options + +echo "$RUN_PREFIX docker run $INTERACTIVE --rm $ENVIRONMENT -e IGNORE_INIT_ERRORS=$IGNORE_INIT_ERRORS $VOLUME_MOUNT $DEVICE_CGROUP_RULE $DEVICES $NETWORK $PORTS $ENTRYPOINT --name ${NAME} ${PRIVILEGED} ${USER} $USER_GROUPS $IMAGE ${ENTRYPOINT_ARGS}" + +# eval must be used to ensure the --device-cgroup-rule string is correctly parsed +eval "$RUN_PREFIX docker run $INTERACTIVE --rm $ENVIRONMENT -e IGNORE_INIT_ERRORS=$IGNORE_INIT_ERRORS $VOLUME_MOUNT $DEVICE_CGROUP_RULE $DEVICES $NETWORK $PORTS $ENTRYPOINT --name ${NAME} ${PRIVILEGED} ${USER} $USER_GROUPS $IMAGE ${ENTRYPOINT_ARGS}" + diff --git a/benchmark-scripts/run_server.sh b/benchmark-scripts/run_server.sh new file mode 100755 index 00000000..ed2de450 --- /dev/null +++ b/benchmark-scripts/run_server.sh @@ -0,0 +1,65 @@ +#!/bin/bash -e +# +# Copyright (C) 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +COMMAND=$1 +PIPELINE_NUMBER=$2 +TAG=sco-soc:2.0 +PIPELINE_SERVER_VERSION=0.7.2-beta +SOURCE_DIR=$(dirname "$(readlink -f "$0")") +PARENT_DIR=$(dirname $SOURCE_DIR) +PIPELINE_SERVER_DIR=$SOURCE_DIR/pipeline-server-$PIPELINE_SERVER_VERSION +STARTING_RTSP_PORT=9554 +STARTING_PORT=8080 + +if [ -z "$LOG_LEVEL" ]; then + LOG_LEVEL=INFO +fi + +if [ -z "$GST_DEBUG" ]; then + GST_DEBUG=0 +fi + +if [ -z "$COMMAND" ]; then + COMMAND="START" +fi + +if [ "${COMMAND,,}" = "start" ]; then + mkdir -p $SOURCE_DIR/.cl-cache + mkdir -p $PARENT_DIR/results + mkdir -p $PARENT_DIR/rendered + + REDIRECT="" + if [ "${2,,}" = "quiet" ]; then + $PIPELINE_SERVER_DIR/docker/run.sh --network host --image postman/newman --name pipeline-server -v $SOURCE_DIR/pipelines:/home/pipeline-server/pipelines -v $SOURCE_DIR/extensions:/home/pipeline-server/extensions -v $SOURCE_DIR/pipeline-server/results:/tmp/results -e cl_cache_dir=/home/pipeline-server/.cl-cache -v $SOURCE_DIR/.cl-cache:/home/pipeline-server/.cl-cache -v $SOURCE_DIR/models:/home/pipeline-server/models -e GST_DEBUG=$GST_DEBUG --enable-rtsp --non-interactive --rtsp-port 9554 >$SOURCE_DIR/server.log.txt 2>&1 & + else + export GST_DEBUG=0 + export LOG_LEVEL=INFO + export ENABLE_RTSP=true + export IGNORE_INIT_ERRORS=true + RTSP_PORT=$STARTING_RTSP_PORT + PORT=$STARTING_PORT + for i in $( seq 0 $(($PIPELINE_NUMBER - 1)) ) + do + CONTAINER_NAME="pipeline-server"$(($i + 1)) + LOG_FILE_NAME="server"$(($i + 1))".txt" + if [ $i = 0 ]; then + echo "./run.sh --network host --image $TAG --name pipeline-server -v /dev/dri:/dev/dri -v $SOURCE_DIR/../pipeline-server/pipelines:/home/pipeline-server/pipelines -v $SOURCE_DIR/../pipeline-server/extensions:/home/pipeline-server/extensions -v $SOURCE_DIR/results:/tmp/results -v /home/intel-admin/poc-recordings:/home/poc-recordings -v $SOURCE_DIR/../pipeline-server/models/2022:/home/pipeline-server/models -e LOG_LEVEL=$LOG_LEVEL -e GST_DEBUG=$GST_DEBUG --non-interactive" + ./run.sh --network host --image $TAG --name pipeline-server -v `pwd`/../sample-media/:/vids -v /dev/dri:/dev/dri -v $SOURCE_DIR/../pipeline-server/pipelines:/home/pipeline-server/pipelines -v $SOURCE_DIR/../pipeline-server/extensions:/home/pipeline-server/extensions -v $SOURCE_DIR/results:/tmp/results -v /home/intel-admin/poc-recordings:/home/poc-recordings -v $SOURCE_DIR/../pipeline-server/models/2022:/home/pipeline-server/models -e LOG_LEVEL=$LOG_LEVEL -e GST_DEBUG=$GST_DEBUG --non-interactive > server.txt 2>&1 & + else + RTSP_PORT=$(($RTSP_PORT + 1)) + PORT=$(($PORT + 1)) + ./run.sh --network host --image $TAG --name $CONTAINER_NAME -v `pwd`/../sample-media/:/vids -v $SOURCE_DIR/../pipeline-server/pipelines:/home/pipeline-server/pipelines -v $SOURCE_DIR/../pipeline-server/extensions:/home/pipeline-server/extensions -v $SOURCE_DIR/results:/tmp/results -e cl_cache_dir=/home/pipeline-server/.cl-cache -v $SOURCE_DIR/../pipeline-server/models/2022:/home/pipeline-server/models -e LOG_LEVEL=$LOG_LEVEL -e GST_DEBUG=$GST_DEBUG --entrypoint-args --port=$PORT --non-interactive > $LOG_FILE_NAME 2>&1 & + fi + done + fi + +elif [ "${COMMAND,,}" = "stop" ]; then + docker kill pipeline-server +elif [ "${COMMAND,,}" = "attach" ]; then + docker attach pipeline-server +fi + diff --git a/benchmark-scripts/start_emulated_camera_pipelines.sh b/benchmark-scripts/start_emulated_camera_pipelines.sh new file mode 100755 index 00000000..b0cf0301 --- /dev/null +++ b/benchmark-scripts/start_emulated_camera_pipelines.sh @@ -0,0 +1,64 @@ 
+#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +#to control number of object detected +INPUT_CAMERA=$1 +#to control number of instance for pipeline-server +PIPELINE_NUMBER=$2 +MODEL=yolov5s +STARTING_PORT=8080 + +# starting emulated cameras locally +./camera-simulator.sh +sleep 1 + +pipeline=' +{ + "source": { + "uri": "rtsp://127.0.0.1:8554/mycam", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results/r.jsonl", + "format":"json-lines" + }, + "frame": { + "type": "rtsp", + "sync-with-source": false, + "path": "mycam" + } + }, + "parameters": { + "classification": { + "device": "CPU" + }, + "detection": { + "device": "CPU" + } + } +}' + +pipelineFile=$MODEL"_tracking_mixed_cpu_full" +echo $pipelineFile +PORT=$STARTING_PORT +echo "Performing mixed tracking with OD-interval=1, OC-interval=1, OCR-interval=3, Barcode-interval=3 " +for i in $( seq 0 $(($PIPELINE_NUMBER - 1)) ) +do + if [ $i != 0 ]; then + PORT=$(($PORT + 1)) + fi + pipeline_num=$((i + 1)) + declare pipelineName="pipeline"$pipeline_num + pipelineName=$(echo $pipeline | sed "s/mycam/$INPUT_CAMERA/g") + pipelineName=${pipelineName/r.json/r$i.json} + echo $pipelineName + curl -H 'Content-Type: application/json' http://127.0.0.1:$PORT/pipelines/xeon/$pipelineFile --data @- < $2) print 1;}') )) + then + echo "yes" + else + echo "no" + MEETS_FPS=false + echo "Max number of pipelines: $(( $num_pipelines ))" + fi + + echo "Stopping server" + ./stop_server.sh + sleep 30 + num_pipelines=$(( $num_pipelines + 1 )) +done #done while diff --git a/benchmark-scripts/stream_density_testcases.sh b/benchmark-scripts/stream_density_testcases.sh new file mode 100755 index 00000000..afae9e6a --- /dev/null +++ b/benchmark-scripts/stream_density_testcases.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +#test case 1: minimum number of streams +min_expected=2 +echo "testcase: minimum ${min_expected} streams expected" +#testing for core system with rtsp, you may need to edit the input source if rtsp is different for camera device +./stream_density.sh rtsp://127.0.0.1:8554/camera_1 core 14.5 > testoutput.txt 2>&1 +res=$(grep -i -Eo "Max number of pipelines: ([0-9])" ./testoutput.txt | awk -F ' ' '{print $5}') + +if [ -z "${res}" ]; then + echo "maximum pipeline numbers not found, test failed" +elif [ "${res}" -ge "${min_expected}" ]; then + echo "test passed, maximum pipeline number = ${res}" +else + echo "failed to reach the min. 
${min_expected} streams as maximum pipeline number = ${res}" +fi + +echo + +# test case 2: reach minimum target FPS +min_expected_fps=14 +echo "testcase: min target fps = ${min_expected_fps}" +./stream_density.sh rtsp://127.0.0.1:8554/camera_1 core ${min_expected_fps} > testoutput2.txt 2>&1 +max_pipelines=$(grep -i -Eo "Max number of pipelines: ([0-9])" ./testoutput2.txt | awk -F ' ' '{print $5}') +fps_at_max_pipelines=$(grep -i -Eo "FPS for total number of pipeline ${max_pipelines}: ([0-9]+.[0-9]+)" ./testoutput2.txt | awk -F ' ' '{print $8}') +min_fps="" +if [ -z "${fps_at_max_pipelines}" ]; then + echo "could not find the fps for max number of pipelines, trying to find the last fps" + min_fps=$(grep -i -Eo "FPS for total number of pipeline ([0-9]+): ([0-9]+.[0-9]+)" ./testoutput2.txt | sort -r | head -1 | awk -F ' ' '{print $8}') +else + min_fps=${fps_at_max_pipelines} +fi + +if [ -z "${min_fps}" ]; then + echo "minimum fps for pipelines not found, test failed" +elif [ 1 -eq "$(echo "${min_fps} >= ${min_expected_fps}" | bc)" ]; then + echo "test passed, FPS for pipeline is greater than or equal to the minimum fps expected (${min_expected_fps}) = ${min_fps} and max. number of pipelines = ${max_pipelines}" +else + echo "failed to reach the min. fps ${min_expected_fps} for maximum pipeline number = ${max_pipelines} and the actual fps = ${min_fps}" +fi + +echo + +# test case 3: expected it cannot reach very high minimum target FPS +min_expected_fps=50 +echo "testcase: min target fps = ${min_expected_fps}" +./stream_density.sh rtsp://127.0.0.1:8554/camera_1 core ${min_expected_fps} > testoutput.txt 2>&1 +max_pipelines=$(grep -i -Eo "Max number of pipelines: ([0-9])" ./testoutput.txt | awk -F ' ' '{print $5}') +min_fps=$(grep -i -Eo "FPS for total number of pipeline ([0-9]+): ([0-9]+.[0-9]+)" ./testoutput.txt | sort -r | head -1 | awk -F ' ' '{print $8}') + +# expect case that we couldn't reach the target high FPS like over 30 or 50 +if [ -z "${min_fps}" ]; then + echo "minimum fps for pipelines not found, test failed" +elif [ 1 -eq "$(echo "${min_fps} < ${min_expected_fps}" | bc)" ]; then + echo "test passed, FPS for pipeline is expected to be unable to reach the target FPS (${min_expected_fps}) = ${min_fps} and max. number of pipelines = ${max_pipelines}" +else + echo "failed test and FPS exceeds the min. target fps ${min_expected_fps} for maximum pipeline number = ${max_pipelines} and the actual fps = ${min_fps}" +fi \ No newline at end of file diff --git a/benchmark-scripts/utility_install.sh b/benchmark-scripts/utility_install.sh new file mode 100755 index 00000000..4a0f1a4a --- /dev/null +++ b/benchmark-scripts/utility_install.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip +pip3 install -r requirements.txt + +if [ -d "/opt/intel/pcm" ] +then + rm -R /opt/intel/pcm +fi + +echo "Installing IOTOP" +apt --yes install iotop +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: IOTOP install was NOT successful" + exit 1 +fi + +#install SAR +echo "Installing SAR" +apt --yes install sysstat -y +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: SAR install was NOT successful" + exit 1 +fi + +#install jq +echo "Installing jq" +apt --yes install jq +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: jq install was NOT successful" + exit 1 +fi + +#install curl +echo "Installing curl" +apt --yes install curl +ret=$? 
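+# $? holds the exit status of the preceding install command; any non-zero
+# value aborts setup with the descriptive error below.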
+if [ $ret -ne 0 ]; then + echo "ERROR: curl install was NOT successful" + exit 1 +fi + +#install cmake for building pcm +echo "Installing cmake" +apt --yes install cmake +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: cmake install was NOT successful" + exit 1 +fi + +PCM_DIRECTORY=/opt/intel +echo "Installing PCM" +[ ! -d "$PCM_DIRECTORY" ] && mkdir -p "$PCM_DIRECTORY" +cd $PCM_DIRECTORY +git clone --recursive https://github.com/opcm/pcm.git +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: git clone of PCM was NOT successful" + exit 1 +fi + +#if the checkout was good then build PCM +cd pcm +mkdir build +cd build +cmake .. +cmake --build . + +if [ $ret -ne 0 ]; then + echo "ERROR: build of PCM was NOT successful" + exit 1 +fi + + +#install xpumanager +server_gpu=`dmesg | grep -i "class 0x038000" | grep "8086"` + +#echo "return is: $server_gpu" +if grep -q "class" <<< "$server_gpu"; then + echo "Install xpumanager" + wget https://github.com/intel/xpumanager/releases/download/V1.2.3/xpumanager_1.2.3_20230221.054746.0e2d4bfb+ubuntu22.04_amd64.deb + apt --yes install intel-gsc + apt --yes install level-zero + apt --yes install intel-level-zero-gpu + sudo dpkg -i ./xpumanager_1.2.3_20230221.054746.0e2d4bfb+ubuntu22.04_amd64.deb + +else + echo "Do not install xpumanager" +fi diff --git a/camera-simulator/camera-simulator.sh b/camera-simulator/camera-simulator.sh new file mode 100755 index 00000000..f2de6cd1 --- /dev/null +++ b/camera-simulator/camera-simulator.sh @@ -0,0 +1,117 @@ +#!/bin/bash -e +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +COMMAND="start" +SOURCE_DIR=$(dirname $(dirname "$(readlink -f "$0")")) +CAMERAS= +FILES= + + +get_options() { + while :; do + case $1 in + -h | -\? | --help) + show_help + exit + ;; + --command) + if [ "$2" ]; then + COMMAND=$2 + shift + else + error 'ERROR: "--command" requires an argument.' + fi + ;; + --cameras) + if [ "$2" ]; then + CAMERAS=$2 + shift + else + error 'ERROR: "--cameras" requires an argument.' + fi + ;; + --files) + if [ "$2" ]; then + FILES=$2 + if [[ ! -e $SOURCE_DIR/sample-media/$2 ]]; then + echo "File $2 does not exist" + exit 1 + fi + shift + else + error 'ERROR: "--files" requires an argument.' 
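+				# Note: --files paths are resolved relative to the
+				# sample-media directory and validated above before shift.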
+            fi
+            ;;
+        --)
+            shift
+            break
+            ;;
+        -?*)
+            error 'ERROR: Unknown option: ' $1
+            ;;
+        ?*)
+            error 'ERROR: Unknown option: ' $1
+            ;;
+        *)
+            break
+            ;;
+        esac
+
+        shift
+    done
+
+}
+
+show_help() {
+    echo "usage: camera-simulator.sh"
+    echo "  [--command start,stop]"
+    echo "  [--cameras number of cameras]"
+    echo "  [--files comma separated list of files within sample-media]"
+    exit 0
+}
+
+# print an error message and exit (used by get_options)
+error() {
+    printf '%s\n' "$*" >&2
+    exit 1
+}
+
+get_options "$@"
+
+if [ -z "$COMMAND" ]; then
+    COMMAND="START"
+fi
+
+if [ "${COMMAND,,}" = "start" ]; then
+
+    if [ -z "$FILES" ]; then
+        cd $SOURCE_DIR/sample-media
+        FILES=( *.mp4 )
+    else
+        IFS=','; FILES=( $FILES ); unset IFS;
+    fi
+
+    if [ -z "$CAMERAS" ]; then
+        CAMERAS=${#FILES[@]}
+    fi
+
+    cd $SOURCE_DIR/camera-simulator
+
+    docker run --rm -t --network=host --name camera-simulator aler9/rtsp-simple-server >rtsp_simple_server.log.txt 2>&1 &
+    index=0
+    while [ $index -lt $CAMERAS ]
+    do
+        for file in "${FILES[@]}"
+        do
+            echo "Starting camera: rtsp://127.0.0.1:8554/camera_$index from $file"
+            docker run -t --rm --entrypoint ffmpeg --network host -v $SOURCE_DIR/sample-media:/home/pipeline-server/sample-media openvino/ubuntu20_data_runtime:2021.4.2 -nostdin -re -stream_loop -1 -i /home/pipeline-server/sample-media/$file -c copy -f rtsp -rtsp_transport tcp rtsp://localhost:8554/camera_$index >/dev/null 2>&1 &
+            ((index+=1))
+            if [ $CAMERAS -le $index ]; then
+                break
+            fi
+            sleep 1
+        done
+    done
+
+elif [ "${COMMAND,,}" = "stop" ]; then
+    docker kill camera-simulator 2> /dev/null
+fi
+
diff --git a/configs/extensions/OCR_post_processing.py b/configs/extensions/OCR_post_processing.py
new file mode 100644
index 00000000..f704f10a
--- /dev/null
+++ b/configs/extensions/OCR_post_processing.py
@@ -0,0 +1,66 @@
+'''
+* Copyright (C) 2023 Intel Corporation.
+*
+* SPDX-License-Identifier: BSD-3-Clause
+'''
+
+import sys
+import gi
+gi.require_version('Gst', '1.0')
+from gi.repository import Gst
+import numpy as np
+Gst.init(sys.argv)
+
+# The net output is a blob with the shape 16, 1, 37 in the format W, B, L, where:
+# W - output sequence length
+# B - batch size
+# L - confidence distribution across alphanumeric symbols:
+#     #0123456789abcdefghijklmnopqrstuvwxyz, where # is the special blank
+#     character for the CTC decoding algorithm.
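+#
+# Illustrative sketch (the names probs and blank_index are not part of this
+# file): given a (W, L) matrix of per-step probabilities, greedy decoding is
+#     ids = probs.argmax(axis=1)          # best symbol per time step
+#     text = "".join(ALPHABET[i] for i in ids if i != blank_index)
+# A full CTC decoder would additionally collapse consecutive repeated symbols.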
+# The network output can be decoded by a CTC greedy decoder or a CTC beam-search
+# decoder; this extension implements the CTC greedy decoder.
+
+class OCR:
+    W = 16
+    B = 1
+    L = 37
+
+    ALPHABET = ["#", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g",
+                "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
+
+    def __init__(self, threshold=0.5):
+        self.threshold = threshold
+
+    def softmax(self, value):
+        e_value = np.exp(value - np.max(value))
+        return e_value / e_value.sum()
+
+    def process_frame(self, frame):
+        try:
+            for region in frame.regions():
+                for tensor in region.tensors():
+                    label = ""
+
+                    if tensor["converter"] == "raw_data_copy":
+                        data = tensor.data()
+                        data = data.reshape(self.W, self.L)
+                        for i in range(self.W):
+                            # softmax once per time step; the raw output is logits
+                            conf_list = self.softmax(data[i][:])
+                            highest_prob = max(conf_list)
+                            # skip time steps below the confidence threshold
+                            if highest_prob < self.threshold:
+                                continue
+                            index = np.where(conf_list == highest_prob)[0][0]
+                            # index 0 is the CTC blank character
+                            if index == 0:
+                                continue
+                            label += OCR.ALPHABET[index]
+                        if label:
+                            tensor.set_label(label)
+        except Exception as e:
+            print(str(e))
+
+        return True
diff --git a/configs/extensions/OCR_post_processing_0012.py b/configs/extensions/OCR_post_processing_0012.py
new file mode 100644
index 00000000..251f9bb0
--- /dev/null
+++ b/configs/extensions/OCR_post_processing_0012.py
@@ -0,0 +1,64 @@
+
+'''
+* Copyright (C) 2023 Intel Corporation.
+*
+* SPDX-License-Identifier: BSD-3-Clause
+'''
+
+import sys
+import gi
+gi.require_version('Gst', '1.0')
+from gi.repository import Gst
+import numpy as np
+Gst.init(sys.argv)
+
+# The net output is a blob with the shape 30, 1, 37 in the format W, B, L, where:
+# W - output sequence length
+# B - batch size
+# L - confidence distribution across alphanumeric symbols:
+#     0123456789abcdefghijklmnopqrstuvwxyz#, where # is the special blank
+#     character for the CTC decoding algorithm.
+# The network output can be decoded by a CTC greedy decoder or a CTC beam-search
+# decoder; this extension implements the CTC greedy decoder.
+
+class OCR:
+    W = 30
+    B = 1
+    L = 37
+
+    ALPHABET = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g",
+                "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "#"]
+
+    def __init__(self, threshold=0.5):
+        self.threshold = threshold
+
+    def softmax(self, value):
+        e_value = np.exp(value - np.max(value))
+        return e_value / e_value.sum()
+
+    def process_frame(self, frame):
+        try:
+            for region in frame.regions():
+                for tensor in region.tensors():
+                    label = ""
+                    if tensor["converter"] == "raw_data_copy":
+                        data = tensor.data()
+                        data = data.reshape(self.W, self.L)
+                        for i in range(self.W):
+                            # softmax once per time step; the raw output is logits
+                            conf_list = self.softmax(data[i][:])
+                            highest_prob = max(conf_list)
+                            # skip time steps below the confidence threshold
+                            if highest_prob < self.threshold:
+                                continue
+                            index = np.where(conf_list == highest_prob)[0][0]
+                            # the last index is the CTC blank character
+                            if index == OCR.L-1:
+                                continue
+                            label += OCR.ALPHABET[index]
+                        if label:
+                            tensor.set_label(label)
+        except Exception as e:
+            print(str(e))
+
+        return True
\ No newline at end of file
diff --git a/configs/extensions/barcode.py b/configs/extensions/barcode.py
new file mode 100644
index 00000000..9cec082b
--- /dev/null
+++ b/configs/extensions/barcode.py
@@ -0,0 +1,178 @@
+'''
+* Copyright (C) 2023 Intel Corporation.
+*
+* SPDX-License-Identifier: BSD-3-Clause
+'''
+
+import zxingcpp
+import pyzbar.pyzbar as pyzbar
+from pyzbar.pyzbar import ZBarSymbol
+#from server.common.utils import logging
+from collections import OrderedDict
+from dataclasses import dataclass
+from abc import ABC, abstractmethod
+
+#logger = logging.get_logger('barcode', is_static=True)
+
+
+@dataclass
+class DetectedObject:
+    x: int
+    y: int
+    w: int
+    h: int
+    label: str
+    confidence: float
+
+
+class BarcodeDecoder(ABC):
+    def __init__(self) -> None:
+        pass
+
+    @abstractmethod
+    def decode(self, region_data):
+        pass
+
+    def convert_bytestring(self, barcode):
+        # pyzbar returns bytes, zxing-cpp returns str; normalize to str
+        if isinstance(barcode, bytes):
+            return barcode.decode('utf-8')
+        return barcode
+
+
+class PyZbar(BarcodeDecoder):
+    BARCODE_TYPES = [ZBarSymbol.EAN2,
+                     ZBarSymbol.EAN5,
+                     ZBarSymbol.EAN8,
+                     ZBarSymbol.UPCE,
+                     ZBarSymbol.ISBN10,
+                     ZBarSymbol.UPCA,
+                     ZBarSymbol.EAN13,
+                     ZBarSymbol.ISBN13,
+                     ZBarSymbol.COMPOSITE,
+                     ZBarSymbol.I25,
+                     ZBarSymbol.DATABAR,
+                     ZBarSymbol.DATABAR_EXP,
+                     ZBarSymbol.CODABAR,
+                     ZBarSymbol.CODE39,
+                     ZBarSymbol.PDF417,
+                     ZBarSymbol.SQCODE,
+                     ZBarSymbol.CODE93,
+                     ZBarSymbol.CODE128]
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def decode(self, region_data):
+        barcodes = []
+        decodedObjects = pyzbar.decode(
+            region_data, PyZbar.BARCODE_TYPES)
+        for barcode in decodedObjects:
+            (x, y, w, h) = barcode.rect
+            new_label = "barcode: {}".format(
+                self.convert_bytestring(barcode.data))
+            barcode = DetectedObject(x, y, w, h, new_label, 0.9)
+            #logger.debug("Adding x {} y {} w {} h {} label {}".format(
+            #    x, y, w, h, new_label))
+            barcodes.append(barcode)
+        return barcodes
+
+
+class ZxingCpp(BarcodeDecoder):
+    def __init__(self) -> None:
+        super().__init__()
+
+    def decode(self, region_data):
+        barcodes = []
+        barcode = zxingcpp.read_barcode(
+            region_data, zxingcpp.BarcodeFormat.UPCA)
+        if barcode is None:
+            return []
+        (x, y, w, h) = (barcode.position.top_left.x,
+                        barcode.position.top_left.y, 0, 0)
+        new_label = "barcode: {}".format(
+            self.convert_bytestring(barcode.text))
+        barcode = DetectedObject(x, y, w, h, new_label, 0.9)
+        barcodes.append(barcode)
+        return barcodes
+
+
+class LRUCache:
+    def __init__(self, max_tracked_objects):
+        self.data = OrderedDict()
+        self.capacity = max_tracked_objects
+
+    def get(self, key: int) -> int:
+        if key not in self.data:
+            return None
+        else:
+            self.data.move_to_end(key)
+            return self.data[key]
+
+    def put(self, key: int, value: int) -> None:
+        self.data[key] = value
+        self.data.move_to_end(key)
+        if len(self.data) > self.capacity:
+            self.data.popitem(last=False)
+
+
+class BarcodeDetection:
+    SUPPORTED_LIBRARIES = ["pyzbar", "zxingcpp"]
+
+    def __init__(self, disable=False, decode_type="zxingcpp", reclassify_interval=5, max_tracked_objects=20):
+        self.disable = disable
+
+        self.reclassify_interval = reclassify_interval
+        self.frame_count = 0
+        self.tracked_objects = LRUCache(max_tracked_objects)
+
+        #if self.disable:
+        #    logger.info("Barcode disabled")
+
+        if decode_type == "pyzbar":
+            self.decoder = PyZbar()
+        elif decode_type == "zxingcpp":
+            self.decoder = ZxingCpp()
+
+    def process_frame(self, frame):
+
+        if self.disable:
+            return True
+        self.frame_count += 1
+        skip_frame_processing = False
+        if self.reclassify_interval != -1 and (self.reclassify_interval == 0 or (self.frame_count % self.reclassify_interval != 0)):
+            skip_frame_processing = True
+
+        regions = list(frame.regions())
+        with frame.data() as frame_data:
+            for region in regions:
+                region_rect = region.rect()
+                (o_x, o_y, _, _) = region_rect
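+                # (o_x, o_y) is the detected object's top-left corner in full-frame
+                # coordinates; barcode rectangles decoded from the ROI crop below
+                # are relative to the crop, so they are shifted by (o_x, o_y)
+                # before being re-added to the frame.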
+                object_id = region.object_id()
+
+                # Do not reclassify, re-use prior results
+                tracked_objects_list = self.tracked_objects.get(object_id)
+                if skip_frame_processing and tracked_objects_list:
+                    for tracked in tracked_objects_list:
+                        x, y, w, h, label, confidence = tracked.x, tracked.y, tracked.w, tracked.h, tracked.label, tracked.confidence
+                        #logger.debug("Adding barcode region from tracked objects x {} y {} w {} h {} label {}".format(
+                        #    x, y, w, h, label))
+                        frame.add_region(
+                            x+o_x, y+o_y, w, h, label, confidence)
+                    continue
+
+                region_data = frame_data[region_rect.y:region_rect.y +
+                                         region_rect.h, region_rect.x:region_rect.x+region_rect.w]
+                barcodes = self.decoder.decode(region_data)
+                tracked_barcodes = []
+                for barcode in barcodes:
+                    #logger.debug("Adding barcode region x {} y {} w {} h {} label {}".format(
+                    #    barcode.x+o_x, barcode.y+o_y, barcode.w, barcode.h, barcode.label))
+                    frame.add_region(barcode.x+o_x, barcode.y+o_y, barcode.w, barcode.h,
+                                     barcode.label, barcode.confidence)
+                    tracked_object = DetectedObject(
+                        barcode.x, barcode.y, barcode.w, barcode.h, barcode.label, barcode.confidence)
+                    tracked_barcodes.append(tracked_object)
+                if tracked_barcodes:
+                    self.tracked_objects.put(object_id, tracked_barcodes)
+
+        return True
diff --git a/configs/extensions/barcode_nv12_to_gray.py b/configs/extensions/barcode_nv12_to_gray.py
new file mode 100644
index 00000000..86d704b4
--- /dev/null
+++ b/configs/extensions/barcode_nv12_to_gray.py
@@ -0,0 +1,234 @@
+'''
+* Copyright (C) 2023 Intel Corporation.
+*
+* SPDX-License-Identifier: BSD-3-Clause
+'''
+
+import zxingcpp
+import pyzbar.pyzbar as pyzbar
+from pyzbar.pyzbar import ZBarSymbol
+#from server.common.utils import logging
+from collections import OrderedDict
+from dataclasses import dataclass
+from abc import ABC, abstractmethod
+
+import cv2
+import gi
+gi.require_version('Gst', '1.0')
+gi.require_version("GstVideo", "1.0")
+from gi.repository import Gst, GstVideo
+import gstgva as va
+import numpy as np
+import ctypes
+
+#logger = logging.get_logger('barcode', is_static=True)
+
+
+@dataclass
+class DetectedObject:
+    x: int
+    y: int
+    w: int
+    h: int
+    label: str
+    confidence: float
+
+
+class BarcodeDecoder(ABC):
+    def __init__(self) -> None:
+        pass
+
+    @abstractmethod
+    def decode(self, region_data):
+        pass
+
+    def convert_bytestring(self, barcode):
+        # pyzbar returns bytes, zxing-cpp returns str; normalize to str
+        if isinstance(barcode, bytes):
+            return barcode.decode('utf-8')
+        return barcode
+
+
+class PyZbar(BarcodeDecoder):
+    BARCODE_TYPES = [ZBarSymbol.EAN2,
+                     ZBarSymbol.EAN5,
+                     ZBarSymbol.EAN8,
+                     ZBarSymbol.UPCE,
+                     ZBarSymbol.ISBN10,
+                     ZBarSymbol.UPCA,
+                     ZBarSymbol.EAN13,
+                     ZBarSymbol.ISBN13,
+                     ZBarSymbol.COMPOSITE,
+                     ZBarSymbol.I25,
+                     ZBarSymbol.DATABAR,
+                     ZBarSymbol.DATABAR_EXP,
+                     ZBarSymbol.CODABAR,
+                     ZBarSymbol.CODE39,
+                     ZBarSymbol.PDF417,
+                     ZBarSymbol.SQCODE,
+                     ZBarSymbol.CODE93,
+                     ZBarSymbol.CODE128]
+
+    def __init__(self) -> None:
+        super().__init__()
+
+    def decode(self, region_data):
+        barcodes = []
+        decodedObjects = pyzbar.decode(
+            region_data, PyZbar.BARCODE_TYPES)
+        for barcode in decodedObjects:
+            (x, y, w, h) = barcode.rect
+            new_label = "barcode: {}".format(
+                self.convert_bytestring(barcode.data))
+            barcode = DetectedObject(x, y, w, h, new_label, 0.9)
+            #logger.debug("Adding x {} y {} w {} h {} label {}".format(
+            #    x, y, w, h, new_label))
+            barcodes.append(barcode)
+        return barcodes
+
+
+class ZxingCpp(BarcodeDecoder):
+    def __init__(self) -> None:
+        super().__init__()
+
+    def decode(self, region_data):
+        barcodes = []
+        barcode = zxingcpp.read_barcode(
+            region_data,
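+            # only UPC-A is attempted; decoding other symbologies would require
+            # passing additional zxingcpp.BarcodeFormat flags here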
zxingcpp.BarcodeFormat.UPCA) + if barcode is None: + return [] + (x, y, w, h) = (barcode.position.top_left.x, + barcode.position.top_left.y, 0, 0) + new_label = "barcode: {}".format( + self.convert_bytestring(barcode.text)) + barcode = DetectedObject(x, y, w, h, new_label, 0.9) + barcodes.append(barcode) + return barcodes + + +class LRUCache: + def __init__(self, max_tracked_objects): + self.data = OrderedDict() + self.capacity = max_tracked_objects + + def get(self, key: int) -> int: + if key not in self.data: + return None + else: + self.data.move_to_end(key) + return self.data[key] + + def put(self, key: int, value: int) -> None: + self.data[key] = value + self.data.move_to_end(key) + if len(self.data) > self.capacity: + self.data.popitem(last=False) + + +class BarcodeDetection: + SUPPORTED_LIBRARIES = ["pyzbar", "zxingcpp"] + + def __init__(self, disable=False, decode_type="zxingcpp", reclassify_interval=5, max_tracked_objects=20): + self.disable = disable + + self.reclassify_interval = reclassify_interval + self.frame_count = 0 + self.tracked_objects = LRUCache(max_tracked_objects) + + #if self.disable: + #logger.info("Barcode disabled") + + if decode_type == "pyzbar": + self.decoder = PyZbar() + elif decode_type == "zxingcpp": + self.decoder = ZxingCpp() + + def process_frame(self, frame): + + if self.disable: + return True + self.frame_count += 1 + skip_frame_processing = False + if self.reclassify_interval != -1 and (self.reclassify_interval == 0 or (self.frame_count % self.reclassify_interval != 0)): + skip_frame_processing = True + + regions = list(frame.regions()) + do_zxing_flag = False + for region in regions: + region_rect = region.rect() + (o_x, o_y, _, _) = region_rect + object_id = region.object_id() + + # Do not reclassify, re-use prior results + tracked_objects_list = self.tracked_objects.get(object_id) + if skip_frame_processing and (tracked_objects_list is not None): + for tracked in tracked_objects_list: + x, y, w, h, label, confidence = tracked.x, tracked.y, tracked.w, tracked.h, tracked.label, tracked.confidence + #logger.debug("Adding barcode region from tracked objects x {} y {} w {} h {} label {}".format( + # x, y, w, h, label)) + frame.add_region( + x+o_x, y+o_y, w, h, label+'_tracked', confidence) + continue + do_zxing_flag = True + + with va.util.gst_buffer_data(frame._VideoFrame__buffer, Gst.MapFlags.READ) as data: + n_planes = frame._VideoFrame__video_info.finfo.n_planes + h, w = frame.video_info().height, frame.video_info().width + meta = frame.video_meta() + stride = meta.stride[0] + + bytes_per_pix = frame.video_info().finfo.pixel_stride[0] + # 3120 input_h =3136 + # input_h = int(len(data) / (w * bytes_per_pix) / 1.5) + input_h = int(meta.offset[1] / stride) + planes = [np.ndarray((h, w, bytes_per_pix), + buffer=data, dtype=np.uint8, strides=(stride,1,1))] + offset = stride * input_h + + data_ptr = ctypes.addressof(data) + offset + data_size = stride * input_h // 2 + plane_raw = ctypes.cast(data_ptr, ctypes.POINTER( + ctypes.c_byte * data_size)).contents + if n_planes == 2: + uv_plane = np.ndarray((h // 2, w, bytes_per_pix), + buffer=plane_raw, dtype=np.uint8, strides=(stride,1,1)) + planes.append(uv_plane) + else: + u_plane = np.ndarray((h // 4, w, bytes_per_pix), + buffer=plane_raw, dtype=np.uint8, strides=(stride,1,1)) + stride = meta.stride[1] + offset += stride * input_h // 2 + data_ptr = ctypes.addressof(data) + offset + plane_raw = ctypes.cast(data_ptr, ctypes.POINTER( + ctypes.c_byte * data_size)).contents + v_plane = np.ndarray((h // 4, w, 
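+                    # I420 case: quarter-size U and V planes follow the Y plane,
+                    # each read with the chroma stride reported by the video meta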
bytes_per_pix), + buffer=plane_raw, dtype=np.uint8, strides=(stride,1,1)) + planes.append(u_plane) + planes.append(v_plane) + img = np.concatenate(planes) +# mat = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12) + frame_data = cv2.cvtColor(img, cv2.COLOR_YUV2GRAY_NV12) + for region in regions: + region_rect = region.rect() + (o_x, o_y, _, _) = region_rect + object_id = region.object_id() + + # Do not reclassify, re-use prior results + tracked_objects_list = self.tracked_objects.get(object_id) + if skip_frame_processing and (tracked_objects_list is not None): + continue + + region_data = frame_data[region_rect.y:region_rect.y + + region_rect.h, region_rect.x:region_rect.x+region_rect.w] + barcodes = self.decoder.decode(region_data) + tracked_barcodes = [] + for barcode in barcodes: + #logger.debug("Adding barcode region x {} y {} w {} h {} label {}".format( + # barcode.x+o_x, barcode.y+o_y, barcode.w, barcode.h, barcode.label)) + frame.add_region(barcode.x+o_x, barcode.y+o_y, barcode.w, barcode.h, + barcode.label, barcode.confidence) + tracked_object = DetectedObject( + barcode.x, barcode.y, barcode.w, barcode.h, barcode.label, barcode.confidence) + tracked_barcodes.append(tracked_object) + self.tracked_objects.put(object_id, tracked_barcodes) + + return True diff --git a/configs/extensions/object_removal_by_label.py b/configs/extensions/object_removal_by_label.py new file mode 100644 index 00000000..2e0ec35a --- /dev/null +++ b/configs/extensions/object_removal_by_label.py @@ -0,0 +1,24 @@ +''' +* Copyright (C) 2023 Intel Corporation. +* +* SPDX-License-Identifier: BSD-3-Clause +''' + +class ObjectRemovalByLabel: + def __init__(self, + object_filter=["dining table", "chair", "person", "bed", "sink"]): + self._object_filter = object_filter + + def process_frame(self, frame): + if not self._object_filter: + return True + regions = list(frame.regions()) + removable_regions = [region for region in regions if region.label() in self._object_filter] + + orig_regions=list(frame.regions()) + removable_region_ids = [region.region_id() for region in removable_regions] + for region in orig_regions: + if region.region_id() in removable_region_ids: + frame.remove_region(region) + + return True diff --git a/configs/extensions/remote_classify.py b/configs/extensions/remote_classify.py new file mode 100644 index 00000000..0ecb4b59 --- /dev/null +++ b/configs/extensions/remote_classify.py @@ -0,0 +1,211 @@ +''' +* Copyright (C) 2023 Intel Corporation. 
+* +* SPDX-License-Identifier: BSD-3-Clause +''' + +import cv2 +import ovmsclient +import numpy +import json +import threading +import time +import os +from multiprocessing import Process, Queue +from vaserving.common.utils import logging + +def softmax(logits, axis=None): + exp = numpy.exp(logits) + return exp / numpy.sum(exp, axis=axis) + +def process_result(region, result, model_name, labels=None): + result = softmax(result) + classification = numpy.argmax(result) + if region: + tensor = region.add_tensor("classification") + tensor["label_id"] = int(classification) + tensor["model_name"] = model_name + tensor["confidence"] = float(result[0][int(classification)]) + if labels: + tensor["label"] = labels[tensor["label_id"]] + else: + tensor["label"] = str(tensor["label_id"]) + +class OVMSClassify: + + def _process(self): + pid = os.getpid() + + try: + client = ovmsclient.make_grpc_client(self._model_server) + except Exception as error: + print("Can't connect to model server %s, %s"%(self._model_server,error)) + return + self._logger.info("{} Process: {} Connected to model-server: {}".format(self._stream, + pid, + self._model_server)) + try: + + while True: + image = self._input_queue.get() + if not image: + break + + result = client.predict(inputs={self._input_name:image[1]},model_name = self._model_name) + self._output_queue.put((image[0],result,image[2],time.time(),pid)) + except Exception as error: + print("Exception in request: %s" %(error)) + + self._output_queue.put((None,None,None,None,None)) + + del client + + self._logger.info("{} Process: {} Completed".format(self._stream,pid)) + + + def __del__(self): + for _ in self._processes: + self._input_queue.put(None) + + def __init__(self, + model_name="efficientnet-b0", + model_proc="/home/pipeline-server/models/efficientnet-b0/1/efficientnet-b0.json", + model_server="model-server:9000", + processes=16, + max_objects=None, + min_objects=None, + stream=None, + scale=True, + object_filter=[]): + self._model_server = model_server + self._model_name = model_name + self._input_queue = Queue() + self._output_queue = Queue() + self._labels = [] + self._start_time =time.time() + self._last_report_time = self._start_time + self._classify_count = 0 + self._latencies = 0 + self._frames = 0 + self._min_objects = min_objects + self._max_objects = max_objects + self._logger = logging.get_logger('remote-classify', is_static=True) + self._stream = stream + self._region_sizes = 0 + self._scale = scale + self._object_filter = object_filter + + if not stream: + self._stream = "" + + if model_proc: + with open(model_proc, 'r') as model_proc_file: + model_proc_data = json.load(model_proc_file) + self._labels = model_proc_data["output_postproc"][0]["labels"] + self._input_name = model_proc_data["input_preproc"][0]["layer_name"] + + self._processes = [] + + for _ in range(processes): + self._processes.append(Process(target=self._process,daemon=True)) + self._processes[-1].start() + + def process_frame(self, frame): + regions = list(frame.regions()) + if self._object_filter: + regions = [region for region in regions if region.label() in self._object_filter] + + if self._min_objects and len(regions) < self._min_objects: + for _ in range(len(regions),self._min_objects): + regions.append(frame.add_region(0,0,1,1,"fake",1.0,True)) + + if self._max_objects and len(regions)>self._max_objects: + regions=regions[0:self._max_objects] + + with frame.data() as frame_data: + for index, region in enumerate(regions): + if self._max_objects and index>self._max_objects: + 
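+                    # hard cap on the number of region crops submitted to the
+                    # model server for this frame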
break + region_rect = region.rect() + region_data = frame_data[ + region_rect.y:region_rect.y+region_rect.h, + region_rect.x:region_rect.x+region_rect.w + ] + if self._scale: + region_data = cv2.resize(region_data,(224,224)) + _,img = cv2.imencode(".jpeg",region_data) + self._region_sizes += len(img) + self._input_queue.put((index,bytes(img),time.time())) + + for i in range(len(regions)): + if self._max_objects and i>self._max_objects: + break + index,result,input_time,output_time,pid = self._output_queue.get() + if index is None: + raise Exception("Error sending request to model server") + self._latencies += output_time - input_time + process_result(regions[index], + result, + self._model_name, + self._labels) + + self._classify_count += len(regions) + self._frames += 1 + + if time.time()-self._last_report_time>1 and self._classify_count: + self._last_report_time=time.time() + self._logger.info("{} Classification IPS: {}, Average Latency: {}, Objects Per Frame: {}, Average Region Size: {}" .format + (self._stream, + self._classify_count/(self._last_report_time-self._start_time), + self._latencies / self._classify_count, + self._classify_count/self._frames, + self._region_sizes/self._classify_count)) + + return True + + + +class OVMSClassifyEncodeOnly: + + def __init__(self, + stream=None, + **kwargs): + self._start_time =time.time() + self._last_report_time = self._start_time + self._classify_count = 0 + self._latencies = 0 + self._logger = logging.get_logger('remote-classify', is_static=True) + self._stream = stream + if not stream: + self._stream = "" + + + def process_frame(self, frame): + with frame.data() as frame_data: + regions = list(frame.regions()) + for region in regions: + start = time.time() + region_rect = region.rect() + region_data = frame_data[ + region_rect.y:region_rect.y+region_rect.h, + region_rect.x:region_rect.x+region_rect.w + ] + _,img = cv2.imencode(".jpeg",region_data) + self._latencies += (time.time()-start) + + self._classify_count += len(regions) + if time.time()-self._last_report_time>1 and self._classify_count: + self._last_report_time=time.time() + self._logger.info("{} Encode IPS: {}, Average Latency: {}".format(self._stream, + self._classify_count/(self._last_report_time-self._start_time), + self._latencies / self._classify_count)) + + return True + + +class OVMSClassifyNoOp: + def __init__(self,**kwargs): + pass + def process_frame(self,frame): + return True + diff --git a/configs/extensions/tracked_object_filter.py b/configs/extensions/tracked_object_filter.py new file mode 100644 index 00000000..e09c179d --- /dev/null +++ b/configs/extensions/tracked_object_filter.py @@ -0,0 +1,66 @@ +''' +* Copyright (C) 2023 Intel Corporation. 
+*
+* SPDX-License-Identifier: BSD-3-Clause
+'''
+
+from collections import OrderedDict
+#from server.common.utils import logging
+
+#logger = logging.get_logger('barcode', is_static=True)
+
+
+class LRUCache:
+    def __init__(self, max_tracked_objects):
+        self.data = OrderedDict()
+        self.capacity = max_tracked_objects
+
+    def get(self, key: int) -> int:
+        if key not in self.data:
+            return None
+        else:
+            self.data.move_to_end(key)
+            return self.data[key]
+
+    def put(self, key: int, value: int) -> None:
+        self.data[key] = value
+        self.data.move_to_end(key)
+        if len(self.data) > self.capacity:
+            self.data.popitem(last=False)
+
+
+class ObjectFilter:
+
+    def __init__(self, disable=False, reclassify_interval=5, max_tracked_objects=100):
+        self.disable = disable
+
+        self.reclassify_interval = reclassify_interval
+        self.frame_count = 0
+        self.tracked_objects = LRUCache(max_tracked_objects)
+
+        #if self.disable:
+        #    logger.info("filter disabled")
+
+
+    def process_frame(self, frame):
+
+        if self.disable:
+            return True
+        self.frame_count += 1
+        skip_frame_processing = False
+        if self.reclassify_interval != -1 and (self.reclassify_interval == 0 or (self.frame_count % self.reclassify_interval != 0)):
+            skip_frame_processing = True
+
+        regions = list(frame.regions())
+        for region in regions:
+            object_id = region.object_id()
+            if object_id:
+                # drop regions already seen on skipped frames so that downstream
+                # elements only re-process newly tracked objects
+                if (self.tracked_objects.get(object_id) is not None) and skip_frame_processing:
+                    frame.remove_region(region)
+                self.tracked_objects.put(object_id, True)
+        return True
diff --git a/configs/framework-pipelines/arc/yolov5s.sh b/configs/framework-pipelines/arc/yolov5s.sh
new file mode 100755
index 00000000..a6409a12
--- /dev/null
+++ b/configs/framework-pipelines/arc/yolov5s.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log
diff --git a/configs/framework-pipelines/arc/yolov5s_effnetb0.sh b/configs/framework-pipelines/arc/yolov5s_effnetb0.sh
new file mode 100755
index 00000000..29b8fb4a
--- /dev/null
+++ b/configs/framework-pipelines/arc/yolov5s_effnetb0.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then
+    decode_pp="! videoconvert ! video/x-raw,format=BGR"
+    # TODO: update with vaapipostproc when MJPEG codec is supported.
+    echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log
+    exit 0
+fi
+
+#gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp !
gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify-interval=1 batch-size=1 nireq=4 gpu-throughput-streams=4 ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +GST_VAAPI_DRM_DEVICE=/dev/dri/renderD129 +export GST_VAAPI_DRM_DEVICE="$GST_VAAPI_DRM_DEVICE" + + +if [ "$CPU_ONLY" == "1" ] +then + echo "Enabled CPU inference pipeline only" + gst-launch-1.0 $inputsrc ! decodebin force-sw-decoders=1 ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=CPU inference-region=roi-list name=classification ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +else + echo "Enabled GPU pipeline" + gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify-interval=1 batch-size=1 nireq=4 gpu-throughput-streams=4 ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! 
fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/framework-pipelines/arc/yolov5s_full.sh b/configs/framework-pipelines/arc/yolov5s_full.sh new file mode 100755 index 00000000..1c95c041 --- /dev/null +++ b/configs/framework-pipelines/arc/yolov5s_full.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if grep -qi "gpu" <<< "$OCR_DEVICE"; then + OCR_PP="pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1" +fi + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" + # TODO: update with vaapipostproc when MJPEG codec is supported. + echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log + exit 0 +fi + +if [ "$RENDER_MODE" == "1" ]; then + + gst-launch-1.0 tcpserversrc host=127.0.0.1 port=5000 ! h264parse ! vaapih264dec ! queue ! videoconvert ! xvimagesink sync=true & + + sleep 5 + + gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! "video/x-raw(memory:VASurface)" ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP16-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify_interval=${OCR_RECLASSIFY_INTERVAL} batch-size=8 nireq=4 gpu-throughput-streams=4 ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py kwarg=\"{\\\"reclassify_interval\\\": ${OCR_RECLASSIFY_INTERVAL}}\" name=tracked_object_filter ! gvadetect model-instance-id=ocr nireq=4 gpu-throughput-streams=4 batch-size=8 threshold=.2 model=models/horizontal-text-detection-0002/1/FP16-INT8/horizontal-text-detection-0002.xml model-proc=models/horizontal-text-detection-0002/1/horizontal-text-detection-0002.json name=text_detection device=$OCR_DEVICE inference-region=roi-list pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvainference model-instance-id=ocr2 nireq=4 gpu-throughput-streams=4 batch-size=128 device=GPU model=models/text-recognition-0012-GPU/1/FP16-INT8/text-recognition-0012-mod.xml model-proc=models/text-recognition-0012-GPU/1/text-recognition-0012.json inference-region=roi-list name=text_recognition ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing_0012.py name=ocr_postprocess ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode_nv12_to_gray.py kwarg=\"{\\\"reclassify_interval\\\": ${BARCODE_RECLASSIFY_INTERVAL}}\" ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! \ + gvawatermark ! vaapih264enc ! tcpclientsink host=127.0.0.1 port=5000 +else + gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! "video/x-raw(memory:VASurface)" ! 
gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP16-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify_interval=${OCR_RECLASSIFY_INTERVAL} batch-size=8 nireq=4 gpu-throughput-streams=4 ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py kwarg=\"{\\\"reclassify_interval\\\": ${OCR_RECLASSIFY_INTERVAL}}\" name=tracked_object_filter ! gvadetect model-instance-id=ocr nireq=4 gpu-throughput-streams=4 batch-size=8 threshold=.2 model=models/horizontal-text-detection-0002/1/FP16-INT8/horizontal-text-detection-0002.xml model-proc=models/horizontal-text-detection-0002/1/horizontal-text-detection-0002.json name=text_detection device=$OCR_DEVICE inference-region=roi-list pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvainference model-instance-id=ocr2 nireq=4 gpu-throughput-streams=4 batch-size=128 device=GPU model=models/text-recognition-0012-GPU/1/FP16-INT8/text-recognition-0012-mod.xml model-proc=models/text-recognition-0012-GPU/1/text-recognition-0012.json inference-region=roi-list name=text_recognition ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing_0012.py name=ocr_postprocess ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode_nv12_to_gray.py kwarg=\"{\\\"reclassify_interval\\\": ${BARCODE_RECLASSIFY_INTERVAL}}\" ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/framework-pipelines/core/yolov5s.sh b/configs/framework-pipelines/core/yolov5s.sh new file mode 100755 index 00000000..6bd77eb4 --- /dev/null +++ b/configs/framework-pipelines/core/yolov5s.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ -z "$LOW_POWER" ] +then + gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=MULTI:GPU,CPU ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +else + echo "Enabled GPU based low power pipeline " + gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU $pre_process ! 
gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/framework-pipelines/core/yolov5s_effnetb0.sh b/configs/framework-pipelines/core/yolov5s_effnetb0.sh new file mode 100755 index 00000000..10652953 --- /dev/null +++ b/configs/framework-pipelines/core/yolov5s_effnetb0.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" + # TODO: update with vaapipostproc when MJPEG codec is supported. + echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log + exit 0 +fi + + +if [ "1" == "$LOW_POWER" ] +then + echo "Enabled GPU based low power pipeline " + gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU $pre_process ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification $pre_process ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +elif [ "$CPU_ONLY" == "1" ] +then + echo "Enabled CPU inference pipeline only" + gst-launch-1.0 $inputsrc ! decodebin force-sw-decoders=1 ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=CPU inference-region=roi-list name=classification ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +else + echo "Enabled CPU+iGPU pipeline" + gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=MULTI:GPU,CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! 
gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=MULTI:GPU,CPU inference-region=roi-list name=classification ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +fi diff --git a/configs/framework-pipelines/core/yolov5s_full.sh b/configs/framework-pipelines/core/yolov5s_full.sh new file mode 100755 index 00000000..eb7a9132 --- /dev/null +++ b/configs/framework-pipelines/core/yolov5s_full.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if grep -qi "gpu" <<< "$OCR_DEVICE"; then + OCR_PP="pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1" + +fi + +echo "INPUTSRC_TYPE: $INPUTSRC_TYPE" + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" + # TODO: update with vaapipostproc when MJPEG codec is supported. + echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log + exit 0 +fi + +gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! tee name=branch ! queue ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json reclassify-interval=1 device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose branch. ! queue ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py kwarg=\"{\\\"reclassify_interval\\\": ${OCR_RECLASSIFY_INTERVAL}}\" name=tracked_object_filter ! gvadetect model-instance-id=ocr threshold=.40 model=models/horizontal-text-detection-0001/1/FP16-INT8/horizontal-text-detection-0001.xml model-proc=models/horizontal-text-detection-0001/1/horizontal-text-detection-0001.json name=text_detection device=$OCR_DEVICE inference-region=roi-list $OCR_PP ! gvainference model-instance-id=ocr2 device=$OCR_DEVICE model=models/text-recognition-0014/1/FP16-INT8/text-recognition-0014.xml model-proc=models/text-recognition-0014/1/text-recognition-0012.json inference-region=roi-list name=text_recognition object-class=text ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing.py name=ocr_postprocess ! aggregate. branch. ! queue ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode_nv12_to_gray.py kwarg=\"{\\\"reclassify_interval\\\": ${BARCODE_RECLASSIFY_INTERVAL}}\" ! aggregate. 
2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log diff --git a/configs/framework-pipelines/core/yolov5s_realsense.sh b/configs/framework-pipelines/core/yolov5s_realsense.sh new file mode 100755 index 00000000..acc6d53b --- /dev/null +++ b/configs/framework-pipelines/core/yolov5s_realsense.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +echo "Starting yolov5s realsense pipeline" + +gst-launch-1.0 $inputsrc ! $decode_type ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU $pre_process ! gvatrack name=tracking tracking-type=zero-term-imageless ! tee name=branch ! queue ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json reclassify-interval=1 device=GPU inference-region=roi-list name=classification $pre_process ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose branch. ! queue ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py kwarg=\"{\\\"reclassify_interval\\\": ${OCR_RECLASSIFY_INTERVAL}}\" name=tracked_object_filter ! gvadetect model-instance-id=ocr threshold=.40 model=models/horizontal-text-detection-0001/1/FP16-INT8/horizontal-text-detection-0001.xml model-proc=models/horizontal-text-detection-0001/1/horizontal-text-detection-0001.json name=text_detection device=$OCR_DEVICE inference-region=roi-list ! gvainference model-instance-id=ocr2 device=$OCR_DEVICE model=models/text-recognition-0014/1/FP16-INT8/text-recognition-0014.xml model-proc=models/text-recognition-0014/1/text-recognition-0012.json inference-region=roi-list name=text_recognition object-class=text ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing.py name=ocr_postprocess ! queue ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode.py kwarg=\"{\\\"reclassify_interval\\\": ${BARCODE_RECLASSIFY_INTERVAL}}\" ! aggregate. branch. ! aggregate. 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log diff --git a/configs/framework-pipelines/dgpu/yolov5s.sh b/configs/framework-pipelines/dgpu/yolov5s.sh new file mode 100755 index 00000000..cb7acb19 --- /dev/null +++ b/configs/framework-pipelines/dgpu/yolov5s.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$AUTO_SCALE_FLEX_140" == "1" ] +then + deviceid=$(($cid_count % 2)) + if [ "$deviceid" == "0" ] + then + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD128 + else + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD129 + fi + export GST_VAAPI_DRM_DEVICE="$GST_VAAPI_DRM_DEVICE" +fi + +gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvametaaggregate name=aggregate ! 
gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log diff --git a/configs/framework-pipelines/dgpu/yolov5s_effnetb0.sh b/configs/framework-pipelines/dgpu/yolov5s_effnetb0.sh new file mode 100755 index 00000000..6b7b0f1c --- /dev/null +++ b/configs/framework-pipelines/dgpu/yolov5s_effnetb0.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" + # TODO: update with vaapipostproc when MJPEG codec is supported. + echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log + exit 0 +fi + +#gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify-interval=1 batch-size=1 nireq=4 gpu-throughput-streams=4 ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +if [ "$AUTO_SCALE_FLEX_140" == "1" ] +then + deviceid=$(($cid_count % 2)) + if [ "$deviceid" == "0" ] + then + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD128 + else + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD129 + fi + export GST_VAAPI_DRM_DEVICE="$GST_VAAPI_DRM_DEVICE" +fi + + +if [ "$CPU_ONLY" == "1" ] +then + echo "Enabled CPU inference pipeline only" + gst-launch-1.0 $inputsrc ! decodebin force-sw-decoders=1 ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=CPU inference-region=roi-list name=classification ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +else + echo "Enabled GPU pipeline" + gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! 
gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify-interval=1 batch-size=1 nireq=4 gpu-throughput-streams=4 ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/framework-pipelines/dgpu/yolov5s_full.sh b/configs/framework-pipelines/dgpu/yolov5s_full.sh new file mode 100755 index 00000000..8f5bb694 --- /dev/null +++ b/configs/framework-pipelines/dgpu/yolov5s_full.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if grep -qi "gpu" <<< "$OCR_DEVICE"; then + OCR_PP="pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1" +fi + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" + # TODO: update with vaapipostproc when MJPEG codec is supported. + echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log + exit 0 +fi + +if [ "$RENDER_MODE" == "1" ]; then + gst-launch-1.0 tcpserversrc host=127.0.0.1 port=5000 ! h264parse ! vaapih264dec ! queue ! videoconvert ! xvimagesink sync=true & + + sleep 5 + + gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! "video/x-raw(memory:VASurface)" ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP16-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=4 nireq=4 batch-size=1 ! gvatrack name=tracking tracking-type=zero-term-imageless ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 reclassify_interval=${OCR_RECLASSIFY_INTERVAL} batch-size=8 nireq=4 gpu-throughput-streams=4 ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py kwarg=\"{\\\"reclassify_interval\\\": ${OCR_RECLASSIFY_INTERVAL}}\" name=tracked_object_filter ! 
gvadetect model-instance-id=ocr nireq=4 gpu-throughput-streams=4 batch-size=8 threshold=.2 model=models/horizontal-text-detection-0002/1/FP16-INT8/horizontal-text-detection-0002.xml model-proc=models/horizontal-text-detection-0002/1/horizontal-text-detection-0002.json name=text_detection device=$OCR_DEVICE inference-region=roi-list pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvainference model-instance-id=ocr2 nireq=4 gpu-throughput-streams=4 batch-size=128 device=GPU model=models/text-recognition-0012-GPU/1/FP16-INT8/text-recognition-0012-mod.xml model-proc=models/text-recognition-0012-GPU/1/text-recognition-0012.json inference-region=roi-list name=text_recognition ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing_0012.py name=ocr_postprocess ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode_nv12_to_gray.py kwarg=\"{\\\"reclassify_interval\\\": ${BARCODE_RECLASSIFY_INTERVAL}}\" ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! \ + gvawatermark ! vaapih264enc ! tcpclientsink host=127.0.0.1 port=5000 +else + gst-launch-1.0 $inputsrc ! vaapidecodebin $decode_pp ! "video/x-raw(memory:VASurface)" ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP16-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 gpu-throughput-streams=1 nireq=8 batch-size=2 ! gvatrack name=tracking tracking-type=zero-term-imageless ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification pre-process-backend=vaapi-surface-sharing reclassify_interval=${OCR_RECLASSIFY_INTERVAL} batch-size=8 nireq=4 gpu-throughput-streams=1 ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py kwarg=\"{\\\"reclassify_interval\\\": ${OCR_RECLASSIFY_INTERVAL}}\" name=tracked_object_filter ! gvadetect model-instance-id=ocr nireq=4 gpu-throughput-streams=2 batch-size=8 threshold=.2 model=models/horizontal-text-detection-0002/1/FP16-INT8/horizontal-text-detection-0002.xml model-proc=models/horizontal-text-detection-0002/1/horizontal-text-detection-0002.json name=text_detection device=$OCR_DEVICE inference-region=roi-list pre-process-backend=vaapi-surface-sharing pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1 ! gvainference model-instance-id=ocr2 nireq=4 gpu-throughput-streams=2 batch-size=32 device=GPU model=models/text-recognition-0012-GPU/1/FP16-INT8/text-recognition-0012-mod.xml model-proc=models/text-recognition-0012-GPU/1/text-recognition-0012.json inference-region=roi-list name=text_recognition ! queue max-size-time=0 max-size-bytes=0 max-size-buffers=64 ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing_0012.py name=ocr_postprocess ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode_nv12_to_gray.py kwarg=\"{\\\"reclassify_interval\\\": ${BARCODE_RECLASSIFY_INTERVAL}}\" ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! 
fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/framework-pipelines/stream_density.sh b/configs/framework-pipelines/stream_density.sh new file mode 100755 index 00000000..5a166f4c --- /dev/null +++ b/configs/framework-pipelines/stream_density.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +TARGET_FPS=15 +MEETS_FPS=true +INIT_DURATION=120 +num_pipelines=0 +log=/tmp/results/stream_density.log + +if [ ! -z "$STREAM_DENSITY_FPS" ] +then + TARGET_FPS=$STREAM_DENSITY_FPS +fi + +if [ ! -z "$COMPLETE_INIT_DURATION" ] +then + INIT_DURATION=$COMPLETE_INIT_DURATION +fi + +echo "Stream density TARGET_FPS set to $TARGET_FPS and INIT_DURATION set to $INIT_DURATION" > $log +echo "Starting single container stream density benchmarking" >> $log + +GPU_DEVICE_TOGGLE="1" + +while [ "$MEETS_FPS" = true ] +do + total_fps_per_stream=0.0 + total_fps=0.0 + num_pipelines=$(( $num_pipelines + 1 )) + cid_count=$(( $num_pipelines - 1 )) + + echo "Starting pipeline: $num_pipelines" >> $log + if [ -z "$AUTO_SCALE_FLEX_140" ] + then + #echo "DEBUG: $1" >> $log + ./$1 & + else + echo "INFO: Auto scaling on both Flex 140 GPUs...targeting device $GPU_DEVICE_TOGGLE" >> $log + if [ "$GPU_DEVICE_TOGGLE" == "1" ] + then + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD128 ./$1 & + GPU_DEVICE_TOGGLE=2 + else + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD129 ./$1 & + GPU_DEVICE_TOGGLE=1 + fi + fi + + echo "waiting for pipelines to settle" >> $log + # let the pipelines settle + sleep $INIT_DURATION + + for i in $( seq 0 $(($cid_count))) + do + #fps=`tail -1 /tmp/results/pipeline$cid_count.log` + # Last 10/20 seconds worth of currentfps + STREAM_FPS_LIST=`tail -20 /tmp/results/pipeline$i.log` + if [ -z "$STREAM_FPS_LIST" ] + then + echo "Warning: No FPS returned from pipeline$i.log" + STREAM_FPS_LIST=`tail -20 /tmp/results/pipeline$i.log` + echo "DEBUG: $STREAM_FPS_LIST" + #continue + fi + stream_fps_sum=0 + stream_fps_count=0 + + for stream_fps in $STREAM_FPS_LIST + do + stream_fps_sum=`echo $stream_fps_sum $stream_fps | awk '{print $1 + $2}'` + stream_fps_count=`echo $stream_fps_count 1 | awk '{print $1 + $2}'` + done + stream_fps_avg=`echo $stream_fps_sum $stream_fps_count | awk '{print $1 / $2}'` + + + total_fps=`echo $total_fps $stream_fps_avg | awk '{print $1 + $2}'` + total_fps_per_stream=`echo $total_fps $num_pipelines | awk '{print $1 / $2}'` + echo "FPS for pipeline$i: $stream_fps_avg" >> $log + done + echo "Total FPS throughput: $total_fps" >> $log + echo "Total FPS per stream: $total_fps_per_stream" >> $log + + if (( $(echo $total_fps_per_stream $TARGET_FPS | awk '{if ($1 >= $2) print 1;}') )) + then + total_fps=0 + echo "yes" + else + echo "no" + MEETS_FPS=false + echo "Max stream density achieved for target FPS $TARGET_FPS is $(( $cid_count ))" >> $log + echo "Finished stream density benchmarking" >> $log + fi + #sleep 10 + +done #done while diff --git a/configs/framework-pipelines/xeon/yolov5s.sh b/configs/framework-pipelines/xeon/yolov5s.sh new file mode 100755 index 00000000..9f64f5d0 --- /dev/null +++ b/configs/framework-pipelines/xeon/yolov5s.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$RENDER_MODE" == "1" ]; then + gst-launch-1.0 $inputsrc ! decodebin !
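stream_density.sh leans on awk for all of its FPS arithmetic because bash arithmetic is integer-only; current-FPS samples such as 14.85 would otherwise be truncated. The averaging pattern in isolation:

    # bash $(( )) cannot do floats, so sums and averages go through awk.
    fps_samples="14.9 15.2 15.1"
    sum=0; count=0
    for s in $fps_samples; do
      sum=$(echo "$sum $s" | awk '{print $1 + $2}')
      count=$((count + 1))
    done
    echo "$sum $count" | awk '{printf "average FPS: %.2f\n", $1 / $2}'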
gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl \ + ! videoconvert ! video/x-raw,format=I420 \ + ! gvawatermark ! videoconvert ! fpsdisplaysink video-sink=ximagesink sync=true --verbose \ + 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +# ! videoconvert ! video/x-raw,format=I420 \ +# ! gvawatermark ! jpegenc name=jpegencoder ! xvimagesink +else + gst-launch-1.0 $inputsrc ! decodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/framework-pipelines/xeon/yolov5s_effnetb0.sh b/configs/framework-pipelines/xeon/yolov5s_effnetb0.sh new file mode 100755 index 00000000..10652953 --- /dev/null +++ b/configs/framework-pipelines/xeon/yolov5s_effnetb0.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" + # TODO: update with vaapipostproc when MJPEG codec is supported. + echo "Not supported until D436 with MJPEG." > /tmp/results/pipeline$cid_count.log + exit 0 +fi + + +if [ "1" == "$LOW_POWER" ] +then + echo "Enabled GPU based low power pipeline " + gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=GPU $pre_process ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=GPU inference-region=roi-list name=classification $pre_process ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +elif [ "$CPU_ONLY" == "1" ] +then + echo "Enabled CPU inference pipeline only" + gst-launch-1.0 $inputsrc ! decodebin force-sw-decoders=1 ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! 
gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=CPU inference-region=roi-list name=classification ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +else + echo "Enabled CPU+iGPU pipeline" + gst-launch-1.0 $inputsrc ! vaapidecodebin ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=MULTI:GPU,CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! queue max-size-bytes=0 max-size-buffers=0 max-size-time=0 ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json device=MULTI:GPU,CPU inference-region=roi-list name=classification ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log + +fi diff --git a/configs/framework-pipelines/xeon/yolov5s_full.sh b/configs/framework-pipelines/xeon/yolov5s_full.sh new file mode 100755 index 00000000..c2db32fb --- /dev/null +++ b/configs/framework-pipelines/xeon/yolov5s_full.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$INPUTSRC_TYPE" == "REALSENSE" ]; then + decode_pp="! videoconvert ! video/x-raw,format=BGR" +fi + +if [ "$RENDER_MODE" == "1" ]; then + gst-launch-1.0 $inputsrc ! decodebin force-sw-decoders=true $decode_pp ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! tee name=branch ! queue ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json reclassify-interval=1 device=CPU inference-region=roi-list name=classification ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl branch. \ + ! queue ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py name=tracked_object_filter ! gvadetect model-instance-id=ocr threshold=.40 model=models/horizontal-text-detection-0001/1/FP16-INT8/horizontal-text-detection-0001.xml model-proc=models/horizontal-text-detection-0001/1/horizontal-text-detection-0001.json name=text_detection device=CPU inference-region=roi-list ! gvainference model-instance-id=ocr2 model=models/text-recognition-0014/1/FP16-INT8/text-recognition-0014.xml model-proc=models/text-recognition-0014/1/text-recognition-0012.json inference-region=roi-list name=text_recognition object-class=text ! 
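The three branches above differ mainly in the device string handed to gvadetect/gvaclassify, which follows OpenVINO device-plugin naming: plain CPU, plain GPU, or a MULTI:GPU,CPU compound that spreads inference requests across both. A sketch of the selection logic on its own, using the same LOW_POWER/CPU_ONLY toggles:

    # Pick an OpenVINO device string from the same env toggles the script uses.
    if [ "$LOW_POWER" == "1" ]; then
      DEVICE=GPU            # iGPU only, lowest CPU utilization
    elif [ "$CPU_ONLY" == "1" ]; then
      DEVICE=CPU
    else
      DEVICE=MULTI:GPU,CPU  # distribute inference requests across both
    fi
    echo "inference device: $DEVICE"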
gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing.py name=ocr_postprocess ! aggregate. branch. ! queue ! videoconvert ! video/x-raw,format=BGR ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode.py ! aggregate. branch. ! queue \ + ! gvawatermark \ + ! videoconvert ! fpsdisplaysink video-sink=ximagesink sync=true --verbose \ + 2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +else + gst-launch-1.0 $inputsrc ! decodebin force-sw-decoders=true $decode_pp ! gvadetect model-instance-id=odmodel name=detection model=models/yolov5s/1/FP32-INT8/yolov5s.xml model-proc=models/yolov5s/1/yolov5s.json threshold=.5 device=CPU ! gvatrack name=tracking tracking-type=zero-term-imageless ! tee name=branch ! queue ! gvaclassify model-instance-id=clasifier labels=models/efficientnet-b0/1/imagenet_2012.txt model=models/efficientnet-b0/1/FP16-INT8/efficientnet-b0.xml model-proc=models/efficientnet-b0/1/efficientnet-b0.json reclassify-interval=1 device=CPU inference-region=roi-list name=classification ! gvametaaggregate name=aggregate ! gvametaconvert name=metaconvert add-empty-results=true ! gvametapublish name=destination file-format=2 file-path=/tmp/results/r$cid_count.jsonl ! fpsdisplaysink video-sink=fakesink sync=true --verbose branch. ! queue ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py name=tracked_object_filter ! gvadetect model-instance-id=ocr threshold=.40 model=models/horizontal-text-detection-0001/1/FP16-INT8/horizontal-text-detection-0001.xml model-proc=models/horizontal-text-detection-0001/1/horizontal-text-detection-0001.json name=text_detection device=CPU inference-region=roi-list ! gvainference model-instance-id=ocr2 model=models/text-recognition-0014/1/FP16-INT8/text-recognition-0014.xml model-proc=models/text-recognition-0014/1/text-recognition-0012.json inference-region=roi-list name=text_recognition object-class=text ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing.py name=ocr_postprocess ! aggregate. branch. ! queue ! videoconvert ! video/x-raw,format=BGR ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode.py ! aggregate.
2>&1 | stdbuf -oL sed -n -e 's/^.*current: //p' | stdbuf -oL cut -d , -f 1 > /tmp/results/pipeline$cid_count.log +fi diff --git a/configs/models/2022/Horizontal-text-detection-0002_fix.json b/configs/models/2022/Horizontal-text-detection-0002_fix.json new file mode 100644 index 00000000..80b9aa42 --- /dev/null +++ b/configs/models/2022/Horizontal-text-detection-0002_fix.json @@ -0,0 +1,10 @@ +{ + "json_schema_version": "2.2.0", + "input_preproc": [], + "output_postproc": [ + { + "converter": "boxes_labels" + } + ] + } + \ No newline at end of file diff --git a/configs/models/2022/models.list.yml b/configs/models/2022/models.list.yml new file mode 100755 index 00000000..6a89c271 --- /dev/null +++ b/configs/models/2022/models.list.yml @@ -0,0 +1,12 @@ +# https://github.com/dlstreamer/pipeline-zoo-models/tree/main/storage/efficientnet-b0_INT8 +- model: efficientnet-b0 +# https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/horizontal-text-detection-0001/ +- model: horizontal-text-detection-0001 +# https://github.com/dlstreamer/pipeline-zoo-models/tree/main/storage/text-recognition-0012-mod +- model: text-recognition-0012-mod +# https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/text-recognition-0014/ +- model: text-recognition-0014 +# https://github.com/dlstreamer/pipeline-zoo-models/tree/main/storage/yolov5s-416 +- model: yolov5s +# https://github.com/dlstreamer/pipeline-zoo-models/tree/main/storage/yolov5s-416_INT8 +- model: yolov5s-INT8 \ No newline at end of file diff --git a/configs/models/2022/yolov5s/1/FP16-INT8/yolov5s.mapping b/configs/models/2022/yolov5s/1/FP16-INT8/yolov5s.mapping new file mode 100644 index 00000000..91af3394 --- /dev/null +++ b/configs/models/2022/yolov5s/1/FP16-INT8/yolov5s.mapping @@ -0,0 +1,819 @@ +[819 lines of yolov5s OpenVINO IR mapping data omitted; the XML markup did not survive patch extraction]
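models.list.yml is the manifest modelDownload.sh works from, with a source URL comment above each entry. A sketch of walking its entries (the sed-based parsing here is an assumption; the real downloader may read the file differently):

    # List the model names declared in the manifest.
    while read -r m; do
      echo "would fetch model: $m"
    done < <(sed -n 's/^- model: //p' configs/models/2022/models.list.yml)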
diff --git a/configs/models/2022/yolov5s/1/FP16/yolov5s.mapping b/configs/models/2022/yolov5s/1/FP16/yolov5s.mapping new file mode 100644 index 00000000..1c92e490 --- /dev/null +++ b/configs/models/2022/yolov5s/1/FP16/yolov5s.mapping @@ -0,0 +1,819 @@ +[819 lines of yolov5s OpenVINO IR mapping data omitted; the XML markup did not survive patch extraction] diff --git a/configs/models/2022/yolov5s/1/FP32-INT8/yolov5s.mapping b/configs/models/2022/yolov5s/1/FP32-INT8/yolov5s.mapping new file mode 100644 index 00000000..447cfe05 --- /dev/null +++ b/configs/models/2022/yolov5s/1/FP32-INT8/yolov5s.mapping @@ -0,0 +1,819 @@ +[819 lines of yolov5s OpenVINO IR mapping data omitted; the XML markup did not survive patch extraction]
diff --git a/configs/models/2022/yolov5s/1/FP32/yolov5s.mapping b/configs/models/2022/yolov5s/1/FP32/yolov5s.mapping new file mode 100644 index 00000000..93103471 --- /dev/null +++ b/configs/models/2022/yolov5s/1/FP32/yolov5s.mapping @@ -0,0 +1,819 @@ +[819 lines of yolov5s OpenVINO IR mapping data omitted; the XML markup did not survive patch extraction] diff --git a/configs/models/licenses/APACHE-2.0-TF-Models.txt b/configs/models/licenses/APACHE-2.0-TF-Models.txt new file mode 100644 index 00000000..489485ec --- /dev/null +++ b/configs/models/licenses/APACHE-2.0-TF-Models.txt @@ -0,0 +1,203 @@ +Copyright 2016 The TensorFlow Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016, The Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/configs/models/licenses/APACHE-2.0-TF-TPU.txt b/configs/models/licenses/APACHE-2.0-TF-TPU.txt new file mode 100644 index 00000000..27e80ef7 --- /dev/null +++ b/configs/models/licenses/APACHE-2.0-TF-TPU.txt @@ -0,0 +1,203 @@ +Copyright 2017 The TensorFlow Authors. All rights reserved. 
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017, The TensorFlow Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/configs/pipelines/xeon/yolov5s_tracking_mixed_cpu_full/pipeline.json b/configs/pipelines/xeon/yolov5s_tracking_mixed_cpu_full/pipeline.json new file mode 100644 index 00000000..26a5c41c --- /dev/null +++ b/configs/pipelines/xeon/yolov5s_tracking_mixed_cpu_full/pipeline.json @@ -0,0 +1,139 @@ +{ + "type": "GStreamer", + "template": [ + " {auto_source}", + " ! decodebin force-sw-decoders=true", + " ! gvadetect model-instance-id=ssd name=detection model={models[yolov5s][1][FP32-INT8][network]} threshold=.5 device=CPU", + " ! gvatrack name=tracking tracking-type=zero-term-imageless", + " ! tee name=branch", + " ! queue", + " ! gvaclassify model-instance-id=clasifier model={models[efficientnet-b0][1][FP16-INT8][network]} reclassify-interval=1 device=CPU inference-region=roi-list name=classification", + " ! gvametaaggregate name=aggregate", + " ! gvametaconvert name=metaconvert add-empty-results=true", + " ! gvametapublish name=destination ! appsink", + " branch. ! queue ", + " ! gvapython class=ObjectFilter module=/home/pipeline-server/extensions/tracked_object_filter.py name=tracked_object_filter", + " ! gvadetect model-instance-id=ocr threshold=.40 model={models[horizontal-text-detection-0001][1][FP16-INT8][network]} name=text_detection device=CPU inference-region=roi-list", + " ! gvainference model-instance-id=ocr2 model={models[text-recognition-0014][1][FP16-INT8][network]} inference-region=roi-list name=text_recognition object-class=text ", + " ! gvapython class=OCR module=/home/pipeline-server/extensions/OCR_post_processing.py name=ocr_postprocess", + " ! aggregate.", + " branch. ! queue ", + " ! videoconvert ! video/x-raw,format=BGR", + " ! gvapython name=barcode class=BarcodeDetection module=/home/pipeline-server/extensions/barcode.py", + " ! aggregate." 
+ ], + "description": "Yolov5s SCO Pipeline with Tracking Mixed", + "parameters": { + "type": "object", + "properties": { + "detection": { + "type": "object", + "element": { + "name": "detection", + "format": "element-properties" + } + }, + "text-detection": { + "type": "object", + "element": { + "name": "text_detection", + "format": "element-properties" + } + }, + "text-recognition": { + "type": "object", + "element": { + "name": "text_recognition", + "format": "element-properties" + } + }, + "barcode-properties": { + "element": { + "name": "barcode", + "property": "kwarg", + "format": "json" + }, + "type": "object", + "properties": { + "disable": { + "type": "boolean" + }, + "decode_type": { + "type": "string" + }, + "reclassify_interval": { + "type": "integer" + }, + "max_tracked_objects": { + "type": "integer" + } + } + }, + "OCR-properties": { + "element": { + "name": "ocr_postprocess", + "property": "kwarg", + "format": "json" + }, + "type": "object", + "properties": { + "threshold": { + "type": "number" + } + }, + "default": { + "threshold": 0.5 + } + }, + "classification": { + "type": "object", + "element": { + "name": "classification", + "format": "element-properties" + } + }, + "classification-filter": { + "element": { + "name": "classification_filter", + "format": "json", + "property": "kwarg" + }, + "type": "object", + "properties": { + "object_filter": { + "type": "array" + } + } + }, + "classification-filter-num-objects": { + "element": { + "name": "classification_filter_num_objects", + "format": "json", + "property": "kwarg" + }, + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "min_objects": { + "type": "integer" + }, + "max_objects": { + "type": "integer" + }, + "fake_object_width": { + "type": "number", + "minimum": 0, + "maximum": 1 + }, + "fake_object_height": { + "type": "number", + "minimum": 0, + "maximum": 1 + } + } + } + } + } +} diff --git a/configs/results/.gitignore b/configs/results/.gitignore new file mode 100755 index 00000000..86d0cb27 --- /dev/null +++ b/configs/results/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore \ No newline at end of file diff --git a/docker-build-igt.sh b/docker-build-igt.sh new file mode 100755 index 00000000..bbbf2492 --- /dev/null +++ b/docker-build-igt.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ "$#" -eq 2 ] +then + httpp=$1 + httpps=$2 +elif [ "$#" -gt 0 ] +then + echo " + Builds not requiring network proxy + usage: ./docker-build-igt.sh + + Optional: Builds requiring network proxy + usage: ./docker-build-igt.sh http_proxy_ip:http_proxy_port https_proxy_ip:https_proxy_port + " + exit 0 +fi + +# ./docker-build-igt.sh http://proxy-chain.intel.com:911 http://proxy-chain.intel.com:912 + +echo "Building igt HTTPS_PROXY=$httpps HTTP_PROXY=$httpp" +docker build --no-cache --build-arg HTTPS_PROXY=$httpps --build-arg HTTP_PROXY=$httpp -t igt:latest -f Dockerfile.igt . diff --git a/docker-build.sh b/docker-build.sh new file mode 100755 index 00000000..35630971 --- /dev/null +++ b/docker-build.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. 
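Both build scripts forward the optional proxy arguments straight into docker build as HTTP_PROXY/HTTPS_PROXY build args. For example, on a host behind a corporate proxy (proxy host and ports are placeholders):

    ./docker-build.sh soc http://proxy.example.com:911 http://proxy.example.com:912
    ./docker-build-igt.sh http://proxy.example.com:911 http://proxy.example.com:912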
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +if [ -z $1 ] +then + echo " + Builds not requiring network proxy + usage: ./docker-build.sh dgpu|soc + + Optional: Builds requiring network proxy + usage: ./docker-build.sh dgpu|soc http_proxy_ip:http_proxy_port https_proxy_ip:https_proxy_port + " + exit 0 +fi + +httpp=$2 +httpps=$3 + +# ./docker-build.sh dgpu http://proxy-chain.intel.com:911 http://proxy-chain.intel.com:912 + +if [ -f intel-graphics.key ] +then + rm intel-graphics.key +fi +wget https://repositories.intel.com/graphics/intel-graphics.key + +if [ x$1 == "xdgpu" ] +then + echo "Building for dgpu Arc/Flex" + docker build --no-cache --build-arg HTTPS_PROXY=$httpps --build-arg HTTP_PROXY=$httpp -t sco-dgpu:2.0 -f Dockerfile.dgpu . +else + echo "Building for SOC (e.g. TGL/ADL/Xeon SP/etc)" + docker build --no-cache --build-arg HTTPS_PROXY=$httpps --build-arg HTTP_PROXY=$httpp -t sco-soc:2.0 -f Dockerfile.soc . +fi + +rm intel-graphics.key diff --git a/docker-run-dev.sh b/docker-run-dev.sh new file mode 100755 index 00000000..30d51178 --- /dev/null +++ b/docker-run-dev.sh @@ -0,0 +1,233 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +error() { + printf '%s\n' "$1" >&2 + exit +} + +show_help() { + echo " + usage: ./docker-run.sh --platform core|xeon|dgpu.x --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 [--classification_disabled] [ --ocr_disabled | --ocr [OCR_INTERVAL OCR_DEVICE] ] [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] + + Note: + 1. dgpu.x should be replaced with targetted GPUs such as dgpu (for all GPUs), dgpu.0, dgpu.1, etc + 2. filesrc will utilize videos stored in the sample-media folder + " +} + +HAS_FLEX_140=0 +HAS_FLEX_170=0 +HAS_ARC=0 +#HAS_iGPU=0 + +get_gpu_devices() { + has_gpu=0 + has_any_intel_non_server_gpu=`dmesg | grep -i "class 0x030000" | grep "8086"` + has_any_intel_server_gpu=`dmesg | grep -i "class 0x038000" | grep "8086"` + has_flex_170=`echo "$has_any_intel_server_gpu" | grep -i "56C0"` + has_flex_140=`echo "$has_any_intel_server_gpu" | grep -i "56C1"` + has_arc=`echo "$has_any_intel_non_server_gpu" | grep -iE "5690|5691|5692|56A0|56A1|56A2|5693|5694|5695|5698|56A5|56A6|56B0|56B1|5696|5697|56A3|56A4|56B2|56B3"` + + if [ -z "$has_any_intel_non_server_gpu" ] && [ -z "$has_any_intel_server_gpu" ] + then + echo "No Intel GPUs found" + return + fi + echo "GPU exists!" + + if [ ! -z "$has_flex_140" ] + then + HAS_FLEX_140=1 + fi + if [ ! -z "$has_flex_170" ] + then + HAS_FLEX_170=1 + fi + if [ ! -z "$has_arc" ] + then + HAS_ARC=1 + fi + + echo "HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, HAS_ARC=$HAS_ARC" +} + +get_options() { + while :; do + case $1 in + -h | -\? | --help) + show_help + exit + ;; + --platform) + if [ "$2" ]; then + if [ $2 == "xeon" ]; then + PLATFORM=$2 + shift + elif grep -q "core" <<< "$2"; then + PLATFORM="core" + TARGET_GPU_DEVICE="--device=/dev/dri/renderD128" + shift + elif grep -q "dgpu" <<< "$2"; then + arrgpu=(${2//./ }) + gpu_number=${arrgpu[1]} + if [ -z "$gpu_number" ]; then + TARGET_GPU="GPU.0" + TARGET_GPU_DEVICE="--privileged" + else + gid=$((128+$gpu_number)) + + TARGET_GPU="GPU."$gpu_number + TARGET_GPU_DEVICE="--device=/dev/dri/renderD"$gid + fi + PLATFORM="dgpu" + #echo "$PLATFORM $TARGET_GPU" + + shift + else + error 'ERROR: "--platform" requires an argument core|xeon|dgpu.' + fi + else + error 'ERROR: "--platform" requires an argument core|xeon|dgpu.' 
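The dgpu.N parsing above relies on the kernel's DRM numbering: render nodes start at /dev/dri/renderD128, so discrete GPU N maps to renderD(128+N). The mapping in isolation:

    # DRM render nodes are numbered from 128; dgpu.1 -> renderD129, etc.
    for n in 0 1 2; do
      echo "GPU.$n -> /dev/dri/renderD$((128 + n))"
    done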
+ fi + ;; + --inputsrc) + if [ "$2" ]; then + INPUTSRC=$2 + shift + else + error 'ERROR: "--inputsrc" requires an argument RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0.' + fi + ;; + --classification_disabled) + CLASSIFICATION_DISABLED=1 + ;; + --ocr_disabled) + OCR_DISABLED=1 + ;; + --barcode_disabled) + BARCODE_DISABLED=1 + ;; + --ocr) + if [ "$2" ]; then + OCR_INTERVAL=$2 + else + error 'ERROR: "--ocr" requires an argument [OCR_INTERVAL OCR_DEVICE].' + fi + if [ "$3" ]; then + OCR_DEVICE=$3 + shift 2 + else + error 'ERROR: "--ocr" requires an argument [OCR_INTERVAL] [OCR_DEVICE].' + fi + ;; + --barcode) + if [ "$2" ]; then + BARCODE_INTERVAL=$2 + shift + else + error 'ERROR: "--barcode" requires an argument [BARCODE_INTERVAL].' + fi + ;; + -?*) + error "ERROR: Unknown option $1" + ;; + ?*) + error "ERROR: Unknown option $1" + ;; + *) + break + ;; + esac + + shift + + done +} + +BARCODE_DISABLED=0 +BARCODE_INTERVAL=5 +OCR_INTERVAL=5 +OCR_DEVICE=CPU +OCR_DISABLED=0 +CLASSIFICATION_DISABLED=0 +export GST_DEBUG=0 + +get_options "$@" +get_gpu_devices + +if [ -z $1 ] || [ -z $PLATFORM ] || [ -z $INPUTSRC ] +then + show_help + exit 0 +fi + +cl_cache_dir=`pwd`/.cl-cache +echo "CLCACHE: $cl_cache_dir" + +#HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, HAS_ARC=$HAS_ARC + + +if [ $HAS_FLEX_140 == 1 ] || [ $HAS_FLEX_170 == 1 ] || [ $HAS_ARC == 1 ] +then + echo "Arc/Flex device support" + TAG=sco-dgpu:2.0 + +else + echo "SOC (CPU, iGPU, and Xeon SP) device support" + TAG=sco-soc:2.0 +fi + +cids=$(docker ps --filter="name=vision-self-checkout" -q -a) +cid_count=`echo "$cids" | wc -w` +CONTAINER_NAME="vision-self-checkout"$(($cid_count)) +LOG_FILE_NAME="vision-self-checkout"$(($cid_count))".log" + +#echo "barcode_disabled: $BARCODE_DISABLED, barcode_interval: $BARCODE_INTERVAL, ocr_interval: $OCR_INTERVAL, ocr_device: $OCR_DEVICE, ocr_disabled=$OCR_DISABLED, class_disabled=$CLASSIFICATION_DISABLED" + +if grep -q "rtsp" <<< "$INPUTSRC"; then + # rtsp + # todo pass depay info + inputsrc=$INPUTSRC" ! rtph264depay " + INPUTSRC_TYPE="RTSP" + pre_process="pre-process-backend=vaapi-surface-sharing -e pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1" + +elif grep -q "file" <<< "$INPUTSRC"; then + # filesrc + arrfilesrc=(${INPUTSRC//:/ }) + # use vids since container maps a volume to this location based on sample-media folder + # TODO: need to pass demux/codec info + inputsrc="filesrc location=vids/"${arrfilesrc[1]}" ! qtdemux !
h264parse " + INPUTSRC_TYPE="FILE" + +elif grep -q "video" <<< "$INPUTSRC"; then + # v4l2src /dev/video* + # TODO need to pass stream info + inputsrc="v4l2src device="$INPUTSRC + INPUTSRC_TYPE="USB" + +else + # rs-serial realsenssrc + # TODO need to pass depthalign info + cameras=`ls /dev/vid* | while read line; do echo "--device=$line"; done` + TARGET_GPU_DEVICE=$TARGET_GPU_DEVICE" "$cameras + inputsrc="realsensesrc cam-serial-number="$INPUTSRC" stream-type=0 align=0 imu_on=false" + INPUTSRC_TYPE="REALSENSE" +fi + +if [ "${OCR_DISABLED}" == "0" ] && [ "${BARCODE_DISABLED}" == "0" ] && [ "${CLASSIFICATION_DISABLED}" == "0" ]; then + pipeline="yolov5s_full.sh" + +elif [ "${OCR_DISABLED}" == "1" ] && [ "${BARCODE_DISABLED}" == "1" ] && [ "${CLASSIFICATION_DISABLED}" == "1" ]; then + pipeline="yolov5s.sh" +elif [ "${OCR_DISABLED}" == "1" ] && [ "${BARCODE_DISABLED}" == "1" ] && [ "${CLASSIFICATION_DISABLED}" == "0" ]; then + pipeline="yolov5s_effnetb0.sh" +else + echo "Not implemented" + exit 0 +fi + +docker run --network host $TARGET_GPU_DEVICE --user root --ipc=host --name vision-self-checkout$cid_count -v `pwd`/configs/framework-pipelines/stream_density.sh:/home/pipeline-server/stream_density_framework-pipelines.sh -e INPUTSRC_TYPE=$INPUTSRC_TYPE -e DISPLAY=:0 -e cl_cache_dir=/home/pipeline-server/.cl-cache -v $cl_cache_dir:/home/pipeline-server/.cl-cache -v /tmp/.X11-unix:/tmp/.X11-unix -v `pwd`/sample-media/:/home/pipeline-server/vids -v `pwd`/configs/pipelines:/home/pipeline-server/pipelines -v `pwd`/configs/extensions:/home/pipeline-server/extensions -v `pwd`/results:/tmp/results -v `pwd`/configs/models/2022:/home/pipeline-server/models -v `pwd`/configs/framework-pipelines:/home/pipeline-server/framework-pipelines -w /home/pipeline-server -e BARCODE_RECLASSIFY_INTERVAL=$BARCODE_INTERVAL -e OCR_RECLASSIFY_INTERVAL=$OCR_INTERVAL -e OCR_DEVICE=$OCR_DEVICE -e LOG_LEVEL=$LOG_LEVEL -e GST_DEBUG=$GST_DEBUG -e cid_count=$cid_count -e pre_process="$pre_process" -e LOW_POWER="$LOW_POWER" -e CPU_ONLY="$CPU_ONLY" -e inputsrc="$inputsrc" --rm -it $TAG diff --git a/docker-run-igt.sh b/docker-run-igt.sh new file mode 100755 index 00000000..32da175b --- /dev/null +++ b/docker-run-igt.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +SOURCE_DIR=$(dirname "$(readlink -f "$0")") +if [ -z $1 ]; then + echo "PCI card id required" +else + docker run -v $SOURCE_DIR/results:/tmp/results -itd --privileged igt:latest bash -c "/usr/local/bin/intel_gpu_top -d pci:card=$1 -J > /tmp/results/igt$1.json" + #docker run -v $SOURCE_DIR/results:/tmp/results -it --privileged igt:latest bash -c "/usr/local/bin/intel_gpu_top -d pci:card=$1" +fi diff --git a/docker-run.sh b/docker-run.sh new file mode 100755 index 00000000..36015758 --- /dev/null +++ b/docker-run.sh @@ -0,0 +1,151 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. 
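The docker run invocations in these scripts are long, but only a few pieces matter for GPU access and results collection. A trimmed-down sketch keeping just that plumbing (image tag and mount paths are the ones used in this repo; everything else is stripped for clarity):

    # Hand one render node to the container and surface the FPS logs.
    docker run --rm -it \
      --device=/dev/dri/renderD128 \
      --user root --ipc=host --network host \
      -v "$(pwd)/results:/tmp/results" \
      sco-soc:2.0 bash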
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+export GST_DEBUG=0
+
+source get-gpu-info.sh
+
+if [ -z "$PLATFORM" ] || [ -z "$INPUTSRC" ]
+then
+    source get-options.sh "$@"
+fi
+
+cl_cache_dir=`pwd`/.cl-cache
+echo "CLCACHE: $cl_cache_dir"
+
+#HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, HAS_ARC=$HAS_ARC
+
+# TODO: override tag for other images and workloads
+if [ $HAS_FLEX_140 == 1 ] || [ $HAS_FLEX_170 == 1 ] || [ $HAS_ARC == 1 ]
+then
+    if [ $PLATFORM == "dgpu" ]
+    then
+        echo "Arc/Flex device driver stack"
+        TAG=sco-dgpu:2.0
+    else
+        TAG=sco-soc:2.0
+        echo "SOC (CPU, iGPU, and Xeon SP) device driver stack"
+    fi
+
+    if [ $HAS_ARC == 1 ]; then
+        PLATFORM="arc"
+    fi
+
+else
+    echo "SOC (CPU, iGPU, and Xeon SP) device driver stack"
+    TAG=sco-soc:2.0
+fi
+
+if [ ! -z "$CONTAINER_IMAGE_OVERRIDE" ]
+then
+    echo "Using container image override $CONTAINER_IMAGE_OVERRIDE"
+    TAG=$CONTAINER_IMAGE_OVERRIDE
+fi
+
+cids=$(docker ps --filter="name=vision-self-checkout" -q -a)
+cid_count=`echo "$cids" | wc -w`
+CONTAINER_NAME="vision-self-checkout"$(($cid_count))
+LOG_FILE_NAME="vision-self-checkout"$(($cid_count))".log"
+
+#echo "barcode_disabled: $BARCODE_DISABLED, barcode_interval: $BARCODE_INTERVAL, ocr_interval: $OCR_INTERVAL, ocr_device: $OCR_DEVICE, ocr_disabled=$OCR_DISABLED, class_disabled=$CLASSIFICATION_DISABLED"
+pre_process=""
+if grep -q "rtsp" <<< "$INPUTSRC"; then
+    # rtsp
+    # todo pass depay info
+    inputsrc=$INPUTSRC" ! rtph264depay "
+    INPUTSRC_TYPE="RTSP"
+    pre_process="pre-process-backend=vaapi-surface-sharing -e pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1"
+
+
+elif grep -q "file" <<< "$INPUTSRC"; then
+    # filesrc
+    arrfilesrc=(${INPUTSRC//:/ })
+    # use vids since container maps a volume to this location based on sample-media folder
+    # TODO: need to pass demux/codec info
+    inputsrc="filesrc location=vids/"${arrfilesrc[1]}" ! qtdemux ! h264parse "
+    INPUTSRC_TYPE="FILE"
+    decode_type="vaapidecodebin"
+    pre_process="pre-process-backend=vaapi-surface-sharing -e pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1"
+
+elif grep -q "video" <<< "$INPUTSRC"; then
+    # v4l2src /dev/video*
+    # TODO need to pass stream info
+    inputsrc="v4l2src device="$INPUTSRC
+    INPUTSRC_TYPE="USB"
+    TARGET_USB_DEVICE="--device=$INPUTSRC"
+    decode_type="videoconvert ! video/x-raw,format=BGR"
+    pre_process=""
+
+else
+    # rs-serial realsensesrc
+    # TODO need to pass depthalign info
+    inputsrc="realsensesrc cam-serial-number="$INPUTSRC" stream-type=0 align=0 imu_on=false"
+    # add realsense color related properties if any
+    if [ "$COLOR_WIDTH" != 0 ]; then
+        inputsrc=$inputsrc" color-width="$COLOR_WIDTH
+    fi
+    if [ "$COLOR_HEIGHT" != 0 ]; then
+        inputsrc=$inputsrc" color-height="$COLOR_HEIGHT
+    fi
+    if [ "$COLOR_FRAMERATE" != 0 ]; then
+        inputsrc=$inputsrc" color-framerate="$COLOR_FRAMERATE
+    fi
+    INPUTSRC_TYPE="REALSENSE"
+    decode_type="decodebin ! videoconvert ! video/x-raw,format=BGR"
+    pre_process=""
+    cameras=`ls /dev/vid* | while read line; do echo "--device=$line"; done`
+    TARGET_GPU_DEVICE=$TARGET_GPU_DEVICE" "$cameras
+fi
+
+if [ "${OCR_DISABLED}" == "0" ] && [ "${BARCODE_DISABLED}" == "0" ] && [ "${CLASSIFICATION_DISABLED}" == "0" ] && [ "${REALSENSE_ENABLED}" == "0" ]; then
+    pipeline="yolov5s_full.sh"
+
+elif [ "${OCR_DISABLED}" == "1" ] && [ "${BARCODE_DISABLED}" == "1" ] && [ "${CLASSIFICATION_DISABLED}" == "1" ]; then
+    pipeline="yolov5s.sh"
+elif [ "${OCR_DISABLED}" == "1" ] && [ "${BARCODE_DISABLED}" == "1" ] && [ "${CLASSIFICATION_DISABLED}" == "0" ]; then
+    pipeline="yolov5s_effnetb0.sh"
+elif [ "${REALSENSE_ENABLED}" == "1" ]; then
+    # TODO: this will not work for diff pipelines like _full and _effnetb0 etc
+    pipeline="yolov5s_realsense.sh"
+
+else
+    echo "Not implemented"
+    exit 0
+fi
+
+# Set RENDER_MODE=1 for demo purposes only
+RUN_MODE="-itd"
+if [ "$RENDER_MODE" == 1 ]
+then
+    RUN_MODE="-it"
+fi
+
+bash_cmd="framework-pipelines/$PLATFORM/$pipeline"
+if [ "$STREAM_DENSITY_MODE" == 1 ]; then
+    echo "Starting Stream Density"
+    bash_cmd="./stream_density_framework-pipelines.sh framework-pipelines/$PLATFORM/$pipeline"
+    stream_density_mount="-v `pwd`/configs/framework-pipelines/stream_density.sh:/home/pipeline-server/stream_density_framework-pipelines.sh"
+    stream_density_params="-e STREAM_DENSITY_FPS=$STREAM_DENSITY_FPS -e COMPLETE_INIT_DURATION=$COMPLETE_INIT_DURATION"
+    echo "DEBUG: $stream_density_params"
+fi
+
+#echo "DEBUG: $TARGET_GPU_DEVICE $PLATFORM $HAS_FLEX_140"
+if [ "$TARGET_GPU_DEVICE" == "--privileged" ] && [ "$PLATFORM" == "dgpu" ] && [ $HAS_FLEX_140 == 1 ]
+then
+    if [ "$STREAM_DENSITY_MODE" == 1 ]; then
+        # override logic in workload script so stream density can manage it
+        AUTO_SCALE_FLEX_140=2
+    else
+        # allow workload to manage autoscaling
+        AUTO_SCALE_FLEX_140=1
+    fi
+fi
+
+# make sure models are downloaded or existing:
+./modelDownload.sh
+
+docker run --network host $cameras $TARGET_USB_DEVICE $TARGET_GPU_DEVICE --user root --ipc=host --name vision-self-checkout$cid_count -e RENDER_MODE=$RENDER_MODE $stream_density_mount -e INPUTSRC_TYPE=$INPUTSRC_TYPE -e DISPLAY=$DISPLAY -e cl_cache_dir=/home/pipeline-server/.cl-cache -v $cl_cache_dir:/home/pipeline-server/.cl-cache -v /tmp/.X11-unix:/tmp/.X11-unix -v `pwd`/sample-media/:/home/pipeline-server/vids -v `pwd`/configs/pipelines:/home/pipeline-server/pipelines -v `pwd`/configs/extensions:/home/pipeline-server/extensions -v `pwd`/results:/tmp/results -v `pwd`/configs/models/2022:/home/pipeline-server/models -v `pwd`/configs/framework-pipelines:/home/pipeline-server/framework-pipelines -w /home/pipeline-server -e BARCODE_RECLASSIFY_INTERVAL=$BARCODE_INTERVAL -e OCR_RECLASSIFY_INTERVAL=$OCR_INTERVAL -e OCR_DEVICE=$OCR_DEVICE -e LOG_LEVEL=$LOG_LEVEL -e GST_DEBUG=$GST_DEBUG -e decode_type="$decode_type" -e pre_process="$pre_process" -e LOW_POWER="$LOW_POWER" -e cid_count=$cid_count -e inputsrc="$inputsrc" $RUN_MODE $stream_density_params -e CPU_ONLY="$CPU_ONLY" -e AUTO_SCALE_FLEX_140="$AUTO_SCALE_FLEX_140" --rm $TAG bash -c "$bash_cmd"
diff --git a/docs_src/benchmark.md b/docs_src/benchmark.md
new file mode 100644
index 00000000..e1c7819a
--- /dev/null
+++ b/docs_src/benchmark.md
@@ -0,0 +1,101 @@
+# Benchmarking
+
+## Installation
+Build the soc docker image, plus the igt image if you are running on a Core system:
+
+```bash
+../docker-build.sh soc
+../docker-build-igt.sh
+```
+
+After the docker images have been successfully built, change directory to `benchmark-scripts` from the base directory; i.e.:
+```bash
+cd ./benchmark-scripts
+```
+
+Then install the required benchmark utilities:
+
+```bash
+sudo ./utility_install.sh
+```
+
+## Benchmark Data Collection (NEW)
+This section will replace the Benchmark Data Collection section below once it has been validated.
+
+**benchmark.sh**
+
+Before starting this script, ensure the ../sample-media folder has the video you want to benchmark against.
+
+The benchmark.sh shell script is located in the `benchmark-scripts` directory under the base directory. Before executing this script, change the current directory to `benchmark-scripts`.
+
+This script will start benchmarking a specific number of pipelines, or can start stream density benchmarking, depending on the parameters.
+
+Inputs: The parameters are nearly the same as docker-run and docker-run-dev. There are four new parameters:
+
+- `--pipelines NUMBER_OF_PIPELINES_TO_START` or `--stream_density TARGET_FPS`
+- `--logdir PATH_TO_LOG_DIR/data`
+- `--duration NUMBER_OF_SECONDS_TO_BENCHMARK`
+- `--init_duration NUMBER_OF_SECONDS_TO_WAIT_BEFORE_STARTING_DATA_COLLECTION`
+
+For the remaining parameters, e.g. `--platform` and `--inputsrc`, see docker-run.sh.
+
+Example: run the product detection use case for 30 seconds after waiting 60 seconds for initialization:
+
+```bash
+sudo ./benchmark.sh --pipelines 2 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 30 --init_duration 60 --platform dgpu.1 --inputsrc rtsp://127.0.0.1:8554/camera_0 --classification_disabled --ocr_disabled --barcode_disabled
+```
+
+Note: when the platform parameter is `dgpu` and the hardware is Flex, the benchmark script will distribute the load among the available GPUs.
+
+Additional sample command lines:
+
+1. `sudo ./benchmark.sh --pipelines 4 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform dgpu --inputsrc rtsp://127.0.0.1:8554/camera_0`
+2. `sudo ./benchmark.sh --pipelines 4 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0`
+3. `sudo ./benchmark.sh --pipelines 4 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform xeon --inputsrc rtsp://127.0.0.1:8554/camera_0`
+4. `sudo ./benchmark.sh --stream_density 15 --logdir yolov5s_serpcanyon_grocery-shelf/data --duration 120 --init_duration 30 --platform xeon --inputsrc rtsp://127.0.0.1:8554/camera_0`
+
+**consolidate_multiple_run_of_metrics.py**
+
+Use this script once all testing is complete. consolidate_multiple_run_of_metrics.py will consolidate the benchmark results into one .csv summary file.
+
+Inputs to the script are:
+
+1. --root_directory: the top level directory where the results are stored
+2. --output: the location to put the output file
+
+Sample command line:
+
+```bash
+sudo python3 ./consolidate_multiple_run_of_metrics.py --root_directory yolov5s_6330N/ --output yolov5s_6330N/consolidated.csv
+```
+
+**stop_server.sh**
+
+Stops the Docker containers, closing the pipelines.
+
+**stream_density.sh**
+
+Use this script to test the maximum number of streams that can be run on your system. The output will be the maximum number of pipelines run, along with the last FPS recorded.
+
+Inputs to the script are:
+
+1. CAMERA_ID: the video stream to be run for the workload. Needs to be the full path, e.g. rtsp://127.0.0.1:8554/camera_0
+2. PLATFORM: core, xeon, or dgpu.x
+    - dgpu.x should be replaced with targeted GPUs such as dgpu (for all GPUs), dgpu.0, dgpu.1, etc.
+3. TARGET_FPS: the minimum target frames per second for pipelines to reach
+
+Sample command line:
+
+1. `sudo ./stream_density.sh rtsp://127.0.0.1:8554/camera_0 core 15`
+
+## Benchmark Helper Scripts
+
+**camera-simulator.sh**
+
+Starts the camera simulator. To use, place the script in a folder named camera-simulator. At the same directory level as the camera-simulator folder, create a folder called sample-media. The camera-simulator.sh script will start a simulator for each .mp4 video that it finds in the sample-media folder and will enumerate them as camera_0, camera_1, etc. Be sure the path to the camera-simulator.sh script is correct in the start_emulated_camera_pipelines.sh script.
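+
+As a concrete sketch of that layout (the video file name is a placeholder; the comment shows the stream name the first video would be served under):
+
+```bash
+mkdir -p camera-simulator sample-media
+cp camera-simulator.sh camera-simulator/
+cp my_video_file.mp4 sample-media/   # served as rtsp://127.0.0.1:8554/camera_0
+./camera-simulator/camera-simulator.sh
+```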
diff --git a/docs_src/camera_serial_number.md b/docs_src/camera_serial_number.md
new file mode 100644
index 00000000..e8368dee
--- /dev/null
+++ b/docs_src/camera_serial_number.md
@@ -0,0 +1,24 @@
+# How to get RealSense camera serial number
+
+- Run the built docker image in interactive mode, as root, with the host devices mounted:
+
+`docker run --rm -u root -it --privileged sco-soc:2.0 bash`
+
+- While in the container, run `rs-enumerate-devices` to list the metadata of all attached RealSense devices:
+
+```
+/home/pipeline-zoo/workspace# rs-enumerate-devices
+Device info:
+    Name                          : Intel RealSense D435
+    Serial Number                 :----->serial number<------
+    Firmware Version              : 05.08.15.00
+    Recommended Firmware Version  : 05.13.00.50
+    Physical Port                 : /sys/devices/pci0000:00/0000:00:14.0/usb2/2-3/2-3:1.0/video4linux/video0
+    Debug Op Code                 : 15
+    Advanced Mode                 : YES
+    Product Id                    : 0B07
+    Camera Locked                 : YES
+    Product Line                  : D400
+    Asic Serial Number            :
+    Firmware Update Id            :
+```
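+
+To pull out just the serial number from that listing (for example, to pass it as the inputsrc), a filter along these lines should work; the field position is an assumption based on the sample output above:
+
+```bash
+rs-enumerate-devices | grep -m1 "Serial Number" | awk '{print $NF}'
+```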
\ No newline at end of file
diff --git a/docs_src/hardwaresetup.md b/docs_src/hardwaresetup.md
new file mode 100644
index 00000000..be2c940f
--- /dev/null
+++ b/docs_src/hardwaresetup.md
@@ -0,0 +1,71 @@
+# Setup for Hardware
+
+## 11th & 12th Gen Intel® Core™ Setup
+
+### Step 1: Install Ubuntu 20.04
+
+Install Ubuntu [20.04](https://releases.ubuntu.com/focal/) following these [steps](https://ubuntu.com/tutorials/install-ubuntu-desktop#1-overview)
+
+### Step 2: Install Docker
+
+Install Docker Engine using these [steps](https://docs.docker.com/engine/install/ubuntu/)
+
+### Step 3: Pipeline Setup
+
+Once complete, continue to [Pipeline Setup](./pipelinesetup.md) for pipeline setup
+
+---
+
+## Xeon Setup
+
+### Step 1: Install Ubuntu 22.04
+
+Install Ubuntu [22.04](https://releases.ubuntu.com/22.04/) following these [steps](https://ubuntu.com/tutorials/install-ubuntu-desktop#1-overview)
+
+### Step 2: Install Docker
+
+Install Docker Engine using these [steps](https://docs.docker.com/engine/install/ubuntu/)
+
+### Step 3: Pipeline Setup
+
+Once complete, continue to [Pipeline Setup](./pipelinesetup.md) for pipeline setup
+
+---
+
+## Intel® Data Center GPU Flex 140 & 170 Setup
+
+### Step 1: Install Ubuntu 22.04
+
+Install Ubuntu [22.04](https://releases.ubuntu.com/22.04/) following these [steps](https://ubuntu.com/tutorials/install-ubuntu-desktop#1-overview)
+
+### Step 2: Kernel Update
+
+Follow the Intel Data Center GPU Flex Series install instructions [steps](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-jammy-dc.html)
+
+### Step 3: Install Docker
+
+Install Docker Engine using these [steps](https://docs.docker.com/engine/install/ubuntu/)
+
+### Step 4: Pipeline Setup
+
+Once complete, continue to [Pipeline Setup](./pipelinesetup.md) for pipeline setup
+
+---
+
+## Intel® Arc™ Setup
+
+### Step 1: Install Ubuntu 20.04
+
+Install Ubuntu [20.04](https://releases.ubuntu.com/focal/) following these [steps](https://ubuntu.com/tutorials/install-ubuntu-desktop#1-overview)
+
+### Step 2: Kernel Update
+
+Follow the Arc kernel install [steps](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-arc.html)
+
+### Step 3: Install Docker
+
+Install Docker Engine using these [steps](https://docs.docker.com/engine/install/ubuntu/)
+
+### Step 4: Pipeline Setup
+
+Once complete, continue to [Pipeline Setup](./pipelinesetup.md) for pipeline setup
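+
+Whichever platform you are on, a quick way to confirm the Docker install before moving on is Docker's standard smoke test (needs `sudo` unless your user is in the docker group):
+
+```bash
+sudo docker run --rm hello-world
+```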
z&(GvPmc}XBT-)ieYJ9|I(osIy*sb?&SruiG4Ac_dTY9fQtx>26|LUB69mz%_4#sWC zJt%EfScBjjWsUnMdvY@|V_7W(By!`lsI5=5hEY4!H;;7(GisG07>97WG9hw0QPtL<4{ z3prWem#!-pRf_5CKW)0G};ece)=ab_-0E)RQTnr9D`7N=Ytce6@> z#8Yp9EBKy!DBBa@nHo)ckB_9OqPVTVwT*)GW#oQ1sbvvr8h9kd5_KIKZAB_r{~Opipmj`sF6tD|X7ho;_{0VOeq*@Y;zOXzk)GAwCU4)b_8m7E!fG?Z6f z^Ehr(mLo+KxQ3a@$Q>v4XiubwAXb$U@Fwxp)h~%EG_JBFt3!iz!OIiy?3P0JY`uq; zpd;(&2ehg$WsJ%NJ<_l z{o1d$5V&$w8qXRZi#1gHPU{fps0CoqbiSBnkNP;6HEre;Hlh@ob}Acy)3O>_;&FDU zQDdp5RpK1q7qQFt^7&;~7OzQ^Nk^LVU~-Dvi_crJ1SHf4HH`?uGQuit@$H{W5?caZ z#83ePM0+laedW)Z`<=PvJ~+Sl`*&rI;XTtexNL=RA8KdS z&S&v64}zk$&j5$zrK{L&gIn(IM}7dOjwk#8NC)vhW!?ct@LzNg|9e|7vb4EhE|C_< z&T~ZBj&hxd0fTg4*Eg6)!XsAO@e$-)M?p}|1wF=sqgi_}f!w_yOgj+uytETjO*^Q= zl@7SfD0LeVHn~KK|3D5FI9t6@8!iH%2dX>PhjO(3(+B7F$b*#5>}4I)p|1p=&@t?D z`|TQgY5%o3JIZ2`y`h$tsZcIZ?^}*P`+Ax0!?VtpXK&Fw9oXrxk@B47@p5y1nbM#UwOID7v0wv*hO3@jpqwk5$##GAB~jdsN-7hC zzeli$lYuwZRhzaF`CIcU=Zmt!I_v?jA0p+DSibZ1T=Ix<C55px%X~fdpldgEh~#dD7)fa zj#Hya&$q7Z?XoFj20<{O6(vq|XZZc~c>}NS@226k-Qn8cw%&~HdC!p`g z$88B(%k}Hybr<{@^0qa-u3hP~kf0Mq>gkss#z3hjCzn<0$9>x+mLfr*{%4&1bi!{w zGq{e?v`cQP#km_ztVCI4Nj=;G%u}GJl<5ifprzh!*pPyinM54d@kObjj)SLz^)EpAe=#BT0~+9y*W(Zcr1_xwW^ z|BCzBF-ITzWU95Yy@GD69V=_b21sgeS7t$XBTnc$0nSZ=GdAZl0_ws`{Nb$vsx`;b zlokC#{&q3Rh6wys@Cnsezqh%JSPo})$d>(~pJH}rC^8XC>p_t0f=BT%a?ClSLX*@$ z`#3kL=YazMY1M~uo>sw<0=EnHY9n)GfZM_!|YHw@G zkzdEmggFn5#mq%7=Yl*+>zYIXiw&HhoFjJZv0nb7u^)G>Y^X=nn(aY4x6b|kN=djs z)7*d;jPRyYmkh9j${+r1|B zQS_0%*5-2_PZV7n;|n_}!IKhRxoRGK+1f7Of!RwuLxhD>HMiG?-NuDF>FhtLnjWti zrx4MBw7y9{cDAe9ZoEyWB`hYNtKxFTfLpN(;jt(U1p3IwNubVfCrQcTUf<#l3fPCs zYMOfK3oa#t!+}Shvoww9z2{^wy6V*2H#?T`+Kr-rzdEb6rvjY{j%@%%nR5&*XwW3V zTBl1R@Si|&m5H&SMOWSM;IS!$6wxRAnn+jz|0A+)96Wo;bD*LsPy>bzJaxUl5>ov_ zvk=v$g0>^<1>IxqVP{okgTz4(kUPpiG+&&L78xB5{c#b~d9mF(9*zJPv5t10ib%y# z$0>f>+3;E}{m=Mt>+YOasRM3Lam!;GL9}bs)BIl-r>T&z&LpVK-HJd7^)K7V?S$d| ztD?|$Nv;?-PnF!O&s<&C%vlT}dgYD_Yq>qunn1 zC-}cWV;Xceq<dBY`DbiBD5h4*Oj3* z&~UL>qfqi-6wl|uTRKW5-t;JAEKiS`J z8r(S58V?0>FidgrR}ndDN6%q&1&|LZPC)A2dL%z+Blqq`LtH-ZZZrIo!P3&cJ?nrk zgxt6hO+@Z&t}a35x-&h`>AmooAE*@?37q`Zmk(A@=z-uoX0yUOQ^6{p!*Z;V^tJQf z)!m#t(LGIC@4^lNhoDVqZLOmJoCsT6R#p>2n6*YK55ga;I}~ z-;8TsjKN-dP>Qyh?c#G1)Y{A;Bfp(b%NQ)Pnx|8Ugxx(>Q*iqdg-Ghw-j5)zei5hO zk@YRVxJ7M`UN6l!xS4Y!mIfT)z;;c>pqad_19F$qeC*<{4E5~p*;^&|UNXWYBR3zi zLgKvWW>I?6ztNY$*F32*(poby9tdX_LPcwM@JO`X5b_&u-aT+pTU%Q*ru2NOGAf19 zz?$%nwkYU*I3@~zh5)?hxK`-i);tBmf{j&w(p5+E+ZrDq9C$85RjjVuBn4k#poZ3t`(-E*|bRx@y|#K>Xg3G*lrZ z7RCR%zsk*+z!Z{iJut{SBcTyggCoGmDg?`fiegc1y7=Wme!mbaY~7=lWCC% zaK?E|4->5v@G;3+K4cyLLJ{K>#fz!D7xQyay~VE|N&SUrqL}wFo@yh1{)E1k&2Ma7 zyd}3trAiSVw(<}J6FH!l&N=p%nX8mT#H!_Y$7y#PoO}9C_K?SEqc#0}+S^1I99)#Y z0%n(hagpBKUJ-}7;e0SPOYdmI@UUDuGF($6z>2pW)b)YKyWhS2%>_d~rnwOY8@b`E zP*Da)Y&|HgMN)4*v_-@YrCq!JE7kkTzpMW8FTU5$I&=5y+AMeGhk}Ow$!#`ydYply zY_W9l{#)Xf)!_*e8j#auo4L8EE|1X;ZU+I^z>NpF5+mEWuw)5B-~!6>_JX3=D8*m9 zQq*cm0PZ59z4>Ol%A+*XhrdjLmwCgQ%jW!%3k9&aS#y(4`Y;A+_w)#_kEWHOp4mig z)y|~1q5j_<3q#YDRaF$2((jl`x;jWd`a{o%g6QsM6S&MtJUn77RAqHzV(Fbj7h;kU z{B75Noc`rCc0RG*x<<3yKOYvp9LWBanFi}5mW>TJbydNT;b0N3`BD9Z`H|F-O8+5p z(64E5#Y*SKP4@^{yy&P)GQ#mq+?-~RURgcf~NH2cVh2LA5PG-8jc{O zU$kN+i>r3y);wNb!WGNBER4~Mu;ed0Sk{yZz$6-y&ZxWbmcoojihh&2hJ!f(w9qOj))~e{DgzFSG%-%!aJmA8UuS=nA)<0u`O2j z5vGb)FHf`QY8z<%$3mO4-bfecZUKH7 z8HiMHALRz+M1yS`C>VAy{nv0F=){r+3YKzR_9Tinns&oGbjHEf$8aMb^@YgZ@l^(= zU}>R@k(G7JAT5dP{Sm{ZlbTD&Wnv3YfX4Y6!q(S?tDhmkU;l(#l-bQo4p%^%eu%t` z^v062`@ju4GQV>V$Hs`Q{GC|1-Fqgik{*Zr;8m3d55TN&c_aGqOPaqR?yb?RTybqr z?fOSA8n%*QODDm)`UbWo0d_G(S*)oKTAs-btAech>;@_~^$n?+&Tm$(iM*}N?y9#~ zx|ZV3>Y}=r?f&@r820>aw9*Met^Ox>hlP`xs0#6m^=5#Tc 
z&s@HHNbGJgPRNxxK}C-sWXqBS(oUkK`h3vmR>3~c#&Y&>ByfVeMu{*Km5b}n)>&^93(2bf^_r- zM8$VK%1o{?Pz^XW6TO5dSE{YB{BI*oJW&w0)*A#F?fvDo8`kw*M8OgK$aK%?-Zo0( z-y%9g$Jt|SCscAhrEGeZztacyvVazbemWgD_=l1)g` z+=j9S+S^+^isaot=!<&th066DFUoxP=9+g^I;+-K`El8-aP1d}M*&6Sl<8;(tf zlba<-p`T7+4@1aB8>gK!jWq|tj%c?-@Q*z;^)s9hdQ@93xuVI7<4x^dkMF*vz5DDk zFmX<9NG>ef;rcL$bADQsX!_(uzSZGBQ_hK_-x3)T*g$yY={Yn%{r#Y_Uda}F4m zujddxKk(Ots)(&IK?Dr7o?;5%gIoEiT3#8ruW+YuRZO)AxDvy@sO=O-9rDMN9ckj$Ugv^{AAjk1+07M7b-)LjA=&+-bppF@15ZU1YNXB0SlsnrZ5*jR$anTX zR!ev4MZsc0rKxuVGT3f}l!|csNYSiq(hzyd2PDc$CNc&nneitJo!>Wlu3R%R%V{oZ z85FewmoDbZiq+q1W~KX=N`{7x7M^%0EgbRHsJ?OT=5B!S9g8RER1ytONBgwvd5)GQ z1}OA!}?BRVY?iNqni0&+k-fDWtOePYwsm??j0 z8PYJq^s+t`K3+GXrdktOZ&T5GSn2PZ^66Z^ySDwvkEA=hdkK{vt+&y}^L{zKD*lVU z{qUu3u?1jFlZy68bhF9v_W3fru^|2Nejs^kwJ$A@M%AwWJCe*2T*F?GCUC z!@qUh-(#!0E#IqcPP6$~pG8>BSG+>D_lJqdZoFLNXyQjX8Q4UnGnU+h7!{WnDk^k+pgoAS*@4ku2k9vMGhYuw7z-d?0LfYSjE4kQ$75CvuEsTM= z?ON&)C8OlMZIY`t74AOw;N5p2K4MX3P8 zW1zDS*IpaQSG<0G{DYc*A^%(8nU(%MnyQiycn_ca0T&e!8H*Lv`jzxv)jLZ<&F?eC z!9m8&a;wrEv)+@g+)2q&ba{>g@})=^CzJuQ4H6~(-wu3?LPgejjB6|2LCQs+N<~a;YJ75`2@o&g0E?gXq3f=M<*oVb;O3x>UwunK zJz1KRFd(zSo`zUrAhSuH?sPJNu(PwXiF=HhXON-?fpkRY#QKjIIyS%t5vVLy1@Uji z5cukwYP@n}9bE7I3DSAH`Pva|R%8^&&I((U7Bz2>x2qfJ&eVv7Uc3Zyb4%N2`E{i$ zFX6YhRhaJGVqZ%gnj*1rY5lWLbU7<_bBYO7@bXK*3%ld~mNm0HwAbcR#6tjxXC8;+ zJA3R|v(S4J3$ z+um5X%RX^V89?rO|F|@6gb2&@9I~j%45c3xbw}Mq0d~98oC(aITUMxRT-0LGrb#F4 z(a6y8EU`P$R`a1hqIqriZEzbNGUwuYpQyNXpAMgSrEiOTIGJq?Zny&mjFfZD=VNKY z0p5=4XF)nwiwS@VXLd|(OnfJ1nr-VOc0DK5gy%uJ24 zDJQ`=B{%xCGcD;W{}K=}Zkax?t#*GUz9N0yrFUm*9Rp{A9S>+a_b42S_gm$|JhE$L>S8R6{Ri^rJ;Aj z^4wpj2x}H<8GVyREo+io=0ab_th+yurDy|)GIgXPf0t2)3}nTFZ7M%Y6#0j$Co{AC zI;7!EEqEqkrI}a7YveM)m_r!YN;x-(e6;)@(IJD3<&y2WiS>@*&8MBgc7v)5I_9{x zK^2_=v;PLMy+3z)ymv<_uBYi=q)msXr0Z!XfPNL929i|8+je@kfN(*_?>Dkmx-e$6@kZa(pa5yb+ig)=Fg6X)tvCwnx$p{ zcLv=#&v9C*)3aQr|6bY3nRchA+;$H+6Cd^E2C&JWd_1rmo8xI?!vsvlm7~HygXcI6 za;=<`huTI*BpBo{_G8y!?vqoIJ6qa>f?8 zP7k2gqJ(T3=`Rhp8KuA%TmYz{3GDfo2l;A%Tq>AU+4+0A7C0A%nNHfK$fL@ZHvGe{ z{vHqy=lK#_$I@`_Y|%7<0R9^sXmA&3-~sOxu({!G_8OmYivZkr=Bjf_+aF|K8-fBK zz9U8R^4XDtpkfDX40d~AJ12bMY5k9h=Rj8VcvxHewATT5nU1{(&+?(`E1E(&h88SV zOsXBIIL`T;yp>460UaO|@Vupv;Sqb$&IO|}^9Mlm2_ypJQ~nI}cJY2(z{R2d10b>E zz&bqOTKo{+BMY5ACqEfzGASl-Tldt}-iT`V_b`}$U7HHT`mRYX&VzgfkPy%&*-q`~ zTdY0T_TSlLR#S+?+)Zh0_&?|=t6*bH|QfL zwqJaBREyAl*r0D%sgBlAqFVI`cH7?-yLl#WPMBfx3@*1fV-1nj-xB8NCk0NQlWG7aNwG9ogf9mSwt$~sX z1Dm4z?;pkT3gQ8STthy8{yc}F14v$$*XqwUxWizgl=g&PvLsA1Y{Q4Zkfe>_IjeAr@Kzych-0i2qg!b*rUbUr$}T0Z-1^%l~60ch+pOko}AjrG5>4dE>h^l15^ay8peUHL_?|x zUZamP)dMR5@REoqJ_YQmGY?4rv7UxDUVS&s)_r?3=+=aoD1!yjw!{sGrMPmRsL+{g<5@1?(r)`zu zssOiOX>LB#@&j#C6?3j5`Th0KN+7Bt+`PN;L_nNYkY?`({tA$#I3}G;RD4Jm)V3Fr zyJ(bg66y8uVJ;Yo|IP`u;0cg?0|Ns(5O+MyHtwl)?Gj_{G=wqy&RUp1ulJd{3B`G& z5beY{F`p-8xKVUiGAEDurC*C_jMS4n(21L*OcyYDchYCa$#K1tIZSy(QaX0QtZhjF!N%POMZcUag zY0|2_q{p)ZnT zh0dFKqLa){0xg#X{_1U>>-sRyEcx3ez$itAl!}Ft@{T5^1D4o*_;q4U1Z!rmxdHSt z1M)CAMY3XKzc!Vk4sO5q=%PAy#*0mea0gOK%aiZ;+~xiJ{Kmt8un2A^k)m`lb!)N0 zL)Wm{f4)1!pcrDVJd)B;AnYUjPIun)<5*FuD>9=4HB^U6{dpNDuf4Z9 zEe!F(VcNG5b(V2(36!I)8Q&Mx$%rb?E5pic(u;J!hPdMg6M!*9lze5j;_QuBp0=LlG-pyVDC0#( zsdv*ZlP|eU+OYZKYq$5B;VL%>Nx(5omjE2Gps*6Abd1ei8K#96H6n)h;90()N_v^>7>6ewR@tNc` z;x+agzXr)8Tb~DcS2*b>IB9f_U#|x|Pwfv>WUaoS@K`iPYK%GJ3r%Zu)qN{}PGBjE z{YtZ9X8MPvs|kN$N38G~5JEc*6d4B>G;xAM{tsd~cL&o^B~O$n$xRMQi7#V53bru{ z`X4eSf`xySqECbRzmUYT`SshO;N1Tz>a15o;FmyJ)@LK=$ z@w7!qqlwAyps?L}0+e@i*H1WL$e6@S`EvTH%)nlj>+X7Mvxrzs3w=z2oB7Ne5MnrS z;AZBAYNKPQ1bIwXd(RtMQ|G#VXE+Vwi&P8DYp4#$xAW8H`k<7WiXlUSJ}`&>B3j7J z<1c0B70*1C&@eqs#@6W;DQ?e~a`MtvlfXCoQ&vNU%I~9f4)AxGnQhWg?C=|%@=;$O 
zeRgp^7)SO*GSYG0Z(zZ8YAPcM#p*#D<$xC@VRdke zZJ>xR5WU0QA|G|f^UKT zK+bMNII$*UDY{r}ZMg)35e|^ePxVUiPNq>Nv{aQY1`#ot+hxf-th}?uxT_UCScpC{ za`&G%+qA_IA0!0#&i?wGIh!FXOUJhZH23W(hLoVKCpBn^@M?0)Y=_B~nkBorUKc%W z5jkEM-}J~BZ0khQ-Uur|1j&X^gl9uN_V?~@jlcfMa@hTGJ>0c#Y;)^4Ad~&hH?dc9 z$&=Ff2G>Q011H3KBIg($*(s*+wn-CT2BkIHYS@-{CW#2wISn~jP2qKeJY_p3T{FWn zJw_{h>chK;OM6VaFq_rJWPpcFt(e{(S$&;vS?xw#@U$UYU)HjR{OpGM756?TvwV{e zh#AxvDg{sMOwCr~GZI7Sh&bN6nL5 z`C+p@a$nU=z8avJv?B#lR@-$g_E*ikdbWjwE+IdN!vG+UAMM!5J6u9KLvDN&4Y0N| z5KrRggYc;&Mn%rpBwpczw~I?V#)6zQoD0?NJazdyq}u&q9GOI(5Lkw?9!{fmgT%?i z$1Yw-eBl0!8`_5q5q-%*LtS};)An<6d(gklMafdrscr9HcA|_xa9tx?5KQb61p}Fu zYgg!Jmj-6~&LWm|{yA5GvZ}6rhcRJw8Pb^;a<8#XTC-c7^z)7+DgX0bZQK?92NFN0fxUNFeQ-3tR6ulLQaOLl zH;yHWmmXPE_B>N+vR~T*9uoNb=cH(3jgo{|gv@<}?@(T;R$X2*N+_aCt!VL8S{m#g z%zEvt#_D!DKV!h+jK!%+h%|)cul(=uUIS8a zFbd|dR~Yz9jQ_;0QB@v715>^ElKwB?`@MAoP_N7#OmOF9b)kVdvj-qxsa<#Z-Q%RYi(C1A$H@_K6|tGnkB}n{ixffopcS) z;IYIvM!~VBp>>ui*Yzpwo)dxj3M>i#SF;=9b8sn5YeynUt|CM07A2C=+?7mMkl!5P zQ`v1zInO$UXe2v?0ggZxTrpjs51TI~*iBAyRg_0;{Ll)1krW7S1UB2u%Gu72ZtReI zj}A;}=q2j%`x=hQEoi+<6$*#XQsjCg09-*ynh&|?|52AL=4-RxoMf6~_R`{=RVU^k zmM7JeA8TJ0gK2zMYzuWT9Ta2%p)H?eXiUKRLFe~cH8wrTh1$h)O~?aA`;7c}*IAe*2N-pXw;+Jwl2*(+(_ z_d`1!*je;R{4ir61)$qt)PB;qS}?L^P@-z9OyNtCjhjn*Qmxh}nP7QotyPM-lH!d zjTv=9K9jb@N!PNmdAw$W5%;)1Z}y|KtkLt2WwDZFwZ}9O(S4wE-x@y`d!JV{v*0q% z4Rwl=0R&TT4!RWf;x5s&HhOP0XZe{G-ml=ql0P(%dpnNa)264^0lkeY5Y*$^5{zHK zmt)~Sl~uXlZ;bwins3K!&z5?P^4bBuY5}0?_=<;8E>e{jNx_WTA*FjMDYDd%NvroF zfMnZ0A0!9h)&ZWz2h3f%`CwOFzh48;Wb#-Md*=$E60HK#Ltg#2yp7J_suVhFFxaXc zkfFAL$A*8#m33u%_J6$F+;B%2h#FoLflj6zOWXZn`mN)Q#cK(9<0Hnq4>Aq(1D>S? z09xFPjfH`Bj1|^)VL0!ohI{Xsa26%l?p?%UNf8>rUW0ZSBwqb@cA>K=*gv)zB!}Sk z>pLs0_|Wya9~0XqRBU(28Ah&p!C;L+vFeJ(o*Pg)h`HBXm0d zs7w8<=!$z{^zL#qAaZg^)`DoI)k7>U8fSSKq{>yO`5WDDFLdewQoV5IsGD7lxu*y4 zY+5w0lKkwqCotW)fn?GVLA3Qip>$?7XC4j8c(9-O*uUf@*8#B)*v2a+& zo4$JYNZS~BkU7})INF6aQi1mxtqkcE#{KHZ(-n?=0FVLD;9n2+CMi2R5u`EHikjmm z5TWdf*GPFYhEYHD`te6ez{q?tarC-(P9F8Wu5xY0&t(<~b25N+zBvH;^k?JW)|fo4 z`U_9S00h5|0nEDWA5J*t69B>P#C?|8;RyV(f|!Hux6C@1zrVjq@6El1{itSxeDskA zXj#uh#~f7*2^2mw#8*1pb3f11JQOEb1KIgun&;;^xsSg-V=4C#z%3d1YYr)w2;P%> zAEQb|pnvjGR*uVG9mYfAmWj(+q$Vp&3tyIiAtE*#(C85N_t-EdZ4?`~g>u;S@KG=Y zmVo3plO<1KwA!s^;YY~#l;N+<8e%` zB|m$ea(#KSNqeMJvmuG2;Q5inp%lIzdVlSwpT+FAx-Xym-UK$hy$Ec&5cB6~oiIQ} z{Ds|AiVFhD$5cE9i}?n&+<%n%$d>aY_rn~t(CncWu_IW%+{KG%_U#+2p9u;{X%E6> z%2Gdng4o*9B&C)2#4t#cUPO}o&x>p9(FSEec!u*yDm-dZ<1bnIXlPcGHL-OSOrImy?QK3*je~5 zVMpDErm;39+*U%4`pJ@X$P?Y<$x4@mhK+s$VZFP-$gaq-O^%)5SP)C^zBQ+iVIa`_W^YWkP$8xG(NOK z_%DaLp!n0pe7CR(+bSNtL+g#T*xtSJ87IOptS9Z_erF9_etdYgj&nysxxt8GQQ^^$ z-phGMgQT5!UW)LgEQtB4U9WkoU~GxBwbKvrkp{F&ljTfhR}e1%4DWKhgL+)Ym(#vB zJw;I{M@*Esr0qQ+SK(@5pVnA|V?KaJFo^089*W)&hde;Oe028V>wC>l3=h$z$TyoHGq(zo}TUL9Z&1KHQ_ULDDl>TjY8DE2y)-JE4+;oxn~T>w0nC8x78O&DBDgn z1S25xUyEW(tMkx7!BRhC`e{wj>%?eqN-LUi1 zh+Lg}=~`uEV`%%);3ea1f4?Zyv-{q)SV3+J+*eDLpOu0>uxKVQS4e?h)sRgI@Ce zL*WV98bZ*%Jm#XW>Z=sUd7Z7@-)t7s2=w7)H#MDl$i<5bX0O#%;}z~Nz>{|u%vKTZ zCuP)(faYSnABC=+FwhEWm;U;s^NsSd{OmViet^6ce(slcc_wsuJaPonK$?s2noqPl zsZeVH2wjBZnj2=Tk#ox`#9^J3+teVJfs!C!cnu4zsnwVjA2W)Dgc+Xh|WEn$0r zYS(%uYVLh68oyFyFE$U!TxL(D|9E<$F{ss#z26vFssW!`^QZVa?5>3`t@iY=tI1`S z=5`4O%UNUN$@GLYX2AK+>T{~b8M8@uMa;`;L!Jppjecv`b92}VC@1{Ypu+tK_Y(C_ zjba^gwEv*!1JtxZR?q3o=`1K|YsTM+_NG;XTmS9)as$#Bwf3c+5;RZPgExf*eAKWk z7!)Uo*7md^3&)VG9;0M$&k|3yfD0oI;}^7k`o#}}=@~aNX~FyKEB`LEMhl$F?2!4L z`rYo_RYeH5l+;AwgyN?-6Y$j~%`QG>$_ULZxMupgLpt>qN-$zEevz_EL0ZpSTkrM6 z8^2xgz$h+W&fFih`qG|k(rc9gCzb|Ze=7YlEBJ;gJKHLl%vx3h?}cPKU`mi- zehEadcD?sI@^TqTyPei#9$|MpQt)+OI3q>+eyLvA5Owo>(InpCnmDP!%%G 
zIt-9vFLXu>uf*MO$STzZNa|dNcM*TxM$~NQ@au>oO7zQ%G6e17*aqWc#F^|cZm2L_ z;hJwoRY zgkmvrWkh}I=Ts=O%Rk)YcW0~rq7_>IbpI~%s)4Q%YxfPUP-a(OTz~#M_tb;S!=|E* zJw?mn8E_d5pd57QC%fc=dV$&OjXP&8&e|_K1l~}6b+Zrxn+yrg(^T=S)?As!tttWl z2Wtts%N~+0n!5omGw5RT>$ick?L2PPrKZgjyk5-?D`3W`_dZk=DpYn#vOlFti5-pmXR(EZ2#q~A# zjcX}ts7bG%pX_i`2DPG%ua<}Rb|)Kgs}p4zMWf|k+qHYM3vbxphEtYrVwNqU3653&ZZEYd_g&5`Sr4f%~agY?nv>t-SckaoRGZs^|!4^yCA5`8tfP)2@ zMwfqh^o+|qQEW#W|I?eBIiJ?v-kN%8$36SrhapvqcKgU?4Q#SIj9v6x545+ub;7n5 zeKWheh~Y3f1}$2#wesj1qM90Du~TNZCVkk-7Oz?yE&LZO^_Jrc*N?@0b1u_wcP~B1 zfDUhH1O3*KR8}FSn6ph@w>9l=djb2}BzwQ2bZNF~A!#~IRn!QPCDgr7NS2?wi|G_y+1HqG85YF>LdBpR1Dj3;kxHv+N3*o_Fpu7Io>vlW1G{Qzn)bsGMr zR(*H@)O0`4E$E0W&8vHmfbGcJFL!3$o$Y%++@m_Mo#pZy>5eMQqxBGYpj})5QaUIE z{O9}!w=M4b^O&;KI-vO>58F49jVTOKo~SRqxtSf*B4|wDY-k|!vCril+uY8 zS;@wRGo3G~s0+Ih`n&Ud+gk_kDEznjBVd4u_rT>IuPp9Zte77)oXx0vwi_Y*&%!)^ z&J_-$4lvC&Y%Bt8!8Yq45H~700LNsZ1s+L_4?@q}@lgWh=2OXW zr#KUCv&8Z72SVuo$kfxSvzKdcskaT=nNIYiX%ThS`|568r0%~@0HXpmWUX0Qvl|*+ zKsiulO!D3mzB5UR00vayeYbM?-pEw}%rgDKo+VI}bee{Kszuxw{qMMYATIrShc6}4 z<1cOpT#btJBm{Q}^`Z_4pk#cRyAjJ->|?-s4vwul6Lj zFBfj{Ny7Nqe0oSqox(oRN2U*ugf0i~IMiMy3+rlScp|8W8>i3I@=}J3j|+%bYjj#VJxD=8Nc52fH`R258WA#zNrg z0jyBBnOcoVRACXC79#f+&Y|k-QpaPZkj}h#H2z#%INechMDqWRs2@6L3$QO__-G*p zw>UBi3GPV9+^iw`bTGYoz=%KT1)fu0 zWN)AKTeei@&A3Vf7vQ9REOjB7eJjs?eFx#OYp%Oc_yAR|5kA}a zrXxKtbN+}{o4jUK=iOMZzDoYvZ|=Pe&(NBHWcH7C+)LhWsAxlCN+XH72{h|ZFgGw1 z7f2kMCv5n!vD{Dtgkqc_+vqT5&=m;wA-hs11f=w=43$PrMdkp zzj>nLKcpXYhz^JEul{b;ejj*2MVy-iIbUCpfCQvsOrYwvR%E8asT~3oXW5lhGcTt8 ziAo6sUY@bqE&?bQD_uG{#l=hbOaG*%rj7yXjicJxS^gZU-&QzdQCLBL0_T|qNbY+% z(n2`t3-I<8IDHIvj&`AVX_*_4mnbFyXeZXx$9%=YpiIxJ`|m{pYT1ueA}?cC}zM9{dZ8FbvdlBpXt5Tdrwg6y7aGuUCpQ5-dg#>ChfI? z^aIWNQT?td*JNLE)TMOdT)HPvgL@5lYlj`uL4w7p9s6I`8hp6wmZV_>9zd{}xFZtk zBY=={Tr0&Tp}XWYee8jhJ+~|Ozbw1nTC|ntWdDbffddvpJ)}50M6iGeV2S&3r2o11 z+zI*S(G8qdWhHrJ3Lp_bwa>IN7KYp7PUZ*wAMCwnSW{cOE{p|H5U@~`s*nlNSmrm$O=n*U^MLGczh=9}p0U-nu2xm-m?RW2e-hI9w-*tX{U#@Fi zOEJvM%sJ*5&$yqv0MPF!i+Gli9!c8>mB*f_6$1CuuJ^H@i_4==8@|V%EB6Vg1zb{m z=&;Ed>pgn8%a1P>kpXSq=H~30n)!h@*v)w_i4km%^J%qhve9j;!i;=%BdJ2_ObNMT zv_#(gi;&90G56G_>?;6+bwM%uUKQLEPVjx_BJn|G@x!hKZ!b^F_{(_P2R7&n2)9O7 z7qg6^$c>2pBWXcFKu-ePX&m#@o_&oACGu1qNap3Jp=%bNER+=K?ZX={ucy+paMea+ z`uC~AW?P3=`2&W3$Okx**P0@Dx2aTx)Yivr!#x)13OeM%wV8;5tVMa}A=%{f))5>J z;5_&AKPR4b{2+-8#{E@$1I=mI5BGun&~he!4uLg`KJJG{1(dkL%vNl)&)P0F*p z-zZP)Fy0LA>o7hp2mVwdT+0<7iox?b<7_=SAh2ylbo586kFkACf zlTHg^pgRw``IF~ZsEkCq{oRys$&xzA~F5GiOJHkl7u!{eb4dK^9A%^O@>_i}0Mza$9 zR=dWkF*Zv=gz8ymrIjT<`BxWTaJ6DZJhwQz;&R`vlC;nZ1y@XLa6k}QHq(<^+f6ca zt?vtOKdG5eHU4jy8NuZc!8e2k2)@y<#GJ$3fgk$1WN#)J(|}8XXPmIS%R7#OByJN_ zPdxgbqbdrZ<@_$x%Th@=V=0orJ9hL(yEc}jBDcZf(2||^16$Jezih$fve#BvN4F$7 zYEYuMTbu0KI5kJT=VNy=KFMz#nLD5C>+c#s7etiS#X8Stif&=0%!e=UD$3`jCF-9Z z($th6I!Wu;x0-pj8)n?TkEA9TTM)LAm7iNCwosFYh_Bdr#wo)ONa>UTGW=9h)QxwN zC$O(caH9VXsRPgL#=9_Oc69TIPO{tcUj3^2RpSy<#-%}Te7tp7z1{31`u?(hVFwkR zJGI}Hi>$FVw5?qNp-e`4f~8ktltwtufg8|ZUH?#s$WVA7r}KBRgOg3mT|m9CIyEC`P*qTCfmYS3#%!$)}l2_6;N+*K%WCY zz|AyJQICjKsz4eue$HzXCWqO?ft+sE54kc|yP)7%FS9{-;Df{`i+DKdmiBpUkhZWy zi!_-oX%)DCw_|K(|GbhZfw36Ra6EpBGxuA6aYX>~u4fE=d%I zb6};)&CGO}m&mb?8gz(o**! 
zpOIH;;r%(9)SfcgN}{(^7%C$12&Rcwk{)s~rF(NjFRkOmm2@k^riu19=j9Q)XRJ@n z?R?6!sfa@4sH4)rrpnX)D=t@jxc^LhUsvMAAgiulV`Pr@SYghEs|6=HYJUq6@6J}3 znNDx~i#8HM`#;h~#&#}Jx$7;-r2!NVO^e6W4Q8R}yN4Z;a6uSijG7<59%=kM>JO2k zIPHiaM+HNwR!hloxU9mNL8vo1H*X6x`si0CU!8|lnt$P?(Q36=(~gU$;ogMUvQGcr zme}~A(I#c-0Y0bo&YCo)y}XhxbI7J_e#u*s&y)9AoUn?WC~~csDj4vRqxB}&LYy-5 z={)hH=Y6}t*%R+YZ>O#GX@@G?xqyl3MM$r!@kCqZmPuiof(4+}*k2ydyxhyx=IA)Z zHN%6OHxoOOUSM_h)+upCz1h$iVeL)V@Wy;`jueOmgl>xnT5FXd0D{TF@^Z$-^Y1j) z-k~{bqqdyZHXGEd%gM_fhh6+)GnwPL1F2MrIUOd(WWd=qsM-7gN3p005K%&6AtV2_ z7mld;3wG~$7ff7*Yia)>$m8Xx2ZZ5@cRM~7sYmk}RRz`04=*-&iC&WCdqfx*(UP!c zRQ#jIg98@>=Z+d1Z#N&m?3M-?ajbvFK!@lnb}Nl8isZnwIBzuOhN6Cnu6Lgu?vSCb zOEM2IC^x+NROytQny=Gr))_X~bx58Qp_iYqv!vl~yC=4+Bc5;4wxJc{B9qOeD`V zs~k=Fz_mcM%UwA#y>q4P)yIW0dfT>+SV+s%c(95u>Ns9(?h-8+3 z>ZT44I}j3a_QEx*-lJGM(p?EDl?Sy(G8kn+qoELCGSK+#FO-V6Q+DKNepGL_;6#4Z0nYy*ljzyKt@q60Xu!YIr+#g1y%aB+Ca&_}v?ao0 zX(`EPc)AsgoS$$zpbl@`NvOAtnpinHyXq*OxiP>T3ynkF>w%e@mc#wgxRq%uB_sG4 zJi@2gpP;xpEz0p((0XcMDz>Z4foM4B@oiKsAwI>Tuq!oW=+|)I-s*gJ;4M{^(780; z%ogxsA9yrQw6j^pNY3n%=~+DX8W2Wwj6)>_=GV}o=5tLb`o=SQtBMK|Ets(m*dnBG ztFE$=t;JVs&LmisTC$Og1;U{^=;8d`TKDc57aeg>O23-QlTbE)KmZ7xmMiR`86R#= z<7IZ$J*>0o1^hgzVUCmEz-DXJ3**-{H^7{dLe2+ZkOVkh+c;K4yb`5}@R%ZQj_Uce zP2w7kDA}gO3oA`({H^(n%jCBE=LUkR_!G5aO!A@dZ=WV&l@>~qT^j};c}P%fLnfmE zm&dg8=__#tFXwEp(BznUk0;sMwc0+sS6jb__rEeg_T<!vHBr2MiGL{UX^f-@=xo zQj2lU;&v-jcw?8c{4~MIRQ*;TX%QQVOO$=FEHA!+McDQA{%R|*56M5^%@2$?fGnU? z(6#RTVHm^_pz<|apPT{Pn#$*(t^_%L5&=s0wifX$SK&9cWwC#iy@F( zoMvsgli{d45Hy_;8LB*y15^m(Ut&BOeZY3HapX((g~^_~O6`zuM>K!hk+USW;ZKe( z>`3oPl6% zsoXZ)WY|ce(6!nbU!20r&)?h{%fZg>1pLkbBIEI>g>eT5Tt!@@YQB5M-@ADf;&RIY z6|#iA(GMGk6YrBJo3nBMi4r0GNd?;DADo1H+H_?;+f;~t_KaAg7Rlw3l9on+2N>#I z`M0EqiX1&#^rA!TRZ(Ard9{2MXbU_z1Qd7xRQdAJ@qQlIx)+)OqqO>K=Td?6fIxvL z5u9hT7Xya9@YO1n?CG3ZQr`egzY=^_^{dGTn^7+X0POhGZ#2hhDL+a&U~X5=shupx z!q#7n1C0XE%=O)u7)=+nY|1+6XohM6gqAcisnE!ZwId>@eD-u&VD|!UD>zv>0OM%I zoHfP?5Tm!%H>y1chP|v1Skz3^29G)rC@tr?&L_z^5|8O8B7*%&Y1iMD!nGXLf^0cB zI4lsB+StPl9l%Os@VFqm{u<*F%iCdc5LcUJ-``1lSsv$%1GmEpAO*$0B8L@fyzXl%{6KC^ z>`-Ai5vo0MYY_>Orem?RpnwiwBSZK@U1&TwVFgDu*JwAJyMN|gaA)HZ+8cXjYS$__ zmf3L~7?8$bfId{d{iTQu2cM*yBlU290}zkY=oA6%W{Y_AKFoNzOUb~uS_od}W04h2 zdUk8mM}A&;?1qaG&N7B*F8Fqp_NFU1Bq#R|3QbbG7|m^G7o61e5b@h~Ed9`EwJ)!) 
zVRxH3)UVOn1@w0XFD}cy)JS>@z8bjxkWg+9^ff`>A95z_ZGtRF0xXATF>+cjZ@3n- zL)bQdkoUR>`!NNG_R1cfdrEH_j9<{TYr75+hB!O$EysQpE7j@{7hJ)3^DaHIFS zK;<*&0sQl`U(v05YG$7Pgfyo;JmBMgi{`E+CEWsCx#T!n6#;l%s)|7>zGoJSCuCIk zOVoZqga=PBnC)1=r4Zf;V1CsssMOfi_2xP+v*pW9ne9`T<$}b3m7NGUG|~Yf2l|!$ zbjM`FhWz}fRFhxs>zv?BWcO}s22BR2`iHfG{Dpu2?lI_xC$zNQuluU&9|kIBAYlM+ z&WBX8H!f`gnqz*HrFk_zUJjD_p`q$;Pq0HK(R461|oI}N)3XPLM zi}@wDl+eInQK|wwLd247DR%OV^<;pZ;&2dcxREa@IcITDAGnBUfw?~|=~kmL$K#v~ z=UnOj3`bE%4~Z9lXN2f*dm(*D-C zQz;48qY=%HdT)ml9qX?&OumylS16GyC z`?JqJ0P_ch;A1^kLRk8B}}NLA5AL5Y?N!RggN zo#%Djr#DX*m>6CP0K#624*_eVkgm(GjHe0O|nXy3s;G%OlTTu3^CdxgNSMGi536(xn@7|RJyRK( z6Dcu5oaXbcAXH+FfVYg7(^;k9qCi;Z9Y~JXpRLIs0Qimb0NTNZhVH~7$O;~l1FyAJ zW?bY?(|4#!4Fi5#qew5GKKBmQT^dwU&(IyM_xFm@g}wmuG6zrL3oS!F^Ulqhn)k6Q zhT^UBB}?9>323{%mU03HatispYBRX>O3b@5uXEMK++IDv`;Dl;$6hr^I{M}BVB7y4 zKhJ;f2L$v9iHQ$U_qRlNd3gsDq|BW9i%h|cy$z*<>V_OU52&sL959v&n8{8w=lC^H zW{m<6vXP$N*lMJxjDumyW%v6VW$Y=yr>tUqd8(F>**^_D(HLOO9h>U$C^oJ5qKpKL zIf&x9FaS)K2b;nGfpjE(c%nXyn8qe6`Ly{S`#%ITAY zRV-d&H$YA=k3au)bT7tLX6rl_D7OslD>UBmLMClLR!@?9{1>XHhJc)ODi$JkqJXL& zVxiWSGTJV7RBf`C`fosX5jW%6hBc+DKe3L!z4)+Z{F3v~&+mx?NE1lG$59yzS&vcY z33x@%>3d*kc`ytTq!e@EPK8;HT?(Gg{-``bXj(iER%+3-r0G;+Y^to>OA4Wo@1FCn zTlL*ykwtSRQStF2D8`|HI>#<$xD%@=3Nt>G%dAzs2pmes`+pPmJ z-Sxrcse4gI>Wj@4g-&=$hYwkOjHo7LmgWaJ*snH21`xFlXSwvrzsBQWmKmRo2>wN+ zTt~y{{xbG2hPs-xC;JYYYnB#sdO-r~Q+eh2QI+Ok(q6KEW@&C(A;ZPuko!LGT!OyY z9_(zfI@!C}koI*4eRLOf+Zm0YOMk}oZ|<+7dJlA;wtrXYqTUQKxvBuV+-sM+tT+7% z$JBfvdxn||v$&!u`?gmTdB3eE4km%p;b8J+@AX7wOit6v^*0xGac0@M&S|zl_uD)r!Fspf5!63=vHA_qZw4(3h==fatYT}c96i{q@dKDLD#-f)UI6@36`!J+9 zps&PU(;Ul3C^mF8n|V2QPp0!q{(BRH1~Xx*k0H*u>w}icb%)3K21}sp%*(}Qh=V!- z3(SHumEw=d==J9H;9A3_42v+c@HBnTG)(gHyN~E?Lx{WCZQ_o_ZK4TtT$mSUA(&Xc zX<<$&XLo>}%%F$tm*q?R;8Av_&}r8BauUE%jt`Mr4mUAM=7O5O-jYE1Mq8$wPUpK5 zZWwdL+~S4)4}F|570Jm>?ERPY?+>D#sA-HhHg4J>U-y)or1CrqG6=96lRXfh7JKF> zdi8>i;keF=JLX)Qcqz;ntOQedYCmV}H3Ojmbk{F@ogm>E>p=g!6mYZSs3_a=pw*%| zK1x||xEUp{x-f~G$wq_y*~>*$kJF>SWFOqPh#tDxv*WB#Dw4VGy*`oHd{c7Oqy2wU z0?r^IC7^V?u5oTv)d2PQYP0w!$J+8&4`ug5kAxMyTkmynHO~Bl!KoMnx z`v0aDyrJLlt3_hix;aL0YG>u-E+evR_SYD(LdjDDvDOz{S#HHdYy=ea!(r?mUIw#u zl+)wpomW5VY8E#woNicezNf7aPQ)%$w=o}17%6Ry3^tBLWWSm~+)c5Qks7{L<@{ID zMfqB1CE!)$EIEyNIzcq)lu~fWE-MWvdVhJ9`krE**q&@AV zV94EJ+uRL8q=SB&yFEUok#!D|*j!p3I=&G7tfA5%BBY?(adh^VOyX~)-{XC>ls=3` zD(`c{)w$Mm?3G_K(p9JEU54YsLCmV81P$ZWI)A4%KMAF@*bd~}DWB&eG#t&sYFACl zAtIqp7-n=I$j{*Jjm<8VlVd@(b6*>ST#gmgfHv0g864-^p>B@%J3;lGbdD8pVpuKF zL7nGd6cc5e*x<%&XYqI{+MCZNh0`1~uhtjMhT1D)weH5l^Yg(GQHy zm^8Gg<@qlo8_kz|o+o5xoh&w}k#5xQU@U5-p_@n6A_GP`MyR3GZ{#1V;b#X@VVP;0 zwj<&`?=&hIlU|khU^jQS>&u;G0Z7M0Ge6CN+vVj&%a40dpt@@0I#zMO|DfTvw}u9d z)B}77(GzCeL!nf*mfVv=RG$j#n_&Sj9USKGNLm|_r#nj0AXR6L+q9XVms8az6R8~j zIEB98%@&Q?`6Vn-Mb~$;c^7`DCWYbr(}&4}+hpEcUwwPDgY{;-D0XJFWE<;-5L^4B z9yT2iXgg0zQ#jw0OF~k^mgnB8JC09j)~TS+N%%6xlfp#AP7MJ{h%@O6Elk-^pES%o zEM1l&F-ds2ixFCn?yRLBG!HRgPn6gHLJWkQ?Y`PMjMfJ{LgL5FR*%8VFGXb3Ke9+) ziH4i?DGGAc7_P4)6FcN*od3NZPxaZh9`6#x@$z&8^{f38^j^u ziumf%Gcx40Fp4q5#wn|Kk9b!r7GF7nZJ!qi2#gH~to@F*yShj37OGzE=;57TA8cF8 zw^ZVC_v`eP^0>U_)QP`iBUSWGCF}puj7=TDgJmwcF=bh797Q&gc^HN4ih1U}C{Pl| z&Wk$Uyw5PhlBemj!9s+R7fC+A_2F6JIrCDRn3V42-hrTKAimSTA9vK5&D+T{j(lNF zQpw*AA$i9K$Y=@9TNtl?63&@a3cc3y&S z0E4Ty6!AC1qU%g_hJn!^p=?ZxK*##^Un>9&46!FA=M_onsBd0%9Rvjx3?cf!>p z>mqFp=l~fr)^p=XNlZ}kmX!r;=`0}DjI0(RNx-+v1)%#Ct0@87#$aSh^!*N6&b;}$ zw;QkO$0eIqffYA;b^Z8Ne!E#v7qdJ#z^THU(@^E0Yba{b^}~?;-(aJi7odhLj^NBV zm}pit6J9>I7^3X5RR$!ahX|w_6K&10gTQiloYA+Fn5nKQp&WRI??GI($Aiz`2Qv*F z@eahGfy=G)@@=tvUsAF=IpMEMd)WK&LOq8QVwMW;8MgIx2w81DQPPO3R 
zBYjGeW3jspZoD~djq!8^?8RALTy+VWlVZ@-#mq@V(2V~qPNYIm)Gw>)hmioMY1ZjD z(e95iABwEyY`QYUPA-KMTo=2DoII>-fg~7of@!9NcwZkT-=>_KKrz44V&o+4I{aLm ziZ&#>;L7;ni5XdvTOV=N354N7ezQVvVQ4ROKf|YLp83PVcx+>7)~5(;NL`RqJJMt& zlw3}J4wqgOA=ZChb*4v7tiam6H^+q?htD^s7{|eai$Ie{xZ+jjF6l*#gU@|S+he?s zhA?*B$BnHnxL1$ID$RigG=@lQY35N~{>)+sG9G}Znw()Di=o}fO4c=xCR%4(rs~_X zo>HKCEd%BVX9lc`Pf|CQJFsJ8_XB_rNoakHw$y*JR#Q1&vk6?RC;LYoUBv&vQUQGM z^e@&{o7M!=+`B1Gklf5N9kNdX_{Hn&e#@D`id?e*2WP{#74S0;+x~XftFFMHwQ1|t zPd40(@%YE~&$HCuu-DP!` zQt1UdUVZa#%YdOZ#xMq)p#?PuP=>O6Wl0iEWY)0gC^>K{Z?5z>10B-RhdiUch9E}G zuSsa%6vSRugEBHAJt7oaDB7PcL?dQD!xdO+r(+m9O4oFto?0F7l@ot@*GNbt+l_;Z8 z#zJiem10;}8v0O8-;@e=y!Id#1J|CaOhBJQQVS8B3+e^G13zRHX3hSM_Dd&iu&L=9 zJ`vwDfBlxR z^=AEX5IC06-d=#476C-MRmw$;!gCMk`7HAq@z+p|Sk0_%k3tv{MCjOrA{3-`lt)0JxyHwEsuy z!FG`|MmfvU%Y~Hx?HvXk!WKe(!RvodPTMy3{y#;l{r_)(F5G{*3I89xlK+g~{}hVn zAhrZX8b}2ZVNbvCU&HYp@9{KB<7zY#7UJehuU54Dg4@+j%%g8`f~FU=k>4?Vxj_lh zpClga^p(YQqVTn%d4@t~T)4h9QVnDbO;-R*Lc8vgEf-)eO~FvZU7Rv73yK|y5BlyC1=PdW>w5`xFJ?1K?kyi!*26E$SE1LymNoUsS zyWnTzSZZ?#5P*T1OKVR3&AF(PqHNf<@Zv>hS;;N=r#NMnBt@`(JowwK^VeSUm`G8< zZc3^#rs}a^PLQ3@#=x#);t;ZU^-x&C%^wa|HU?f;(x+gi62ey3TT+>IG z!nf*YnR}M7Y^wTD_hP@!Yv>4iB{ed15yYC{chQ8vxyc^XKKeqA2yB(*Y38;Ad2!7* zKj3#Kd9e?~MM3bl&XeJlu}y85O4yf1@MV$;W*hU_?D8{}TmLw=n4ZQx9<_JwWkDrgTWwkityHPt>@*soDfAbEA684x5 zFevCB|9v>*vGGn2L#2ZNrv_gCLp+XfWA<;~do(gX5@@6W90VQc zzve!^a0irBmR|ntD&~L6f;^g=o>$l5a@VY(00eqOY=uI{`5(`E4i-p6c@Qpw&Q&_Z zssXRZUqM9XcJ~Z)QFgF&=hRt6r=9Exr@`u1=C5lS&25#J6QyNOFxS~ScW-+liZ`d7 z1NzLyy)`V4YiYTduXd0457n10L*Z-Sqo10$M3%;#Q ztsIEA`L~z&pW~~3ftq>g4gXw{f4e$;y$p?0MXZG;b`MKB=0Csp#U~DVPGImb4y?e5 zp(|^^trp9os-IxUci3qc%Z0DN`mBbUE%u8j0KSsk@1hFaZJRc`eyH=?+R_4SVI4E2 z2F_ZvkHJ}FjFMUyIO+3WXlA7Ls6>6x%9DK zQbR7!HmEnN8JhqZbuMY37Z5})+CEPKNoLzUxxD@06I0<%NOtc^dg1*I8!QMaOEXW?Lt1WKPLX-JI@18C_(qp4JCf(t9&C(PK3NY^ z!~^3$@i*?|^+QM=9dbSnLCcSGau#_#5O--qWcqTKca@MSf%$jAv@*3}FN1>kLY zyw{{5u-d_QxVze8>Kzz||?Ou^X9Y)Q;j zY#6Elihbb*<7z1y@oCpD5Z0!5d@X$aU?><7PEby1k!GUw0!Cz983g0Gpvv!yKKI%H zdK$A)Usfkw9W6=WVvca*ErDVv5tGm2=7mwm?o9k)=K03NlkyR9D)ci0l{&3Gu|?gva{!o)gX_-^&YbpTOdG&sw>w2A$3EkYy3AgBr*>K`SxRluuo zbn8g?RT4M4oS6r5hPW{;W)eZ2ROUv+wY!-DWYGQYFwc2o&=N z&yFhi7&dGG3-6lwq3+4fEZ^H<_M z3FXE|z`djqGzrs+dpHnCo2KT-a6|{lvkODDP$?u}Yf%?>{s1O${ZZ{)dFxov&#tOJ z7pvu<_d$i=?-t>AVMFe|F2hM{5T(TFZ4lXv^enQ7w7KB36q0=?oDJ8#9IjTr=WVyO za=bYsq`0@)SnML>L*e*_mxSu+)uS%^{VB4u<;Az7i#y`31&xODe_^m}dmu}(W0^tS z#75>?!8BXc?9MrmQ77VLD3X4W6Cv9}PQ-0w*RPF5Ls}gCL(8;qmn>+m7fsv2vb*K+ z(|sMSvxz3Y91Eyh%RlMXO&*g*a%?WX$hGr_I7)eAwalBAhm5E~aK&7^)ZtH?@$E5! 
zBPCO-gi+AO37_iWl=(I}NZrFNVm@jjWw;NaZIfmrE@_4-YLZ*GftXhZQKzWub** zKzK|(5(P&DnO1(!_p|B~n4z8Q=1~TrcE4^*eRJSUk#jT(H-c{oDdrY;^(+(prcyA# z5=P@@u#25QjA@BjpoPCynar)z7ClhvBYV&Jne+a0@(oKd_d33%1}s#=(HKU*zu!C> zABt*wAN)B=>P!tPffrqxr}wxkMKBg#S&$?>+sGzb3A{lEfs5p<3I{zTmyd#^FCAr3 zy>-BUhpb0Ddd)U;5uGnrm+?N+P#tyZ+xM4M`-Lud3Ktq3c~eMXAS|MdY+9#OXw~Hi zkbrChcHSJ{d#^10z7y~Hq-yPc^}@gCZP@^=Q=^i@>TF@jDdkfZnrlP7X{L+|FX6Ww zhO~{F3&Pb=5)L+9-iE02d8s3enz~2_dt;77@k`T=7Cv=bGtCcKSa?DGeN25V^R0?M z$*{o`v42)y{?9+17MsibCc)8e-Ks~k8Ep!0RDzE*UmiUFw(@~MgI$iXTIF+cJ^o>O z$2HiEPJGRyfBo7{H(B2V?pFerInW>Z+gEP)adR4WO%*;`*3G(6-4S{<=hf!Ve0qb*tX%G4UtKjdHr2fpdeFJxOaAe0WXwO zdG_atD*&AP$BJXow*F^-;Hi-jAQ7?_fFZ!L#|9dH1HfD-EI>~a7Lk{RPJk?$#e$FA z<{jBu8NbN#*x}8NuSY=Owhatq@wR|rT~zHwEC&X)*fj#brLUEnr)f4LS-gJFmjB9- zfZ3cTVq^VdEAUSb6BAET@UZP3cP#(ZV3+W@!LDZNc~F;RbWqnFfAfNbitmc8$|R84 z8CKYF1?{&*nX(lfn(g5dnifgB)$w|6%|IrNL7ZxVruV<HIELy`BkmB}RWaikQ-i>=!9Ndo{QI%uh4fWQ66(_Ve@f-;SG z_-zQvAjbmF=ZLn=HrN@yHuE5HAl zJ%HM0Jd9~zkpx=0cE7@P*Oz`^Kv}Xccx8(UitG0UaULh);b~DS$vKaI-`RA~#yqB0 zxBOiR)E}TQvN-q0Kkd!} z{wpIbIB*Y(&PC`LbC6Y#!^VfOp;2%jXGq-fffP{Y#3S*mB>qjEmjg*Cnxz$NxWr6~ zAs~6?_12z0Gg|Kp#f6<(roJz704Hy7cy-qmt>jBm!HBU}mpgk5jV*X_CBT4dfoWP{ z3z=KcNMN>bId?V%(d>|xsvoA=7Y=hRSI6@O$DoDNHA;?OS$vJVYk{q&`zi588~p%>SuFzY`or%TvTAY1ipk7-oUSxtIX0UxIeZ;r>X z+egmZan5ZgSJiv!xo30;G}xVHJC~ zp_bJE!}?%w!o8Q~#NS20UVl0n80$8Gn%hK?z3{scvdJz3>^;-{Zx=sa)#8uIE5xU| z{CUFjfzA1eiI+D;+-)pUThi}uy)%=%&Ke-8viO?9po7r)A=F8}I${vqGI-s_M`@5n z7Ze*_jxk9^)<{q*)etfrJMZbEGd!lN>lVFwy&f09Kql`3{4YL#DL%k}D6|ZVwYP(q zhZ`5GXA26)@EMIM3w6tPYiDwji9t~-BlHdpvdKMfhSx9-jB}#*sVG(>`I>UeLXE}y z2-&az@!ayO)VGE+<(M9Iv1`o@cWV7ES!c)(5IpTY$q>&2>0R0X@O9LR<<^NK;bv)9 z4gQRZH=qQ51Sq`Yp>LRC$Yx=m-2oE(Tt^|K8Pf*JX1$4Vs_67~8EEEG{h3vxBd*}x zUD#S9HxYh(3Ch*=0{&^k{b>J~^X#rwGQU9n{5bynqo23H{J|K*9tKIQq-eGv6jk6# z__sS>_}kkuM1@!AD`HP99dqjLT0}AjZ*7EF0SMU$4i_Lg(0|WWsvb86)PLrEHo97n z1ES>X?zPTEBQ`RVb8bmR0^E1+>Z!zmF#(Y~Vu$z8mOQAl9I-csIAvn&LFhSn!l1-( zK_%~^_P@_(4fRtN$KL~IDe!ZEWP^pQ8Jt8qJ`Z2xG@znr@JZn|aMutk*j_uxyf2AK4D@!US7d7rktd2Er6QDQJ^ zw13P~-_BUUxjegUa;~qgF@UzIQm zRP@O89%YM1-%hTAz6D!=xIbeaJ1l!X4#un!Rtda3AgJ~GGa;D8j<0F7qO|nfKmiEY zn9VJ0$QW(Rgow=d3Nt_T699fMHgJJV!XZoJRc31?9ax>8Crl>oZ^<^Y+$>!LT4mM% zaaUGwh69=E(VTU2x;|El9?GL0*ySW7@Il%X zM|T71k!OCuIM7eG~mURwx-Y`gSWT5O;q-kn_)d z9elcZa92`gcwd{G1PH4xIhTuA8)yt;OY=-a+;8k_KTJM3!rw3ts9AMkSrXrGn+>A< zMy=7p!1(V@9hWc}u!an(WZajUBRWd5#rb>VVI%3FuD+1R`+9}+wk^Cto8Iqb^ zLWzC(bAI;`l3*cz7S!eGv2-fnj-lpU^yKa3$uFVKgTQ*;#2dsmpALGC0JBLe_)pMl zq4kUZ7FFeRKq^?=k6%8aP@hPb{H{nYdLKvAba8RXPMZKdix#Lr1>wo~_i=rh46q}> z?T)j%QnxjsSu=thy?6%Ey?&JCDPNOxQk|Pl?btJk(E8H>fN0ZtN-K#6)*pF;brd|@ zJ*;w@Y;f9H76}6=@>_U(oq?7_G}O&~5p;b{>K_MB3crwP@%g6WVlkl$A4hdpYD#Km z3)VK7LZdpC$)yth-_Ppjyz!bF=n^hjE-@*~oLc~L-I=EN)raRRgX}t2bptSf~%$)-aeHX;TT@ZYVj;1TDex{*tBxNZ2#TKhOqt&LH>lYDZb{blyU9Y{VBv+6WD?w!eGp z&o!=mu+?a5ucA2^E8Ga%m*2?WJ{=*Il2y*PG{o#YVEE5!7N5Npy_vBq>HX{;{cDxk z^p3~hHzrTC?s_{dT;-4xJSQlo_5UP9TqE5Fp(CYW#P;SE)F`9^d!DCx}`D4B@ zDn&1yw!k-b zV?38xxBu{G7C#J1Y}pfFCW>t7%+*d?81X?R2W(A$0T&Z0vXaC=Q|Vaei<3|2nfX_X z8vt@pV`TBJsGNpCwB6heh*zlZU~&Xm(9Pxl**I)7!dZTwFpM|LD$D<$OAXMx?w?Es z1xpF0)w(bMn)7xTc+a{B&OrMFP{qppbMBcP(vK^3f8agu!ogcftRR&Hpu1p4GFUa8 z89ml0?tT<3Xvq&Ry|#(17k2d(?E!jcBXf!CH2Mn$C=YLPV;9$J_UfS);l^=r$)`pfe&EGbxNfFKL4 z=AhL)i2+sk!#L&DzX`w_6S7T=wzUUaqMU_Uc%`!M;yvsZh=us}l+QBUJRGKb#2mm? 
z;pDys4(X~7-xa2WeD3Jy^jlD9xUC>Sb_do%Ajh^nb>}#8qZzpy+XhKB?vZ!1{%||G z0tFLAx!)f|X+(pDw{FHtRk^i9);RD&90n8nCA-e_A4cm`=A=CCwm=JY1DcJA9`q?= zN3Z)Fr7jG{RNLEem&g*+I9>9i6UqL;ePx>OE$*(8Fn@z$zPmIdWItwYP8t?_=fg%i$PkGt8?Iw^;^Z9DtNWz#);P zhA!0EzDA=yoxezKzesV`cr>o!RX&yGMce z+gM`*U~K*K`isj_MgH${4s>T8$Q!S0SZv}L-eh-|vq$g8dOiz%A8P9rR}ZFD^Yq@w z+6{NiI!fHmlGeV+6BS<;x8g2Au5V;C-qM5ct0#>qJD{k$r+EBfd(s3vsc*}O=K=3C z^{HQ!aFHy>9zP^=IjxqV+IEN{ zCzkjq+N|}k_LYN9Z<8gAqq*DQk!rSDW>RSYH&{FKnLX0ss0z8LVl4YZl+8zWB76L= z*J&GnjU*a(Y-w+;XuFuvPD{uem9Ca(MZ+wA7Zw|ebv@d!VU$;$Mm*Yg^cVwxQC~jaVUjkY4PC`qYqZ=qb$ptj1P3j-M-AAYUOcg}ueF3*G<3HG?mP2lcq{ zMalrbf{+n!=SRGr=>VlSUw?b}LRPX*btPthO|(i>>hvdEOQSEIT78R>KqnQ%uR&qb z6HNehc@+L3e`@JuZ_FrEfNrew*7S)QPqeT%#P|t6mEOFZiNqY@pTGHW{2w$2tcxe9 zqK1!Gf~=`FnFS%~p6}=>r_3zZpSjhr+2f47B?CoV$ugG4N zq&Bd1EPWZkrr(`g&*r`x>z5z)d~FsTthzVxGPz9gX67Q9JFac}i8DP4L<|51FG%rf z1>F6x`ic9a7lKKZ_C2BX6P^T8qSMk89L#2>Q{PjxYY>e4S7n2h_=<;@S`%eT+}`ow zV1a=)t*%IhnNMAA^T~i<+%z?yX7T|;9X~}`#uwlxX(!DNb({yO>F=A@Kd0e2aF+F4 z;6kJ~0OS4pvg)xi+;@f6Zuk5G#zN@1i2Nt=SNtnlT&FtH5E+%maa|3F%~|86;pyaas$=>PwlwO77b^-x8dFp?{zjXW4@lBIrEjTYw07ARXZcPS@tY!LF6` zhr1su>W?R8&q!K*TX3Dr&eu^(FZMd&S#*tKQ!yWL7MufqMHYAu;g9jWW8svsA*Ep5Dc(q zx7+*P*m^~>3%w04S{+2I6g2<-l6kPEy)1S)N$_(oxMX*C=VpYaCrvN+xe3HH;~!>N zUCR6P`+b*Ho*V=`m#7F;f4Lh641{@)^pbVS<7a+<=7*)@&jG7Q6CB|t0q>>% zpv$v@lE-ahuNXw4;S5>PJJQ4BA7ytdri7Z|m9sy8l)epy3qfb0TA`+PEU5eF?!N08 zFZ@w!aM9WJ?n8gRu)rRx-7M**8OL8(vfrB(+&wWdF(~Xqv;sH?8tfk$fne$&v8R;B zDF1t80;u>aZtphc3@2}wT%D@oEXUM{KBS-|?mDbU ze-hZqTNd#+pxs0*{Y3wPTmI&1QV-v`M+DSU$#}Mhq?PxNHhm}~^CJK{bTqqrW$8L? z?Ye);gJ$`aN1!Az0LU`#mOt0bpDT8Y+y4g3tz^K&t46#;Z#Tv))BR_8bxB8M?{n@j z!v)tf;?MQ}UD_&L+jbw(Ce?pnZ>*i~tgt)Y!Uz3#Q=&kG9!W%$E@y&cKmAPZ1+ja; zbD_PypAEka<))7wfDTVVZZQDv0I=MT_X;QUevExg1RicO1JMSZ2X4u*N>H=U?}^4JX!A|>8hTN5 zmDRh~nSEyJ^+vq`*=OSlr+(isSxIoi?3a4=wOP$u<0R*=eKDLqmCoIho39H(O~^^{voVrF_vR`7B!wx{C)6RZ|72M=6z5B zE^YFgyIn2Cz5=C}=etCpU}o3jI(p%+;4L7gl7O~@6V-<~Odt`#!etxOh1 z(m$-?s$Am2Snl#$V;o9c4CuA?w<#qm09)`nOLSy}Q`A zGeNu|=Y&(4O#5wM^O8B1XysoFQs-1=%5ysr;cbv0RLt{&NO?2*GFL*|;p^~k={&-YTa&6_$7 za)~t^zYf}|Pj)@8VNdh9EVPK-^d1}Qkl9#!pt-h55Ax^ktLLt_^|-2*zJ4jc~AtgU1Gv9SI#mC$#0#^a$7)zoHvta1H!R(|sXbLY)KTr7b$m(B1r_b`pF0-3uT`k4DPur#mH`uplFLSL97!v^)D}^yP#3;y^XPhr#dx zd4*h<*@vL_*O*Xk%D%B|AN0rKk2rsweGjC1!=kLy{3efLrAFuJdQ};wqAS@3Q@Y7+ zS-zj19t4*H&`2Eu>l$?Ev?AKYAi5`3O6C1G9@*VVT;bRJk?a7KOm1QH`c30vKC##V z>79|Kr3lf1`R8QNV$WG#7&V(oI<75_NGPFjO)dlK)QR{PQ(aj6}g zAvtPS?hYARa6%0U8JQD(24brTInK{jJ3vs_z|=hAu8O zpc`eA%Od;&aS7p#28p0M<>KG^`?>N$tSQuen(DN^9><~mGJm{oafvDSdLPc#SFx*f zk&FnU_AL;VQ=(Vvp_vM^(Bq7(gBdR|DyQo<8U5#jaU)nHbCGQ1<1$lO8Isdi7(I~4 zw6=T*u=2)wPI)=RTq`c1_&TV;?tTW_gx4yk>VB_Ks*8DFc>6%Af!F<*lpjwEpq5qD zjPYyboE-Hu(!!H`J?og_XM+_FjrWVHT$!=ncoiRSxD?|WDAqm0Rn5Ga=xGj+v&%K_ z$ro8(yah}1`*49)W^0`5odAb(kvO6ClXWDvU7%HidUOXk&+r`*oERP7_+NEhXIN8P z(?t}d3aAuOs(7Vb#6lF2&`S_eI!JFK(nN|NC7~lC^$G|oRcX?Z4ndmO=uMiwSk=Y>D#$w^L5_St*(%$hZ8(%<4AtcjX08|$7t}8v(>XJ{!vJ~!$nJ6uw9hu#Qx^-#=^+)w0d|p*LO)qF^(b_b z*|chG@Z%gMC^5s{6ye0hlld-{sJ-8`!Ow}rEK35+=;o^~G4j@%Uex1C82-Sz)^5^M zsRu4Q20Vvme4JpeY?#7RsG5~L3d)OCQn-4yHG!(}kmC~-Yn#m*>KvNj>9}Yb<4l^Y z>$Ke3+{{d7je#l@X_c&&rfHI#m6w+{0W=>se@&p-Pej#2Qbiat->fHCMGJ6Vh452e z?)^KwN|j3m)NhWEVs(@z*k40w?#P@TS?Y+lbF-4qC{HsUvrZftdhw6;(4^Hu)4{`Z zmy$R+DMf3}<;}Sa-by&-8?nfos585>x=El+{rV(6zM40;{A_M9ct+m4!5zH2Df|3B zKY6vsPHJVQ;QuEsj@5zBB&l0wH;L-jZ-3R&?2TyudnTtoGia9rMz`%3Jkp_AEVk?v zAHMB5{leo{H_>42!a|jX%dAn}0A>!~xAgxci2AWMfYM5ns#8v{t(5|&#W!xkEAioP z$10i|S-C14G1$bz%AL&FChh}!pk(9R6PtPULZ8EMWtR!bdSC=$O9g)B>>U2%%XJ5W zmFs~Pt9EipzLUq*C(1Job_P4JKk#QwUE5GjW?GCp)u<7@r><8W5|U<_NVrCO34Chm 
zTlKE>vAx}~b>IW&jE)6?r&35Lz*%+Q_J&_TU2*>SZ`T6!9l+nbqolxFmtIRgo$Q%j zvmPX?^6$52=oSHOtwmcw7R^t?pZ+TCY;IdI7-~+7J8%CuyKIQF*^qmjR)Fc?)wVGFrFF8zJbR65|@Z4U{11Wdm4 zSYeuN9X!M#WTQb6UWh_)!|0D3qY9^s$VLk**~$B7aqI9!xE3=j+)WJ9e?I401e+t! z+M4Ha&`c&tk%=~#+3tBb=N#Z9`7Zdc9!P6U#Nmv*)82cf5mJs=_Fs&-E*Pn3HoML} zAg0kfB_iaRhJH;t!Nw-3kV9OA5DkGtBAuOKkVDozW~ez*+}CxFu={OyHA|)pQX2;= zggZ7MXQ_XdM6M2q2u<(0*^x?RQyvJ#r692GeBN!!v}?qRjwvC82fO{RE%_@iXGnz0n&5rSs_p3$XPXGSAg;c*d62Xi@ynn zlZZpb#(6z8@kDJQcJ1}=07aof@1*4LHvnj!jTmD0NMpdQs~EZe%*ZD-&0XR^_FN$u z=^5n{Pm3DuE1iHg!Vj6;F(=s4WECT@RWR=+%LG=p_co*UX3E`hCF4iwq<6e~CZZC4 zxsh?xX$>q<+rKv#En+csbh?~n7lz}@a&*eX#^3P23WQrA$TE&SCR=e<=*baC3b?- zHZx-Jf$;n6u=cVuK1@tq*Ldi7j|K?zH`0XC(rGkKYngELAY4m?{P+;e2|({+LBR6M z{wTJSqm^Nd5;$ysu{XhsD?6bD3W~mkDAd9}rAxW5E|Q1;a4>?JKdQ&ev-AaaIx|om z(M&#QcqUkX`>x2nF08oppGe;eLcoWQ3GCVa3-%XgKAEZq^SeJMa0?ijde5VN<9lL6 zPvDZ&AoyPX5Mg~U0ezE@(@VQFl(%n)_UdIIVWP*=&pd6ktCU#^OAQqDr`li(&N9&6$)`tW+mTp<;7iUPBtcs0Vl&t_5lCtoqf@+L|x`|X37qS%6Q`AgEr5eGIkvYER(UEGt^Et`(Ri=nDdPY zaBHj90PCOaFY#QOfF%Pd1b9#>GB09Lln4bl^%g$_3zz$BG)$4CKVWzlI{u_d*W6T-@_1vAOM2h zx|E#qO^--F?c7cBZK`KNn^xGX{qi8TYioFe>3FV5!02@LZ56DSLBl- zg@mwwPLZkq*hz&}Cc)Y>Ye|v0bZH>JPedVV1I;NluOGF4B&(Iw8WySBM$?x@b3;rR zOC1T(Vckd9?baFJ-h|O-rrI4xuuf~=DiMl>`vs}BbPYKTE*@zd&ZlxC$jiW%7m>|6+PO<4j?`RuR|=bieHz zgg%l#(67LRo{F5zG0Hc+4I5emTPf0e+!5Sa_YH&^US-q;c2^V%&SGm)ax+6m12-XP zoSZ%3=9pBYef@Gv8!>Gz|IFv&5cx%n7vX+>5)0>c+S;|Q#{$Lgq7lBw^(Lcq`O?>) zp(bzFs5f8wnP7Jr+5Ig2W9h&j0dpx25GGYUjR;H<5PqYww^0aTpzXK24b_4TNmV!5 zc9z_{fz&(n^qkf3t~K4Ef*PV^MS*V$GIh_4uYJ|pOuH$v`bu|NGtN2V0m$(*Bn<+1 zEbNQNJO6)tdq%qXj*zXOVyOJdI+5%XsLxn#jHG?=8ZtmJsE_Sfp;OLP%;$X4!lavyp~g;f4$Z(Jk)Qeg8r+U>Qqip z$24fC-|u-Y4SfZ5lT}=z!WY*byA;0*QC8sNT;*OfO#lJ2_(g9g2l(n|8rk1xllA}u zb8b;5K{sh2aAY9KLMCp7qF3p_hi+BBumy|z>u8;KV1CU%7qLqIhJV_y*`!Vy3I`-#^IcJwa6S~;sQ-HZ}2jm4W z`A+kY7ekxzUVr;Ah;4CrMb11J7m_VYVILiV=JsX<_U$wigw+v9GOn2T1F!O!ZFHuAg3>S`&9PcW>sT3ax8@eC$Gi%G_N- zgTdrmE|EbHwbsRgR8pqTb@#AfX@T6U)-aqMBQq$AZ+wuU`n(~YGm^qy7hvqeQ=|BawOoYxHTtq3W zoPBN{qS|-mS=8>EpZ+mx9P)KO3m2dx35bn<>;Tb*E+PJZsJ~zRl*W9In$uBObFL2ErP-P_q%^DI)B>+LtmL3zMd)Vz8 z4}~T7({wh7BihpX;oPK!J@Uf0V-cZqJZ9(Jkx&h9y$nQG9WV~o&A^2{Viw(4N3F={ ziY^c{^)lWU;B}%F(x73}L4o^GpZn_WXt#Ey-v9YpGBbzJNi2GtY-%r;` ztSglQYEFFqJU5xgn@74xW0M6Xel*F$>gK?A$SIw-7-ILSR zwa`_V+`hfO4Q!Vmz;?l$QiJf;f|erUFA)(D1BFb*S(SPlgs|Y?yb%3`{oa-DPe6#f zOy9u3R^P-u4!yUt(h#p;X|_7@H&@vb=+q3-i`9Okt1-P99^fhLbjgEgT#;<-S9%UL zg~YdfGIh!&NPJsrqaHkg6J`0cbdwM>F4_3#v%Qc76=QJjs~R$lQq8mbWtrkV?T~vP zFYwKsBSLw0jj0XVmhi{UbtxL`<297t)e*f`x`4fOcmowYC5Bz1-w0j?5C7hfR_^pb z{!e^ka)#xz-p9E2do+dCdjLY!2+fcI5gB3w2n;7BZ!2i6S3Nfty*fAwl#YhF_=w+2 zm{}c$-S2UFTJ+6`2fGKExPKO)`MUUw_dX*uYFj(jVaN^r%t=e-W<9_fp{C%dbKDw4 zdc?cW`~BGwT#*adM1{!P*m6WYdP=CCD}Oa`@Q%ZuOMJEBprPQ+;4Gmp=cE(v-U{T| zdApYTcD&mJhsqQ0QB|LnBvIdVw2OQL0q*ZF<bius!5Et@IBW0>icn;Rc_?(BEM zyXQtq`VM&xf4)4wjg7sk?*acjm#PTU`hDAhnqnUL!hDFIeq>4LvJMj0wN4ZAr#8e5!PMl zN*0flAAF_MzCcY)ZJ6ogQ#ilT8Tm783Ni>SkfNS6&8P=^n8T2}xU);z2)&&-{g0?t z`&)CF7xnIQdS5z7pHbExPnJF}*XWM$oR}PjXi&kZd^KOnH`qh1fj{89Zbux9g=7md z7Kt)AY#wV1P@gRG{r6qeR$Qi_474lZp)nU=Xgub|bm&m|wGoUX*aWVqYx6799mV~N z6D{Ee&o~2pGDDuDy)Q~#;4`uY-a_YkZsIqtUtEU;NkC_MNutN)6Chc>| z%F2!BsEI;N{&SlLgo5Ry=D)13uZOHKG~)iU=j{-u@TZ`plBq^I-}3Rk4T!swMBi~* z!Vhv3A&$p(uP!L7&Q}dUf=hu5aKA@ocYADYWiDQ1JC8QNQ6Au@vGenOH5On$b9@8l zfSW`L&`x>#A5DPnMe$$ol-!dW=v28H+^F!1F+abSiNGmHYFT{Zy}}c{Fj8~L;oj0j zi-E`^U`L0>QgS=2@HTYVFar0fZYz#~dc)cVAuV2xOP87t@S~O6_Z(DttKz9T#>yRy zvR(0Z)ztnmt4>dPxq&qiQKl>dpiX>{=zJnUPIXzf8Duk_)l>NzLM>Q!7KKI$^rbei zKX3&e+{@~-27Eb=Pt&9OO;VGoC#-3w!u2vKD5kYZI=N?A(`#*-fsZ#im;&QoGV%RW 
diff --git a/docs_src/index.md b/docs_src/index.md
new file mode 100644
index 00000000..7f6be17b
--- /dev/null
+++ b/docs_src/index.md
@@ -0,0 +1,34 @@
+# Vision Self Checkout
+
+## Introduction
+This guide helps you build and run the Vision Self Checkout solution.
+
+Upon completing the steps in this guide, you will be ready to run and benchmark pipelines on different hardware setups.
+
+### Overview
+
+The Vision Self Checkout solution is a set of pre-configured pipelines that are optimized for performance on Intel hardware. The pipelines run several models including yolov5s, efficientnet-b0, horizontal-text-detection-0002, and text-recognition-0012-gpu. Details about the pipelines and how to run them can be found [HERE](./pipelinesetup.md)
+
+A set of benchmarking tools has been provided to demonstrate the performance of the pipelines on Intel hardware. Once you have completed the [pipeline setup steps](./pipelinesetup.md) you will be able to run benchmarks by following these [steps](./benchmark.md)
+
+[![Vision Self Checkout Diagram](./images/vision-checkout-1.0.png)](./images/vision-checkout-1.0.png)
+
+### Prerequisites
+
+The following items are required to build the Vision Self Checkout solution. You will need to follow the guide that matches your specific hardware setup.
+
+- Ubuntu LTS Boot Device
+- Docker
+- GIT
+
+### Installation and Pipeline Setup
+
+Setup steps for each supported hardware setup can be found [HERE](./hardwaresetup.md)
+
+ Certain third-party software or hardware identified in this document only may be used upon securing a license directly from the third-party software or hardware owner. The identification of non-Intel software, tools, or services in this document does not constitute a sponsorship, endorsement, or warranty by Intel.
+
+ GStreamer is an open source framework licensed under LGPL. See https://gstreamer.freedesktop.org/documentation/frequently-asked-questions/licensing.html?gi-language=c. You are solely responsible for determining if your use of GStreamer requires any additional licenses. Intel is not responsible for obtaining any such licenses, nor liable for any licensing fees due, in connection with your use of GStreamer.
+
+### Releases
+
+Project release notes can be found on the GitHub repo release site [HERE](https://github.com/intel-retail/vision-self-checkout/releases)
diff --git a/docs_src/mkdocs.yml b/docs_src/mkdocs.yml
new file mode 100644
index 00000000..f4d6215d
--- /dev/null
+++ b/docs_src/mkdocs.yml
@@ -0,0 +1,26 @@
+site_name: Vision Checkout
+docs_dir: ./docs_src
+site_dir: ./docs
+theme:
+  name: 'material'
+plugins: []
+nav:
+  - 'index.md'
+  - 'hardwaresetup.md'
+  - 'pipelinesetup.md'
+  - 'pipelinerun.md'
+  - 'pipelinebenchmarking.md'
+  - 'releasenotes.md'
+  - 'toubleshooting.md'
+  - 'references.md'
+google_analytics:
+  - 'UA-154731555-1'
+  - 'auto'
+extra_css:
+  - 'https://fonts.googleapis.com/icon?family=Material+Icons'
+  - './stylesheets/extra.css'
+extra_javascript:
+  - 'https://unpkg.com/lunr/lunr.js'
+markdown_extensions:
+  - codehilite
+  - admonition
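+# Note (assumption): to preview these docs locally, one typical workflow is to
+# install MkDocs with the Material theme and build from the repository root:
+#   pip install mkdocs mkdocs-material
+#   mkdocs build -f docs_src/mkdocs.yml
+# (docs_dir and site_dir above appear to assume the repo root as the working directory)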
\ No newline at end of file
diff --git a/docs_src/pipelinebenchmarking.md b/docs_src/pipelinebenchmarking.md
new file mode 100644
index 00000000..4540fa63
--- /dev/null
+++ b/docs_src/pipelinebenchmarking.md
@@ -0,0 +1,137 @@
+# Pipeline Benchmarking
+
+## Prerequisites:
+Pipeline setup needs to be done first; the pipeline setup documentation can be found [HERE](./pipelinesetup.md)
+
+## Step 1: Run Benchmark
+The benchmark.sh shell script is located in the `benchmark-scripts` directory under the base directory. Before executing this script, change the current directory to `benchmark-scripts`.
+
+### Determine the input source type
+
+### RTSP
+
+    --inputsrc rtsp://127.0.0.1:8554/camera_0
+
+- **__NOTE:__** using an RTSP source with benchmark.sh will automatically run the camera simulator. The camera simulator will start an RTSP stream for each video file found in the sample-media folder.
+
+### USB Camera
+
+    --inputsrc /dev/videoN, where N is 0 or another integer device number
+
+### RealSense Camera
+
+    --inputsrc RS_SERIAL_NUMBER
+
+#### Obtaining RealSense camera serial number
+
+[How_to_get_serial_number](./camera_serial_number.md)
+
+### File
+
+    --inputsrc file:my_video_file.mp4
+
+- **__NOTE:__** files must be in the sample-media folder to be accessible from the Docker container. You can provide your own video files or download a video using [download_sample_videos.sh](https://github.com/intel-retail/vision-self-checkout/benchmark-scripts/download_sample_videos.sh).
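+
+For example, a complete file-based run might look like this (assuming the video sits in sample-media as described above; the log directory name is arbitrary):
+
+```bash
+sudo ./benchmark.sh --pipelines 2 --logdir file_test/data --init_duration 30 --duration 120 --platform core --inputsrc file:my_video_file.mp4
+```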
+
+
+---
+
+### Determine the platform
+
+#### Intel® Core
+
+- `--platform core.x` should be replaced with targeted GPUs such as core (for all GPUs), core.0, core.1, etc.
+
+- `--platform core` will evenly distribute and utilize all available core GPUs
+
+#### Intel® Xeon SP
+
+- `--platform xeon` will use the Xeon CPU for the pipelines
+
+#### DGPU (Intel® Data Center GPU Flex 140 & 170 and Intel® Arc™)
+
+- `--platform dgpu.x` should be replaced with targeted GPUs such as dgpu (for all GPUs), dgpu.0, dgpu.1, etc.
+
+- `--platform dgpu` will evenly distribute and utilize all available dgpus
+
+---
+
+### Specified number of pipelines (Discover the performance and system requirements for a given use case)
+
+Run benchmarking pipelines:
+```bash
+sudo ./benchmark.sh --pipelines <number of pipelines> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <input src>
+```
+
+Get consolidated pipeline results:
+```bash
+sudo python3 consolidate_multiple_run_of_metrics.py --root_directory <logdir path>/ --output <logdir path>/consolidated.csv
+```
+
+### consolidate_multiple_run_of_metrics.py output example
+```
+,Metric,data
+0,Total Text count,0
+1,Total Barcode count,2
+2,Camera_1 FPS,15.0
+3,Camera_0 FPS,15.0
+4,CPU Utilization %,16.548
+5,Memory Utilization %,21.162
+6,Disk Read MB/s,0.0
+7,Disk Write MB/s,0.025
+8,S0 Memory Bandwidth Usage MB/s,1872.632
+9,S0 Power Draw W,27.502
+10,GPU_0 VDBOX0 Utilization %,0.0
+11,GPU_0 GPU Utilization %,17.282
+```
+
+### Stream density (Discover the maximum number of workloads/streams that can be run in parallel for a given stream_density target FPS)
+
+Run Stream Density:
+```bash
+sudo ./benchmark.sh --stream_density <target FPS> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <input src>
+```
+
+
+## Additional Benchmark Examples
+
+### Run decode+pre-processing+object detection (Yolov5s 416x416) only pipeline:
+
+```bash
+sudo ./benchmark.sh --pipelines <number of pipelines> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <4k rtsp stream with 5 objects> --ocr_disabled --barcode_disabled --classification_disabled
+```
+
+```bash
+sudo ./benchmark.sh --stream_density <target FPS> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <input src> --ocr_disabled --barcode_disabled --classification_disabled
+```
+
+### Run decode+pre-processing+object detection (Yolov5s 416x416) + efficientnet-b0 (224x224) only pipeline:
+
+```bash
+sudo ./benchmark.sh --pipelines <number of pipelines> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <4k rtsp stream with 5 objects> --ocr_disabled --barcode_disabled
+```
+
+```bash
+sudo ./benchmark.sh --stream_density <target FPS> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <input src> --ocr_disabled --barcode_disabled
+```
+
+### Run decode+pre-processing+object detection (Yolov5s 416x416) + efficientnet-b0 (224x224) + optical character recognition + barcode detection and decoding:
+
+```bash
+sudo ./benchmark.sh --pipelines <number of pipelines> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <4k rtsp stream with 5 objects> --ocr 5 GPU
+```
+
+```bash
+sudo ./benchmark.sh --stream_density <target FPS> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform <platform> --inputsrc <input src> --ocr 5 GPU
+```
+
+### Run Flex140 optimized decode+pre-processing+object detection (Yolov5s 416x416) + efficientnet-b0 (224x224) + optical character recognition + barcode detection and decoding:
+```bash
+sudo ./benchmark.sh --pipelines 2 --logdir <logdir path>/data1 --init_duration 30 --duration 120 --platform dgpu.0 --inputsrc <4k rtsp stream with 5 objects> --ocr 5 GPU
+
+sudo ./benchmark.sh --pipelines 2 --logdir <logdir path>/data1 --init_duration 30 --duration 120 --platform dgpu.1 --inputsrc <4k rtsp stream with 5 objects> --ocr 5 GPU
+```
+
+```bash
+sudo ./benchmark.sh --stream_density <target FPS> --logdir <logdir path>/data --init_duration 30 --duration 120 --platform dgpu --inputsrc <input src> --ocr 5 GPU
+```
diff --git a/docs_src/pipelinerun.md b/docs_src/pipelinerun.md
new file mode 100644
index 00000000..f59a8459
--- /dev/null
+++ b/docs_src/pipelinerun.md
@@ -0,0 +1,146 @@
+# Pipeline Run
+
+## Prerequisites:
+Pipeline setup needs to be done first; the pipeline setup documentation can be found [HERE](./pipelinesetup.md)
+
+## Run camera simulator
+
+```
+./camera-simulator/camera-simulator.sh
+```
+
+```
+docker ps --format 'table{{.Image}}\t{{.Status}}\t{{.Names}}'
+```
+
+!!! success
+    Your output is as follows:
+
+    | IMAGE                                    | STATUS        | NAMES             |
+    | ---------------------------------------- | ------------- |-------------------|
+    | openvino/ubuntu20_data_runtime:2021.4.2  | Up 11 seconds | simulator_docker  |
+    | openvino/ubuntu20_data_runtime:2021.4.2  | Up 11 seconds | simulator_docker2 |
+    | aler9/rtsp-simple-server                 | Up 13 seconds | camera-simulator  |
+
+Note: there could be multiple containers with IMAGE "openvino/ubuntu20_data_runtime:2021.4.2", depending on the number of sample-media video files you have.
+
+!!! failure
+    If you do not see all of the above docker containers, look through the console output for errors. Sometimes dependencies fail to resolve and must be run again. Address obvious issues. To try again, rerun the camera simulator script above.
+
+
+## Run pipeline with different input source (inputsrc) types
+Use docker-run.sh to run the pipeline.
+
+### Option 1: run with the simulated camera
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc rtsp://127.0.0.1:8554/camera_0
+```
+
+### Option 2: run with a USB camera
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc /dev/video0
+```
+
+### Option 3: run with a RealSense camera (serial number input)
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc serial_number --realsense_enabled
+```
+
+Obtaining the RealSense camera serial number: [How_to_get_serial_number](./camera_serial_number.md)
+
+### Option 4: run with video file input
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc file:my_video_file.mp4
+```
+
+
+## Check for pipeline run success
+
+Make sure the command was successful. To do so, run:
+
+```
+docker ps --format 'table{{.Image}}\t{{.Status}}\t{{.Names}}'
+```
+
+!!! success
+    Your output for Core is as follows:
+
+    | IMAGE        | STATUS       | NAMES                 |
+    | ------------ | ------------ |-----------------------|
+    | sco-soc:2.0  | Up 9 seconds | vision-self-checkout0 |
+
+    Your output for DGPU is as follows:
+
+    | IMAGE        | STATUS       | NAMES                 |
+    | ------------ | ------------ |-----------------------|
+    | sco-dgpu:2.0 | Up 9 seconds | vision-self-checkout0 |
+
+!!! failure
+    If you do not see the above docker container, look through the console output for errors. Sometimes dependencies fail to resolve and must be run again. Address obvious issues. To try again, rerun docker-run.sh with your chosen options.
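+
+To follow the live console output of the pipeline container, `docker logs` can be used with the container name from the table above (the numeric suffix increments with each concurrent run):
+
+```bash
+docker logs -f vision-self-checkout0
+```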
diff --git a/docs_src/pipelinerun.md b/docs_src/pipelinerun.md
new file mode 100644
index 00000000..f59a8459
--- /dev/null
+++ b/docs_src/pipelinerun.md
@@ -0,0 +1,146 @@
+# Pipeline Run
+
+## Prerequisites:
+Pipeline setup needs to be done first; the pipeline setup documentation can be found [HERE](./pipelinesetup.md)
+
+## Run camera simulator
+
+```
+./camera-simulator/camera-simulator.sh
+```
+
+```
+docker ps --format 'table{{.Image}}\t{{.Status}}\t{{.Names}}'
+```
+
+!!! success
+    Your output is as follows:
+
+    | IMAGE                                    | STATUS        | NAMES             |
+    | ---------------------------------------- | ------------- |-------------------|
+    | openvino/ubuntu20_data_runtime:2021.4.2  | Up 11 seconds | simulator_docker  |
+    | openvino/ubuntu20_data_runtime:2021.4.2  | Up 11 seconds | simulator_docker2 |
+    | aler9/rtsp-simple-server                 | Up 13 seconds | camera-simulator  |
+
+Note: there can be multiple containers with the IMAGE "openvino/ubuntu20_data_runtime:2021.4.2", depending on the number of sample-media video files you have.
+
+!!! failure
+    If you do not see all of the above docker containers, look through the console output for errors. Sometimes dependencies fail to resolve and must be run again. Address obvious issues. To try again, rerun the camera simulator script.
+
+
+## Run pipeline with different input source (inputsrc) types
+Use docker-run.sh to run the pipeline
+
+### Option 1: run with simulated camera
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc rtsp://127.0.0.1:8554/camera_0
+```
+
+### Option 2: run with USB camera
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc /dev/video0
+```
+
+### Option 3: run with RealSense camera (serial number input)
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc serial_number --realsense_enabled
+```
+
+Obtaining the RealSense camera serial number: [How_to_get_serial_number](./camera_serial_number.md)
+
+### Option 4: run with video file input
+
+```
+./docker-run.sh --platform core|xeon|dgpu.x --inputsrc file:my_video_file.mp4
+```
+
+
+## Check for pipeline run success
+
+Make sure the command was successful. To do so, run:
+
+```
+docker ps --format 'table{{.Image}}\t{{.Status}}\t{{.Names}}'
+```
+
+!!! success
+    Your output for Core is as follows:
+
+    | IMAGE        | STATUS       | NAMES                 |
+    | ------------ | ------------ |-----------------------|
+    | sco-soc:2.0  | Up 9 seconds | vision-self-checkout0 |
+
+    Your output for DGPU is as follows:
+
+    | IMAGE        | STATUS       | NAMES                 |
+    | ------------ | ------------ |-----------------------|
+    | sco-dgpu:2.0 | Up 9 seconds | vision-self-checkout0 |
+
+!!! failure
+    If you do not see the above docker container, look through the console output for errors. Sometimes dependencies fail to resolve and must be run again. Address obvious issues. To try again, rerun docker-run.sh.
+
+
+## Optional parameters
+The following optional parameters can be passed to docker-run.sh; note that they can affect the performance of the pipeline run.
+
+### `--classification_disabled`
+Disables the classification step of the pipeline. Classification is enabled by default when this flag is not provided to docker-run.sh.
+
+### `--ocr_disabled`
+Disables optical character recognition. OCR is enabled by default when this flag is not provided to docker-run.sh.
+
+### `--ocr`
+Sets the optical character recognition frame interval and inference device, e.g. `--ocr 5 CPU`. The default is an interval of 5 on CPU.
+
+### `--barcode_disabled`
+Disables barcode detection. Barcode detection is enabled by default when this flag is not provided to docker-run.sh.
+
+### `--realsense_enabled`
+Uses a RealSense camera; provide the camera's 12-digit serial number as the inputsrc to docker-run.sh.
+
+### `--barcode`
+Sets the barcode detection frame interval, e.g. `--barcode 5`. The default value is 5.
+
+### `--color-width`
+RealSense color stream width. Overrides the default value of the RealSense GStreamer element; if not provided, the element's default is used. Look up your camera's supported color profiles with `rs-enumerate-devices`.
+
+### `--color-height`
+RealSense color stream height. Overrides the default value of the RealSense GStreamer element; if not provided, the element's default is used. Look up your camera's supported color profiles with `rs-enumerate-devices`.
+
+### `--color-framerate`
+RealSense color stream framerate. Overrides the default value of the RealSense GStreamer element; if not provided, the element's default is used. Look up your camera's supported color profiles with `rs-enumerate-devices`.
+
+## RealSense option pipeline run example:
+
+`./docker-run.sh --platform core --inputsrc serial_number --realsense_enabled --color-width 1920 --color-height 1080 --color-framerate 15`
+
+
+## Sample output in results/r0.jsonl:
+```
+{"resolution":{"height":1080,"width":1920},"timestamp":1087436877}
+{"resolution":{"height":1080,"width":1920},"timestamp":1099074821}
+{"resolution":{"height":1080,"width":1920},"timestamp":1151501119}
+{"resolution":{"height":1080,"width":1920},"timestamp":3975573215}
+{"resolution":{"height":1080,"width":1920},"timestamp":3986134627}
+{"resolution":{"height":1080,"width":1920},"timestamp":4038743185}
+{"resolution":{"height":1080,"width":1920},"timestamp":4047353514}
+{"resolution":{"height":1080,"width":1920},"timestamp":4105882925}
+{"resolution":{"height":1080,"width":1920},"timestamp":4173170063}
+{"resolution":{"height":1080,"width":1920},"timestamp":4240359869}
+...
+```
+## Sample output in results/pipeline0.log:
+```
+27.34
+27.34
+27.60
+27.60
+28.30
+28.30
+28.61
+28.61
+28.48
+28.48
+...
+```
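+
+The FPS log lends itself to a quick sanity check. A minimal sketch, assuming the default results location shown above:
+
+```bash
+# Average the per-sample FPS values recorded by the pipeline
+awk '{ sum += $1; n++ } END { if (n) printf "avg fps: %.2f over %d samples\n", sum/n, n }' ./results/pipeline0.log
+```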
diff --git a/docs_src/pipelinesetup.md b/docs_src/pipelinesetup.md
new file mode 100644
index 00000000..8f2c62ea
--- /dev/null
+++ b/docs_src/pipelinesetup.md
@@ -0,0 +1,79 @@
+# Pipeline Setup
+
+## Step 1: Clone the repository
+
+```
+git clone https://github.com/intel-retail/vision-self-checkout.git && cd ./vision-self-checkout
+```
+
+## Step 2: Install Utilities
+
+Install the utilities by running the install script with `sudo`:
+
+```bash
+sudo ./benchmark-scripts/utility_install.sh
+```
+
+## Step 3: Download Models Manually (Optional)
+
+The model downloader script is called automatically as part of docker-run.sh. You can also download the models manually:
+
+```bash
+./modelDownload.sh
+```
+
+!!! note
+    To manually download models you can follow the links provided in the [Model List](../configs/models/2022/models.list.yml)
+
+## Step 4: Build the reference design
+
+You must build the provided component services and create local docker images. To do so, run:
+
+For Core systems
+```bash
+./docker-build.sh soc
+```
+
+For DGPU systems
+```bash
+./docker-build.sh dgpu
+```
+
+!!! note
+    This command may take a while to run depending on your internet connection and machine specifications.
+
+!!! note "Build with proxy information"
+    If the docker build requires a proxy network, provide the proxy URLs after the first argument. For example, to build the reference design docker image with proxy information for Core systems:
+```bash
+./docker-build.sh soc http://http_proxy_server_ip:http_proxy_server_port http(s)://https_proxy_server_ip:https_proxy_server_port
+```
+
+Similarly, to build with proxy information for DGPU systems:
+
+```bash
+./docker-build.sh dgpu http://http_proxy_server_ip:http_proxy_server_port http(s)://https_proxy_server_ip:https_proxy_server_port
+```
+
+#### Check for success
+
+Make sure the command was successful. To do so, run:
+
+```
+docker images
+```
+
+!!! success
+    The results are:
+
+    - `sco-soc 2.0`
+    or
+    - `sco-dgpu 2.0`
+
+!!! failure
+    If you do not see the above docker images, look through the console output for errors. Sometimes dependencies fail to resolve and must be run again. Address obvious issues. To try again, repeat step 4.
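+
+A quick filter to confirm the image is present, assuming the default image names:
+
+```bash
+docker images | grep -E 'sco-(soc|dgpu)'
+```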
+
+
+#### Next
+
+Run the workload [HERE](./pipelinerun.md) or
+Run the benchmarking [HERE](./pipelinebenchmarking.md)
\ No newline at end of file
diff --git a/docs_src/references.md b/docs_src/references.md
new file mode 100644
index 00000000..b6bfc266
--- /dev/null
+++ b/docs_src/references.md
@@ -0,0 +1,15 @@
+## Libraries
+
+| Library                       | Link                                                                                               |
+| ----------------------------- | ------------------------------------------------------------------------------------------------ |
+| Intel® DL Streamer            | [https://github.com/dlstreamer/dlstreamer](https://github.com/dlstreamer/dlstreamer)              |
+| RealSense gstreamer           | [https://github.com/G2020sudo/realsense-gstreamer](https://github.com/G2020sudo/realsense-gstreamer) |
+| RealSense Libraries           | [https://github.com/gwen2018/librealsense](https://github.com/gwen2018/librealsense)              |
+
+
+## Components
+
+| Component                                             | Link                                                                      |
+| ---------------------------------------------------- | ------------------------------------------------------------------------ |
+| gstreamer                                             | [https://gstreamer.freedesktop.org/](https://gstreamer.freedesktop.org/) |
+| Intel® RealSense Technology                           | [https://www.intel.com/content/www/us/en/architecture-and-technology/realsense-overview.html](https://www.intel.com/content/www/us/en/architecture-and-technology/realsense-overview.html) |
diff --git a/docs_src/releasenotes.md b/docs_src/releasenotes.md
new file mode 100644
index 00000000..2021e356
--- /dev/null
+++ b/docs_src/releasenotes.md
@@ -0,0 +1,16 @@
+# Release Notes
+
+## New Features
+
+| Title | Description |
+| ----------- | ----------- |
+
+## Issues Fixed
+
+| Issue Number | Description | Link |
+| ----------- | ----------- | ----------- |
+
+## Known Issues
+
+| Issue Number | Description | Link |
+| ----------- | ----------- | ----------- |
diff --git a/docs_src/toubleshooting.md b/docs_src/toubleshooting.md
new file mode 100644
index 00000000..7ba39d39
--- /dev/null
+++ b/docs_src/toubleshooting.md
@@ -0,0 +1,5 @@
+# Troubleshooting
+
+## Issues
+
+[github issues](https://github.com/intel-retail/vision-self-checkout/issues)
\ No newline at end of file
diff --git a/get-gpu-info.sh b/get-gpu-info.sh
new file mode 100755
index 00000000..d31db45d
--- /dev/null
+++ b/get-gpu-info.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+HAS_FLEX_140=0
+HAS_FLEX_170=0
+HAS_ARC=0
+GPU_NUM_140=0
+GPU_NUM_170=0
+#HAS_iGPU=0
+
+#get_gpu_devices() {
+    has_gpu=0
+    # PCI class 0x030000 = VGA-compatible display controller, 0x038000 = other display controller; 8086 = Intel vendor ID
+    has_any_intel_non_server_gpu=$(dmesg | grep -i "class 0x030000" | grep "8086")
+    has_any_intel_server_gpu=$(dmesg | grep -i "class 0x038000" | grep "8086")
+    has_flex_170=$(echo "$has_any_intel_server_gpu" | grep -i "56C0")
+    has_flex_140=$(echo "$has_any_intel_server_gpu" | grep -i "56C1")
+    has_arc=$(echo "$has_any_intel_non_server_gpu" | grep -iE "5690|5691|5692|56A0|56A1|56A2|5693|5694|5695|5698|56A5|56A6|56B0|56B1|5696|5697|56A3|56A4|56B2|56B3")
+
+    if [ -z "$has_any_intel_non_server_gpu" ] && [ -z "$has_any_intel_server_gpu" ]
+    then
+        echo "No Intel GPUs found"
+        return
+    fi
+    echo "GPU exists!"
+
+    if [ ! -z "$has_flex_140" ]
+    then
+        HAS_FLEX_140=1
+        GPU_NUM_140=$(echo "$has_flex_140" | wc -l)
+    fi
+    if [ ! -z "$has_flex_170" ]
+    then
+        HAS_FLEX_170=1
+        GPU_NUM_170=$(echo "$has_flex_170" | wc -l)
+    fi
+    if [ ! -z "$has_arc" ]
+    then
+        HAS_ARC=1
+    fi
+
+    echo "HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, HAS_ARC=$HAS_ARC, GPU_NUM_140=$GPU_NUM_140, GPU_NUM_170=$GPU_NUM_170"
+#}
\ No newline at end of file
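The detection results are exported as plain shell variables (HAS_FLEX_140, HAS_FLEX_170, HAS_ARC, GPU_NUM_140, GPU_NUM_170), so a caller can source the script and branch on them. A minimal sketch of such a wrapper (hypothetical; not part of this patch):

```bash
#!/bin/bash
# Source the detection script, then report what it found
source ./get-gpu-info.sh
echo "Flex 140: $HAS_FLEX_140 (count: $GPU_NUM_140)"
echo "Flex 170: $HAS_FLEX_170 (count: $GPU_NUM_170)"
echo "Arc:      $HAS_ARC"
```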
diff --git a/get-options.sh b/get-options.sh
new file mode 100755
index 00000000..e82648a0
--- /dev/null
+++ b/get-options.sh
@@ -0,0 +1,185 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+BARCODE_DISABLED=0
+BARCODE_INTERVAL=5
+OCR_INTERVAL=5
+OCR_DEVICE=CPU
+OCR_DISABLED=0
+CLASSIFICATION_DISABLED=0
+REALSENSE_ENABLED=0
+COLOR_WIDTH=0
+COLOR_HEIGHT=0
+COLOR_FRAMERATE=0
+
+error() {
+    printf '%s\n' "$1" >&2
+    exit 1
+}
+
+show_help() {
+    echo "
+        usage: ./docker-run.sh --platform core.x|xeon|dgpu.x --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 [--classification_disabled] [ --ocr_disabled | --ocr [OCR_INTERVAL] [OCR_DEVICE] ] [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] [--realsense_enabled]
+
+        Note:
+          1. dgpu.x should be replaced with the targeted GPU, e.g. dgpu (for all GPUs), dgpu.0, dgpu.1, etc.
+          2. core.x should be replaced with the targeted GPU, e.g. core (for all GPUs), core.0, core.1, etc.
+          3. filesrc will utilize videos stored in the sample-media folder
+          4. Set environment variable STREAM_DENSITY_MODE=1 for starting single container stream density testing
+          5. Set environment variable RENDER_MODE=1 for displaying pipeline and overlay CV metadata
+          6. Set environment variable LOW_POWER=1 for using GPU usage only based pipeline for Core platforms
+          7. Set environment variable CPU_ONLY=1 for overriding inference to be performed on CPU only
+    "
+}
+
+while :; do
+    case $1 in
+    -h | -\? | --help)
+        show_help
+        exit
+        ;;
+    --platform)
+        if [ "$2" ]; then
+            if [ "$2" == "xeon" ]; then
+                PLATFORM=$2
+                shift
+            elif grep -q "core" <<< "$2"; then
+                PLATFORM="core"
+                arrgpu=(${2//./ })
+                TARGET_GPU_NUMBER=${arrgpu[1]}
+                if [ -z "$TARGET_GPU_NUMBER" ]; then
+                    TARGET_GPU="GPU.0"
+                    TARGET_GPU_DEVICE="--privileged"
+                else
+                    # GPU index N maps to render node /dev/dri/renderD(128+N)
+                    TARGET_GPU_ID=$((128+$TARGET_GPU_NUMBER))
+                    TARGET_GPU="GPU."$TARGET_GPU_NUMBER
+                    TARGET_GPU_DEVICE="--device=/dev/dri/renderD"$TARGET_GPU_ID
+                    TARGET_GPU_DEVICE_NAME="/dev/dri/renderD"$TARGET_GPU_ID
+                fi
+                echo "CORE"
+                shift
+            elif grep -q "dgpu" <<< "$2"; then
+                PLATFORM="dgpu"
+                arrgpu=(${2//./ })
+                TARGET_GPU_NUMBER=${arrgpu[1]}
+                if [ -z "$TARGET_GPU_NUMBER" ]; then
+                    TARGET_GPU="GPU.0"
+                    TARGET_GPU_DEVICE="--privileged"
+                else
+                    TARGET_GPU_ID=$((128+$TARGET_GPU_NUMBER))
+                    TARGET_GPU="GPU."$TARGET_GPU_NUMBER
+                    TARGET_GPU_DEVICE="--device=/dev/dri/renderD"$TARGET_GPU_ID
+                    TARGET_GPU_DEVICE_NAME="/dev/dri/renderD"$TARGET_GPU_ID
+                fi
+                #echo "$PLATFORM $TARGET_GPU"
+                shift
+            else
+                error 'ERROR: "--platform" requires an argument core|xeon|dgpu.'
+            fi
+        else
+            error 'ERROR: "--platform" requires an argument core|xeon|dgpu.'
+        fi
+        ;;
+    --inputsrc)
+        if [ "$2" ]; then
+            INPUTSRC=$2
+            shift
+        else
+            error 'ERROR: "--inputsrc" requires an argument RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0.'
+        fi
+        ;;
+    --classification_disabled)
+        CLASSIFICATION_DISABLED=1
+        ;;
+    --ocr_disabled)
+        OCR_DISABLED=1
+        ;;
+    --barcode_disabled)
+        BARCODE_DISABLED=1
+        ;;
+    --realsense_enabled)
+        REALSENSE_ENABLED=1
+        ;;
+    --ocr)
+        if [ "$2" ]; then
+            OCR_INTERVAL=$2
+        else
+            error 'ERROR: "--ocr" requires arguments [OCR_INTERVAL] [OCR_DEVICE].'
+        fi
+        if [ "$3" ]; then
+            OCR_DEVICE=$3
+            shift 2
+        else
+            error 'ERROR: "--ocr" requires arguments [OCR_INTERVAL] [OCR_DEVICE].'
+        fi
+        ;;
+    --barcode)
+        if [ "$2" ]; then
+            BARCODE_INTERVAL=$2
+            shift 1
+        else
+            error 'ERROR: "--barcode" requires an argument [BARCODE_INTERVAL].'
+        fi
+        ;;
+    --color-width)
+        if [ "$REALSENSE_ENABLED" != 1 ]; then
+            error 'ERROR: "--color-width" requires the --realsense_enabled flag'
+        else
+            if [ "$2" ]; then
+                COLOR_WIDTH=$2
+                shift 1
+            else
+                error 'ERROR: "--color-width" requires an argument [COLOR_WIDTH].'
+            fi
+        fi
+        ;;
+    --color-height)
+        if [ "$REALSENSE_ENABLED" != 1 ]; then
+            error 'ERROR: "--color-height" requires the --realsense_enabled flag'
+        else
+            if [ "$2" ]; then
+                COLOR_HEIGHT=$2
+                shift 1
+            else
+                error 'ERROR: "--color-height" requires an argument [COLOR_HEIGHT].'
+            fi
+        fi
+        ;;
+    --color-framerate)
+        if [ "$REALSENSE_ENABLED" != 1 ]; then
+            error 'ERROR: "--color-framerate" requires the --realsense_enabled flag'
+        else
+            if [ "$2" ]; then
+                COLOR_FRAMERATE=$2
+                shift 1
+            else
+                error 'ERROR: "--color-framerate" requires an argument [COLOR_FRAMERATE].'
+            fi
+        fi
+        ;;
+    -?*)
+        error "ERROR: Unknown option $1"
+        ;;
+    ?*)
+        error "ERROR: Unknown option $1"
+        ;;
+    *)
+        break
+        ;;
+    esac
+
+    shift
+
+done
+
+if [ -z "$PLATFORM" ] || [ -z "$INPUTSRC" ]
+then
+    #echo "Blanks: $1 $PLATFORM $INPUTSRC"
+    show_help
+    exit 0
+fi
+
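A detail worth knowing when passing `--platform core.N` or `--platform dgpu.N`: the parser above maps GPU index N to the render node `/dev/dri/renderD$((128+N))`. To see which render nodes a host actually exposes:

```bash
# renderD128 corresponds to GPU.0, renderD129 to GPU.1, and so on
ls -l /dev/dri/renderD*
```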
diff --git a/modelDownload.sh b/modelDownload.sh
new file mode 100755
index 00000000..bc44f8a5
--- /dev/null
+++ b/modelDownload.sh
@@ -0,0 +1,143 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+modelPrecisionFP16=FP16
+modelPrecisionFP32=FP32
+modelPrecisionFP16INT8=FP16-INT8
+modelPrecisionFP32INT8=FP32-INT8
+
+modelDir="configs/models/2022/"
+pipelineZooModel="https://github.com/dlstreamer/pipeline-zoo-models/raw/main/storage/"
+openModelZoo="https://storage.openvinotoolkit.org/repositories/open_model_zoo/2022.1/models_bin/3/"
+dlstreamerLabel="https://raw.githubusercontent.com/dlstreamer/dlstreamer/master/samples/labels/"
+
+# $1 model file name
+# $2 download URL
+# $3 model precision
+getModelFiles() {
+    # Make the model directory
+    # e.g. mkdir -p efficientnet-b0/1/FP16-INT8
+    mkdir -p $1/1/$3
+
+    # Get the models
+    wget $2/$3/$1".bin" -P $1/1/$3
+    wget $2/$3/$1".xml" -P $1/1/$3
+}
+
+# $1 model file name
+# $2 download URL
+# $3 process file name (this can be different than the model name, e.g. horizontal-text-detection-0001 is using horizontal-text-detection-0002.json)
+getProcessFile() {
+    # Get the process file
+    wget $2/$3.json -P $1/1
+}
+
+getLabelFile() {
+    mkdir -p $1/1
+
+    wget $2/$3 -P $1/1
+}
+
+REFRESH_MODE=0
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --refresh)
+            echo "running model downloader in refresh mode"
+            REFRESH_MODE=1
+            ;;
+        *)
+            echo "Invalid flag: $1" >&2
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+efficientNet="efficientnet-b0"
+efficientNetDir="efficientnet-b0_INT8"
+horizontalText0001="horizontal-text-detection-0001"
+horizontalText0002="horizontal-text-detection-0002"
+textRecognition0012GPU="text-recognition-0012-GPU"
+textRec0014="text-recognition-0014"
+yolov5s="yolov5s"
+
+# Move to the model working directory
+mkdir -p $modelDir
+cd $modelDir
+
+if [ "$REFRESH_MODE" -eq 1 ]; then
+    # remove all downloaded files so they will be re-downloaded
+    rm -rf "${PWD}/$efficientNet/" || true
+    rm -rf "${PWD}/$horizontalText0001/" || true
+    rm -rf "${PWD}/$horizontalText0002/" || true
+    rm -rf "${PWD}/$textRecognition0012GPU/" || true
+    rm -rf "${PWD}/$textRec0014/" || true
+    # we don't delete the whole directory as there are some existing checked-in files
+    rm "${PWD}/$yolov5s/1/FP16-INT8/yolov5s.bin" || true
+    rm "${PWD}/$yolov5s/1/FP16-INT8/yolov5s.xml" || true
+    rm "${PWD}/$yolov5s/1/FP16/yolov5s.bin" || true
+    rm "${PWD}/$yolov5s/1/FP16/yolov5s.xml" || true
+    rm "${PWD}/$yolov5s/1/FP32-INT8/yolov5s.bin" || true
+    rm "${PWD}/$yolov5s/1/FP32-INT8/yolov5s.xml" || true
+    rm "${PWD}/$yolov5s/1/FP32/yolov5s.bin" || true
+    rm "${PWD}/$yolov5s/1/FP32/yolov5s.xml" || true
+    rm "${PWD}/$yolov5s/1/yolov5s.json" || true
+fi
+
+# EfficientNet
+# check whether the model .bin file already exists before downloading
+efficientNetModelFile="${PWD}/$efficientNet/1/$modelPrecisionFP16INT8/$efficientNet.bin"
+echo $efficientNetModelFile
+if [ -f "$efficientNetModelFile" ]; then
+    echo "models already exist, exiting..."
+    exit 0
+fi
+
+echo "Downloading models..."
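+# The helper calls below populate configs/models/2022/<model>/1/<precision>/
+# with each model's .bin/.xml pair, plus the model-proc .json file (and, for
+# classification, a label file) directly under <model>/1/.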
+
+getModelFiles $efficientNet $pipelineZooModel$efficientNetDir $modelPrecisionFP16INT8
+getProcessFile $efficientNet $pipelineZooModel$efficientNetDir $efficientNet
+getLabelFile $efficientNet $dlstreamerLabel "imagenet_2012.txt"
+# EfficientNet: get the efficientnet.ckpt files
+wget "https://storage.openvinotoolkit.org/repositories/open_model_zoo/public/2022.1/efficientnet-b0/efficientnet-b0.tar.gz" -P $efficientNet/1/
+tar -xvkf $efficientNet/1/efficientnet-b0.tar.gz -C $efficientNet/1/
+rm $efficientNet/1/efficientnet-b0.tar.gz
+
+# Horizontal Text 0001
+getModelFiles $horizontalText0001 $openModelZoo$horizontalText0001 $modelPrecisionFP16INT8
+getProcessFile $horizontalText0001 $pipelineZooModel$horizontalText0002 $horizontalText0002
+mv $horizontalText0001/1/$horizontalText0002.json $horizontalText0001/1/$horizontalText0001.json
+
+# Horizontal Text 0002
+getModelFiles $horizontalText0002 $pipelineZooModel$horizontalText0002 $modelPrecisionFP16INT8
+getProcessFile $horizontalText0002 $pipelineZooModel$horizontalText0002 $horizontalText0002
+cp Horizontal-text-detection-0002_fix.json ./$horizontalText0002/1/horizontal-text-detection-0002.json
+
+# Text Recognition 0012 GPU
+textRec0012GPU="text-recognition-0012-mod"
+textRec0012="text-recognition-0012"
+getModelFiles $textRec0012GPU $pipelineZooModel$textRec0012GPU $modelPrecisionFP16INT8
+getProcessFile $textRec0012GPU $pipelineZooModel$textRec0012GPU $textRec0012GPU
+mv $textRec0012GPU/1/$textRec0012GPU.json $textRec0012GPU/1/$textRec0012.json
+mv $textRec0012GPU/ "$textRecognition0012GPU"
+
+# Text Recognition 0014
+getModelFiles $textRec0014 $openModelZoo$textRec0014 $modelPrecisionFP16INT8
+getProcessFile $textRec0014 $pipelineZooModel$textRec0012GPU $textRec0012GPU
+mv $textRec0014/1/$textRec0012GPU.json $textRec0014/1/$textRec0012.json
+
+# Yolov5s
+getModelFiles $yolov5s $pipelineZooModel"yolov5s-416" $modelPrecisionFP16
+getModelFiles $yolov5s $pipelineZooModel"yolov5s-416" $modelPrecisionFP32
+getProcessFile $yolov5s $pipelineZooModel"yolov5s-416" $yolov5s
+
+# Yolov5s INT8
+getModelFiles $yolov5s $pipelineZooModel"yolov5s-416_INT8" $modelPrecisionFP16INT8
+getModelFiles $yolov5s $pipelineZooModel"yolov5s-416_INT8" $modelPrecisionFP32INT8
+
+# give the downloaded files some time to settle
+sleep 3
diff --git a/patch/libusb.h b/patch/libusb.h
new file mode 100644
index 00000000..c374086d
--- /dev/null
+++ b/patch/libusb.h
@@ -0,0 +1,18 @@
+// License: Apache 2.0. See LICENSE file in root directory.
+// Copyright(c) 2021 Intel Corporation. All Rights Reserved.
+
+#pragma once
+
+// GCC, when using -pedantic, emits warnings from inside libusb.h, so suppress
+// them around the include:
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+
+#include <libusb.h>
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..75fa89a5
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+pyzbar
+zxing-cpp
+numpy==1.23.1
+pandas==1.3.5
+natsort
+opencv-python
diff --git a/run.sh b/run.sh
new file mode 100755
index 00000000..267a5ae0
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,371 @@
+#!/bin/bash -e
+#
+# Copyright (C) 2023 Intel Corporation.
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +RUN_PREFIX= +MODELS= +PIPELINES= +FRAMEWORK= +IMAGE= +VOLUME_MOUNT= +MODE=SERVICE +PORTS= +DEVICES= +DEFAULT_GSTREAMER_IMAGE="dlstreamer-pipeline-server-gstreamer" +DEFAULT_FFMPEG_IMAGE="dlstreamer-pipeline-server-ffmpeg" +ENTRYPOINT= +ENTRYPOINT_ARGS= +PRIVILEGED= +NETWORK= +USER= +INTERACTIVE=-it +DEVICE_CGROUP_RULE= +USER_GROUPS= +ENABLE_RTSP=${ENABLE_RTSP:-"false"} +ENABLE_WEBRTC=${ENABLE_WEBRTC:-"false"} +RTSP_PORT=8554 + +SCRIPT_DIR=$(dirname "$(readlink -f "$0")") +SOURCE_DIR=$(dirname $SCRIPT_DIR) +ENVIRONMENT=$(env | cut -f1 -d= | grep -E '_(proxy)$' | sed 's/^/-e / ' | tr '\n' ' ') +IGNORE_INIT_ERRORS=false + +show_options() { + echo "" + echo "Running Pipeline Server Image: '${IMAGE}'" + echo " Models: '${MODELS}'" + echo " Pipelines: '${PIPELINES}'" + echo " Framework: '${FRAMEWORK}'" + echo " Environment: '${ENVIRONMENT}'" + echo " Volume Mounts: '${VOLUME_MOUNT}'" + echo " Mode: '${MODE}'" + echo " Ports: '${PORTS}'" + echo " Name: '${NAME}'" + echo " Network: '${NETWORK}'" + echo " Entrypoint: '${ENTRYPOINT}'" + echo " EntrypointArgs: '${ENTRYPOINT_ARGS}'" + echo " User: '${USER}'" + echo " User Groups: '${USER_GROUPS}'" + echo " Devices: '${DEVICES}'" + echo " Device CGroup Rule: '${DEVICE_CGROUP_RULE}'" + echo "" +} + +show_help() { + echo "usage: run.sh" + echo " [--image image]" + echo " [--framework ffmpeg || gstreamer]" + echo " [--models path to models directory]" + echo " [--pipelines path to pipelines directory]" + echo " [-v additional volume mount to pass to docker run]" + echo " [-e additional environment to pass to docker run]" + echo " [--entrypoint-args additional parameters to pass to entrypoint in docker run]" + echo " [-p additional ports to pass to docker run]" + echo " [--network name network to pass to docker run]" + echo " [--user name of user to pass to docker run]" + echo " [--group-add name of user group to pass to docker run]" + echo " [--name container name to pass to docker run]" + echo " [--device device to pass to docker run]" + echo " [--enable-rtsp To enable rtsp re-streaming]" + echo " [--rtsp-port Specify the port to use for rtsp re-streaming]" + echo " [--enable-webrtc To enable WebRTC frame destination]" + echo " [--dev run in developer mode]" + exit 0 +} + +error() { + printf '%s\n' "$1" >&2 + exit +} + +enable_hardware_access() { + # GPU + if ls /dev/dri/render* 1> /dev/null 2>&1; then + echo "Found /dev/dri/render entry - enabling for GPU" + DEVICES+='--device /dev/dri ' + RENDER_GROUPS=$(stat -c '%g' /dev/dri/render*) + for group in $RENDER_GROUPS + do + USER_GROUPS+="--group-add $group " + done + fi + + # Intel(R) NCS2 + if [ -d /dev/bus/usb ]; then + echo "Found /dev/bus/usb - enabling for Intel(R) NCS2" + DEVICE_CGROUP_RULE=--device-cgroup-rule=\'c\ 189:*\ rmw\' + VOLUME_MOUNT+="-v /dev/bus/usb:/dev/bus/usb " + fi + + # HDDL + if compgen -G /dev/myriad* > /dev/null ; then + echo "Found /dev/myriad devices - enabling for HDDL-R" + VOLUME_MOUNT+="-v /var/tmp:/var/tmp -v /dev/shm:/dev/shm " + fi + + # Webcam + for device in $(ls /dev | grep video); do + echo "Found /dev/$device - enabling webcam" + DEVICES+="--device /dev/$device " + done + + # Microphone + if [ -e /dev/snd ]; then + echo "Found /dev/snd - enabling microphone" + DEVICES+="--device /dev/snd " + fi +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + -h | -\? | --help) + show_help # Display a usage synopsis. + exit + ;; + --dry-run) + RUN_PREFIX=echo + ;; + --image) # Takes an option argument; ensure it has been specified. 
+ if [ "$2" ]; then + IMAGE=$2 + shift + else + error 'ERROR: "--image" requires a non-empty option argument.' + fi + ;; + --models) + if [ "$2" ]; then + MODELS=$(realpath $2) + shift + else + error 'ERROR: "--models" requires a non-empty option argument.' + fi + ;; + --user) + if [ "$2" ]; then + USER="--user $2" + shift + else + error 'ERROR: "--user" requires a non-empty option argument.' + fi + ;; + --group-add) + if [ "$2" ]; then + USER_GROUPS+="--group-add $2 " + shift + else + error 'ERROR: "--group-add" requires a non-empty option argument.' + fi + ;; + --device) + if [ "$2" ]; then + DEVICES+="--device $2 " + shift + else + error 'ERROR: "--device" requires a non-empty option argument.' + fi + ;; + --privileged) + PRIVILEGED="--privileged " + ;; + --device-cgroup-rule) + if [ "$2" ]; then + DEVICE_CGROUP_RULE="--device-cgroup-rule=$2 " + shift + else + error 'ERROR: "--device-cgroup-rule" requires a non-empty option argument.' + fi + ;; + --pipelines) + if [ "$2" ]; then + PIPELINES=$(realpath $2) + shift + else + error 'ERROR: "--pipelines" requires a non-empty option argument.' + fi + ;; + --framework) + if [ "$2" ]; then + FRAMEWORK=$2 + shift + else + error 'ERROR: "--framework" requires a non-empty option argument.' + fi + ;; + -e) + if [ "$2" ]; then + ENVIRONMENT+="-e $2 " + shift + else + error 'ERROR: "-e" requires a non-empty option argument.' + fi + ;; + --entrypoint-args) + if [ "$2" ]; then + ENTRYPOINT_ARGS+="$2 " + shift + else + error 'ERROR: "--entrypoint-args" requires a non-empty option argument.' + fi + ;; + -p) + if [ "$2" ]; then + PORTS+="-p $2 " + shift + else + error 'ERROR: "-p" requires a non-empty option argument.' + fi + ;; + -v) + if [ "$2" ]; then + VOLUME_MOUNT+="-v $2 " + shift + else + error 'ERROR: "-v" requires a non-empty option argument.' + fi + ;; + --dev) + MODE=DEV + ;; + --name) + if [ "$2" ]; then + NAME=$2 + shift + else + error 'ERROR: "--name" requires a non-empty option argument.' + fi + ;; + --network) + if [ "$2" ]; then + NETWORK="--network $2" + shift + else + error 'ERROR: "--network" requires a non-empty option argument.' + fi + ;; + --entrypoint) + if [ "$2" ]; then + ENTRYPOINT="--entrypoint $2" + shift + else + error 'ERROR: "--entrypoint" requires a non-empty option argument.' + fi + ;; + --rtsp-port) + if [ "$2" ]; then + RTSP_PORT=$2 + shift + else + error 'ERROR: "--rtsp-port" requires a non-empty option argument.' + fi + ;; + --enable-rtsp) + ENABLE_RTSP=true + ;; + --enable-webrtc) + ENABLE_WEBRTC=true + ;; + --non-interactive) + unset INTERACTIVE + ;; + --) # End of all options. + shift + break + ;; + -?*) + printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2 + ;; + *) # Default case: No more options, so break out of the loop. 
+ break ;; + esac + + shift +done + +if [ -z "$FRAMEWORK" ]; then + FRAMEWORK="gstreamer" +elif [ $FRAMEWORK != 'gstreamer' ] && [ $FRAMEWORK != 'ffmpeg' ]; then + echo "Invalid framework" + show_help +fi + +if [ -z "$IMAGE" ]; then + IMAGE=DEFAULT_${FRAMEWORK^^}_IMAGE + IMAGE=${!IMAGE} +fi + +if [ -z "$NAME" ]; then + # Convert tag separator if exists + NAME=${IMAGE//[\:\/]/_} +fi + +if [ "${MODE}" == "DEV" ]; then + VOLUME_MOUNT+="-v $SOURCE_DIR:/home/pipeline-server/ " + VOLUME_MOUNT+="-v /tmp:/tmp " + VOLUME_MOUNT+="-v /dev:/dev " + if [ -z "$NETWORK" ]; then + NETWORK="--network=host" + fi + if [ -z "$ENTRYPOINT" ]; then + ENTRYPOINT="--entrypoint /bin/bash" + fi + if [ -z "$MODELS" ]; then + MODELS=$SOURCE_DIR/models + fi + if [ -z "$PIPELINES" ]; then + PIPELINES=$SOURCE_DIR/pipelines/$FRAMEWORK + fi + PRIVILEGED="--privileged " +elif [ ! -z "$ENTRYPOINT" ]; then + MODE=CUSTOM_ENTRYPOINT +elif [ "${MODE}" == "SERVICE" ]; then + if [ -z "$PORTS" ]; then + PORTS+="-p 8080:8080 " + fi +else + echo "Invalid Mode" + show_help +fi + +enable_hardware_access + +if [ "$ENABLE_RTSP" != "false" ]; then + ENVIRONMENT+="-e ENABLE_RTSP=$ENABLE_RTSP -e RTSP_PORT=$RTSP_PORT " + PORTS+="-p $RTSP_PORT:$RTSP_PORT " +fi + +if [ "$ENABLE_WEBRTC" != "false" ]; then + ENVIRONMENT+="-e ENABLE_WEBRTC=$ENABLE_WEBRTC " +fi + +if [ ! -z "$MODELS" ]; then + VOLUME_MOUNT+="-v $MODELS:/home/pipeline-server/models " +fi + +if [ ! -z "$PIPELINES" ]; then + VOLUME_MOUNT+="-v $PIPELINES:/home/pipeline-server/pipelines " +fi + +if [ ! -z "$VOLUME_MOUNT" ]; then + if [ -z "$USER" ]; then + USER="--user $UID" + fi +fi + +if [ ! -z "$USER" ]; then + for group in "audio" "users" + do + USER_GROUPS+="--group-add $group " + done +fi + +show_options + +echo "$RUN_PREFIX docker run $INTERACTIVE --rm $ENVIRONMENT -e IGNORE_INIT_ERRORS=$IGNORE_INIT_ERRORS $VOLUME_MOUNT $DEVICE_CGROUP_RULE $DEVICES $NETWORK $PORTS $ENTRYPOINT --name ${NAME} ${PRIVILEGED} ${USER} $USER_GROUPS $IMAGE ${ENTRYPOINT_ARGS}" + +# eval must be used to ensure the --device-cgroup-rule string is correctly parsed +eval "$RUN_PREFIX docker run $INTERACTIVE --rm $ENVIRONMENT -e IGNORE_INIT_ERRORS=$IGNORE_INIT_ERRORS $VOLUME_MOUNT $DEVICE_CGROUP_RULE $DEVICES $NETWORK $PORTS $ENTRYPOINT --name ${NAME} ${PRIVILEGED} ${USER} $USER_GROUPS $IMAGE ${ENTRYPOINT_ARGS}" + diff --git a/sample-media/README.md b/sample-media/README.md new file mode 100644 index 00000000..c4f3c2d8 --- /dev/null +++ b/sample-media/README.md @@ -0,0 +1 @@ +Place media files here for camera simulator \ No newline at end of file diff --git a/security.md b/security.md new file mode 100644 index 00000000..8826228e --- /dev/null +++ b/security.md @@ -0,0 +1,7 @@ +# Security Policy + +Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation. + +# Reporting a Vulnerability + +Please report any security vulnerabilities in this project utilizing the guidelines here. \ No newline at end of file diff --git a/stop_all_docker_containers.sh b/stop_all_docker_containers.sh new file mode 100755 index 00000000..aaf0bca3 --- /dev/null +++ b/stop_all_docker_containers.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+docker kill $(docker ps -q)
diff --git a/testModelDownload.sh b/testModelDownload.sh
new file mode 100755
index 00000000..9d487bcb
--- /dev/null
+++ b/testModelDownload.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+./modelDownload.sh
+
+# $1 model directory
+# $2 (optional) expected file name inside the directory
+testModelDownload() {
+    if [ -d "$1" ]; then
+        # the folder exists; check for a specific file if one was given
+        if [ ! -z "$2" ]; then
+            filePath="$1"/"$2"
+            if [ -f "$filePath" ]; then
+                echo "Passed: found file $filePath"
+            else
+                echo "Failed: expected file not found $filePath"
+                exit 1
+            fi
+        else
+            echo "Passed: found folder $1"
+        fi
+    else
+        echo "Failed: expected folder not found $1"
+        exit 1
+    fi
+}
+
+cleanupDownload() {
+    echo
+    echo "cleaning up download files..."
+    # remove downloaded files so the test is re-runnable
+    rm -rf "$expectedEfficientnetDir"
+    rm -rf "$expectedHorizontalTextDetection0001Dir"
+    rm -rf "$expectedHorizontalTextDetection0002Dir"
+    rm -rf "$expectedTextRec0012GPU"
+    rm -rf "$expectedTextRec0014"
+    # we don't delete the whole directory as there are some existing checked-in files
+    rm configs/models/2022/yolov5s/1/FP16-INT8/yolov5s.bin
+    rm configs/models/2022/yolov5s/1/FP16-INT8/yolov5s.xml
+    rm configs/models/2022/yolov5s/1/FP16/yolov5s.bin
+    rm configs/models/2022/yolov5s/1/FP16/yolov5s.xml
+    rm configs/models/2022/yolov5s/1/FP32-INT8/yolov5s.bin
+    rm configs/models/2022/yolov5s/1/FP32-INT8/yolov5s.xml
+    rm configs/models/2022/yolov5s/1/FP32/yolov5s.bin
+    rm configs/models/2022/yolov5s/1/FP32/yolov5s.xml
+    rm configs/models/2022/yolov5s/1/yolov5s.json
+
+    echo "done."
+}
+
+modelDir="configs/models/2022/"
+
+# Test efficientNet download
+expectedEfficientnetDir=$modelDir"efficientnet-b0"
+# Test that the efficientnet-b0.bin model file exists
+testModelDownload "$expectedEfficientnetDir"/1/FP16-INT8 "efficientnet-b0.bin"
+testModelDownload "$expectedEfficientnetDir"/1 "imagenet_2012.txt"
+expectedEfficientnetModelFile=$expectedEfficientnetDir"/1/FP16-INT8/efficientnet-b0.bin"
+if [ -f "$expectedEfficientnetModelFile" ]; then
+    echo "Passed: found ${expectedEfficientnetModelFile}"
+else
+    echo "Failed: expected model file not found ${expectedEfficientnetModelFile}"
+    exit 1
+fi
+
+timestamp_model=$(stat -c %Y "$expectedEfficientnetModelFile")
+
+# Test horizontal text detection 0001 download
+expectedHorizontalTextDetection0001Dir=$modelDir"horizontal-text-detection-0001"
+testModelDownload $expectedHorizontalTextDetection0001Dir
+
+# Test horizontal text detection 0002 download
+expectedHorizontalTextDetection0002Dir=$modelDir"horizontal-text-detection-0002"
+testModelDownload $expectedHorizontalTextDetection0002Dir
+
+# Test text recognition 0012-GPU
+expectedTextRec0012GPU=$modelDir"text-recognition-0012-GPU"
+testModelDownload $expectedTextRec0012GPU
+
+# Test text recognition 0014
+expectedTextRec0014=$modelDir"text-recognition-0014"
+testModelDownload $expectedTextRec0014
+
+# Test Yolov5s download
+expectedYolov5sDir=$modelDir"yolov5s/1/FP32"
+testModelDownload $expectedYolov5sDir "yolov5s.bin"
+
+# Test Yolov5s-INT8 download
+expectedYolov5sINT8Dir=$modelDir"yolov5s/1/FP32-INT8"
+testModelDownload $expectedYolov5sINT8Dir "yolov5s.bin"
+
+echo
+echo "Test: re-running modelDownloader should not re-download without the --refresh option"
+./modelDownload.sh
+timestamp_model_rerun=$(stat -c %Y "$expectedEfficientnetModelFile")
+if [ "$timestamp_model_rerun" -eq "$timestamp_model" ]; then
+    echo "Passed: re-run of modelDownloader did not re-download files"
+else
+    echo "Failed: re-run of modelDownloader re-downloaded files"
+    cleanupDownload
+    exit 1
+fi
+
+echo
+echo "Test --refresh option:"
+# use the refresh option to re-test:
+./modelDownload.sh --refresh
+if [ -f "$expectedEfficientnetModelFile" ]; then
+    refresh_timestamp_model=$(stat -c %Y "$expectedEfficientnetModelFile")
+    if [ "$refresh_timestamp_model" -gt "$timestamp_model" ]; then
+        echo "Passed: --refresh option test found ${expectedEfficientnetModelFile} and the timestamp was refreshed"
+    else
+        echo "Failed: --refresh option test found ${expectedEfficientnetModelFile} but the timestamp was not refreshed"
+        cleanupDownload
+        exit 1
+    fi
+else
+    echo "Failed: --refresh option test expected model file not found ${expectedEfficientnetModelFile}"
+    cleanupDownload
+    exit 1
+fi
+
+cleanupDownload
"$timestamp_model" ]; then + echo "Passed: re-run modelDownloader and it didn't re-download files" +else + echo "Failed: re-run modelDownloader and it re-download files" + cleanupDownload + exit 1 +fi + +echo +echo "Test --refresh option:" +# use refresh option to re-test: +./modelDownload.sh --refresh +if [ -f "$expectedEfficientnetModelFile" ]; then + refresh_timestamp_model=$(stat -c %Y "$expectedEfficientnetModelFile") + if [ "$refresh_timestamp_model" -gt "$timestamp_model" ]; then + echo "Passed: --refresh option test found ${expectedEfficientnetModelFile} and timestamp refreshed" + else + echo "Failed: --refresh option test found ${expectedEfficientnetModelFile} but timestamp not refreshed" + cleanupDownload + exit 1 + fi +else + echo "Failed: --refresh option test expect model file not found ${expectedEfficientnetModelFile}" + cleanupDownload + exit 1 +fi + +cleanupDownload diff --git a/test_barcode_docker_run.sh b/test_barcode_docker_run.sh new file mode 100755 index 00000000..4ea32695 --- /dev/null +++ b/test_barcode_docker_run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# test case 1: barcode missing parameter +echo "test case 1: barcode missing parameter" +./docker-run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0 --ocr 5 CPU --barcode +test $? -eq 1 && echo "barcode missing parameter test PASSED" || echo "test failed: expecting error with status code 1 but got status code 0" + +echo +# test case 2: barcode paramerter ok +echo "test case 2: barcode paramerter ok" +./docker-run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0 --ocr 5 CPU --barcode 5 +test $? -eq 0 && echo "test PASSED" || echo "test failed: expecting status code 0 but got status code 1" diff --git a/test_realsense_params_docker_run.sh b/test_realsense_params_docker_run.sh new file mode 100755 index 00000000..c768fff8 --- /dev/null +++ b/test_realsense_params_docker_run.sh @@ -0,0 +1,101 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +cleanup() +{ + ./stop_all_docker_containers.sh + sleep 2 + sudo rm -rf ./results/*.log + sudo rm -rf ./results/*.jsonl +} + +REAL_SENSE_SERIAL_NUM=$1 + +if [ -z "$REAL_SENSE_SERIAL_NUM" ]; then + echo "please provide realsense serial number as input" + exit 1 +fi + +# test case 1: input param color-width=1280 +cleanup +expectedColorWidth=1280 +echo "test case 1: input param color-width=$expectedColorWidth" +./docker-run.sh --platform core --inputsrc "$REAL_SENSE_SERIAL_NUM" --realsense_enabled --color-width "$expectedColorWidth" +exitCode=$? +if [ "$exitCode" != 0 ]; then + echo "docker-run.sh exited with status code $exitCode" + exit 1 +fi + +echo "waiting for settling down..." +sleep 30 +fps_output=$(grep . 
diff --git a/test_realsense_params_docker_run.sh b/test_realsense_params_docker_run.sh
new file mode 100755
index 00000000..c768fff8
--- /dev/null
+++ b/test_realsense_params_docker_run.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright (C) 2023 Intel Corporation.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+cleanup()
+{
+    ./stop_all_docker_containers.sh
+    sleep 2
+    sudo rm -rf ./results/*.log
+    sudo rm -rf ./results/*.jsonl
+}
+
+REAL_SENSE_SERIAL_NUM=$1
+
+if [ -z "$REAL_SENSE_SERIAL_NUM" ]; then
+    echo "please provide the RealSense serial number as input"
+    exit 1
+fi
+
+# test case 1: input param color-width=1280
+cleanup
+expectedColorWidth=1280
+echo "test case 1: input param color-width=$expectedColorWidth"
+./docker-run.sh --platform core --inputsrc "$REAL_SENSE_SERIAL_NUM" --realsense_enabled --color-width "$expectedColorWidth"
+exitCode=$?
+if [ "$exitCode" != 0 ]; then
+    echo "docker-run.sh exited with status code $exitCode"
+    exit 1
+fi
+
+echo "waiting for the pipeline to settle..."
+sleep 30
+fps_output=$(grep . ./results/pipeline0.log | tail -1)
+if [ -z "$fps_output" ]; then
+    echo "test failed: no fps output from the log"
+else
+    colorWidthOut=$(grep -Eo '("resolution":{"height":[[:digit:]]+,"width":[[:digit:]]+)' ./results/r0.jsonl | awk -F ':' '{print $4}' | tail -1)
+    if [ -z "$colorWidthOut" ]; then
+        echo "test failed: no width output found from the r0.jsonl"
+    elif [ "$expectedColorWidth" -ne "$colorWidthOut" ]; then
+        echo "test failed: the color width output $colorWidthOut is different from the expected width $expectedColorWidth"
+    else
+        echo "test passed; found color width $colorWidthOut"
+    fi
+fi
+
+echo
+
+# test case 2: input param color-height=720
+cleanup
+expectedColorHeight=720
+echo "test case 2: input param color-height=$expectedColorHeight"
+./docker-run.sh --platform core --inputsrc "$REAL_SENSE_SERIAL_NUM" --realsense_enabled --color-height "$expectedColorHeight"
+exitCode=$?
+if [ "$exitCode" != 0 ]; then
+    echo "docker-run.sh exited with status code $exitCode"
+    exit 1
+fi
+echo "waiting for the pipeline to settle..."
+sleep 30
+fps_output=$(grep . ./results/pipeline0.log | tail -1)
+if [ -z "$fps_output" ]; then
+    echo "test failed: no fps output from the log"
+else
+    # strip the trailing comma so the numeric comparison below is valid
+    colorHeightOut=$(grep -Eo '("resolution":{"height":[[:digit:]]+,)' ./results/r0.jsonl | awk -F ':' '{print $3}' | tr -d ',' | tail -1)
+    if [ -z "$colorHeightOut" ]; then
+        echo "test failed: no height output found from the r0.jsonl"
+    elif [ "$expectedColorHeight" -ne "$colorHeightOut" ]; then
+        echo "test failed: the color height output $colorHeightOut is different from the expected height $expectedColorHeight"
+    else
+        echo "test passed; found color height $colorHeightOut"
+    fi
+fi
+
+echo
+
+# test case 3: input param color-framerate=30
+cleanup
+expectedColorFramerate=30
+echo "test case 3: input param color-framerate=$expectedColorFramerate"
+./docker-run.sh --platform core --inputsrc "$REAL_SENSE_SERIAL_NUM" --realsense_enabled --color-framerate "$expectedColorFramerate"
+exitCode=$?
+if [ "$exitCode" != 0 ]; then
+    echo "docker-run.sh exited with status code $exitCode"
+    exit 1
+fi
+echo "waiting for the pipeline to settle..."
+sleep 30
+fps_output=$(grep . ./results/pipeline0.log | tail -1)
+if [ -z "$fps_output" ]; then
+    echo "test failed: no fps output from the log"
+else
+    echo "test passed: found fps output from the log"
+fi
+
+
+# tear down
+cleanup
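The grep/awk extraction used in the tests above is sensitive to key order inside each JSON line. Where jq is available (it is not a dependency of this patch), the same fields can be pulled out more robustly:

```bash
# Read width/height straight from the JSON structure of the last record
jq -r '.resolution.width'  ./results/r0.jsonl | tail -1
jq -r '.resolution.height' ./results/r0.jsonl | tail -1
```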
From d6cac6d8820926c30d5bfc13101bafce42691d85 Mon Sep 17 00:00:00 2001
From: Brian McGinn
Date: Fri, 14 Apr 2023 15:12:17 -0700
Subject: [PATCH 2/2] remove unused camera simulator

---
 benchmark-scripts/README.md                        |  2 +-
 .../start_emulated_camera_pipelines.sh             | 64 ------------------
 docs_src/benchmark.md                              |  2 +-
 3 files changed, 2 insertions(+), 66 deletions(-)
 delete mode 100755 benchmark-scripts/start_emulated_camera_pipelines.sh

diff --git a/benchmark-scripts/README.md b/benchmark-scripts/README.md
index c3870fdc..ee7c6624 100644
--- a/benchmark-scripts/README.md
+++ b/benchmark-scripts/README.md
@@ -108,7 +108,7 @@ Sample command lines:
 
 **camera-simulator.sh**
 
-Starts the camera simulator. To use, place the script in a folder named camera-simulator. At the same directory level as the camera-simulator folder, create a folder called sample-media. The camera-simulator.sh script will start a simulator for each .mp4 video that it finds in the sample-media folder and will enumerate them as camera_0, camera_1 etc. Be sure the path to camera-simulator.sh script is correct in the start_emulated_camera_pipelines.sh script.
+Starts the camera simulator. To use, place the script in a folder named camera-simulator. At the same directory level as the camera-simulator folder, create a folder called sample-media. The camera-simulator.sh script will start a simulator for each .mp4 video that it finds in the sample-media folder and will enumerate them as camera_0, camera_1 etc.
diff --git a/benchmark-scripts/start_emulated_camera_pipelines.sh b/benchmark-scripts/start_emulated_camera_pipelines.sh
deleted file mode 100755
index b0cf0301..00000000
--- a/benchmark-scripts/start_emulated_camera_pipelines.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2023 Intel Corporation.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-#to control number of object detected
-INPUT_CAMERA=$1
-#to control number of instance for pipeline-server
-PIPELINE_NUMBER=$2
-MODEL=yolov5s
-STARTING_PORT=8080
-
-# starting emulated cameras locally
-./camera-simulator.sh
-sleep 1
-
-pipeline='
-{
-    "source": {
-        "uri": "rtsp://127.0.0.1:8554/mycam",
-        "type": "uri"
-    },
-    "destination": {
-        "metadata": {
-            "type": "file",
-            "path": "/tmp/results/r.jsonl",
-            "format":"json-lines"
-        },
-        "frame": {
-            "type": "rtsp",
-            "sync-with-source": false,
-            "path": "mycam"
-        }
-    },
-    "parameters": {
-        "classification": {
-            "device": "CPU"
-        },
-        "detection": {
-            "device": "CPU"
-        }
-    }
-}'
-
-pipelineFile=$MODEL"_tracking_mixed_cpu_full"
-echo $pipelineFile
-PORT=$STARTING_PORT
-echo "Performing mixed tracking with OD-interval=1, OC-interval=1, OCR-interval=3, Barcode-interval=3 "
-for i in $( seq 0 $(($PIPELINE_NUMBER - 1)) )
-do
-    if [ $i != 0 ]; then
-        PORT=$(($PORT + 1))
-    fi
-    pipeline_num=$((i + 1))
-    declare pipelineName="pipeline"$pipeline_num
-    pipelineName=$(echo $pipeline | sed "s/mycam/$INPUT_CAMERA/g")
-    pipelineName=${pipelineName/r.json/r$i.json}
-    echo $pipelineName
-    curl -H 'Content-Type: application/json' http://127.0.0.1:$PORT/pipelines/xeon/$pipelineFile --data @- <