From 90dca117ab784a3e82d8ab67aa047868899f58a9 Mon Sep 17 00:00:00 2001 From: Wentao Wu Date: Mon, 8 Jul 2024 19:07:25 +0800 Subject: [PATCH] k230 canmv docs release v0.7 --- .gitlab-ci.yml | 11 +- .markdownlint.json | 3 +- Makefile | 25 +- _static/topbar.css | 177 - _templates/versions.html | 41 - conf.py | 35 +- index.rst | 4 +- remote.py | 30 + requirements.txt | 2 + .../CanMV-K230_SDK_nncase.md | 3 +- ...30\350\247\243\347\255\224_MicroPython.md" | 41 +- ...45\351\227\250\346\214\207\345\215\227.md" | 321 +- ...10\346\234\254\350\257\264\346\230\216.md" | 48 +- zh/api/api.rst | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 6 +- ...345\235\227API\346\211\213\345\206\214.md" | 32 +- zh/api/image_video.rst | 10 +- zh/api/lvgl/lvgl.md | 2 +- zh/api/machine.rst | 6 +- ...345\235\227API\346\211\213\345\206\214.md" | 10 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 22 +- ...345\235\227API\346\211\213\345\206\214.md" | 14 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 44 +- ...345\235\227API\346\211\213\345\206\214.md" | 12 +- ...345\235\227API\346\211\213\345\206\214.md" | 18 +- ...345\235\227API\346\211\213\345\206\214.md" | 16 +- ...345\235\227API\346\211\213\345\206\214.md" | 22 +- ...345\235\227API\346\211\213\345\206\214.md" | 2 +- ...345\235\227API\346\211\213\345\206\214.md" | 669 - ...345\235\227API\346\211\213\345\206\214.md" | 344 +- ...345\235\227API\346\211\213\345\206\214.md" | 455 - ...345\235\227API\346\211\213\345\206\214.md" | 24 +- ...345\235\227API\346\211\213\345\206\214.md" | 151 +- ...345\235\227API\346\211\213\345\206\214.md" | 10 +- ...345\235\227API\346\211\213\345\206\214.md" | 561 +- ...345\235\227API\346\211\213\345\206\214.md" | 18 +- ...345\235\227API\346\211\213\345\206\214.md" | 28 +- ...345\235\227API\346\211\213\345\206\214.md" | 18 +- zh/api/nncase.rst | 2 +- ...se_runtime_API\346\211\213\345\206\214.md" | 69 +- zh/api/openmv/image.md | 44 +- zh/api/python_micropython.rst | 2 +- zh/api/stdlib/utime.md | 2 +- ...64\346\230\216\346\226\207\346\241\243.md" | 8826 +++++++++ ...72\344\276\213\350\257\264\346\230\216.md" | 14803 ---------------- ...72\350\204\270\346\243\200\346\265\213.md" | 81 - zh/example/demo/face_detection.md | 127 +- zh/example/example.rst | 14 +- zh/example/images/framework.png | Bin 0 -> 25220 bytes zh/example/images/task_diff.png | Bin 0 -> 86833 bytes zh/example/lvgl/lvgl.md | 142 - zh/example/machine/adc/adc.md | 16 - zh/example/machine/fft/fft.md | 34 - zh/example/machine/fpioa/fpioa.md | 27 - zh/example/machine/pwm/pwm.md | 19 - zh/example/machine/spi/spi.md | 63 - zh/example/machine/timer/timer.md | 22 - zh/example/machine/wdt/wdt.md | 17 - zh/example/media.md | 864 + zh/example/media.rst | 15 - zh/example/media/acodec.md | 145 - zh/example/media/audio.md | 125 - zh/example/media/camera.md | 132 - zh/example/media/camera_3sensors.md | 103 - zh/example/media/display.md | 76 - zh/example/media/media.md | 131 - zh/example/media/mp4muxer.md | 63 - zh/example/media/player.md | 32 - zh/example/media/venc.md | 136 - zh/example/network.md | 488 + zh/example/omv/omv.rst | 2 +- .../{cipher/cipher.md => peripheral.md} | 839 +- zh/example/peripheral.rst | 14 - zh/example/socket_network/http_client.md | 48 - 
zh/example/socket_network/http_server.md | 90 - zh/example/socket_network/network.rst | 13 - zh/example/socket_network/network_lan.md | 47 - zh/example/socket_network/network_wlan.md | 53 - zh/example/socket_network/tcp_client.md | 45 - zh/example/socket_network/tcp_server.md | 77 - zh/example/socket_network/udp_server.md | 54 - zh/example/socket_network/upd_client.md | 47 - ...72\350\204\270\346\243\200\346\265\213.md" | 31 + zh/images/canaan-cover.png | Bin 73229 -> 41201 bytes zh/images/canaan-cover.png:Zone.Identifier | 3 + ...77\347\224\250\350\257\264\346\230\216.md" | 54 +- ...77\347\224\250\350\257\264\346\230\216.md" | 62 +- ...77\347\224\250\350\257\264\346\230\216.md" | 55 +- zh/userguide/userguide.rst | 10 +- ...77\347\224\250\350\257\264\346\230\216.md" | 126 +- ...47\345\223\201\347\256\200\344\273\213.md" | 33 + 97 files changed, 11916 insertions(+), 19656 deletions(-) delete mode 100644 _static/topbar.css delete mode 100644 _templates/versions.html create mode 100644 remote.py rename "zh/CanMV-K230_SDK_nncase\347\211\210\346\234\254\345\257\271\345\272\224\345\205\263\347\263\273.md" => zh/CanMV-K230_SDK_nncase.md (95%) delete mode 100755 "zh/api/mpp/K230_CanMV_Camera\346\250\241\345\235\227API\346\211\213\345\206\214.md" delete mode 100755 "zh/api/mpp/K230_CanMV_Lcd\346\250\241\345\235\227API\346\211\213\345\206\214.md" create mode 100644 "zh/example/AI_Demo\350\257\264\346\230\216\346\226\207\346\241\243.md" delete mode 100755 "zh/example/K230_CanMV_AI_Demo\347\244\272\344\276\213\350\257\264\346\230\216.md" delete mode 100755 "zh/example/K230_Canmv\347\244\272\344\276\213\350\256\262\350\247\243-\344\272\272\350\204\270\346\243\200\346\265\213.md" create mode 100644 zh/example/images/framework.png create mode 100644 zh/example/images/task_diff.png delete mode 100644 zh/example/lvgl/lvgl.md delete mode 100755 zh/example/machine/adc/adc.md delete mode 100755 zh/example/machine/fft/fft.md delete mode 100755 zh/example/machine/fpioa/fpioa.md delete mode 100755 zh/example/machine/pwm/pwm.md delete mode 100755 zh/example/machine/spi/spi.md delete mode 100755 zh/example/machine/timer/timer.md delete mode 100755 zh/example/machine/wdt/wdt.md create mode 100644 zh/example/media.md delete mode 100755 zh/example/media.rst delete mode 100755 zh/example/media/acodec.md delete mode 100755 zh/example/media/audio.md delete mode 100755 zh/example/media/camera.md delete mode 100755 zh/example/media/camera_3sensors.md delete mode 100755 zh/example/media/display.md delete mode 100755 zh/example/media/media.md delete mode 100755 zh/example/media/mp4muxer.md delete mode 100755 zh/example/media/player.md delete mode 100755 zh/example/media/venc.md create mode 100644 zh/example/network.md rename zh/example/{cipher/cipher.md => peripheral.md} (69%) mode change 100755 => 100644 delete mode 100755 zh/example/peripheral.rst delete mode 100755 zh/example/socket_network/http_client.md delete mode 100755 zh/example/socket_network/http_server.md delete mode 100644 zh/example/socket_network/network.rst delete mode 100755 zh/example/socket_network/network_lan.md delete mode 100755 zh/example/socket_network/network_wlan.md delete mode 100755 zh/example/socket_network/tcp_client.md delete mode 100755 zh/example/socket_network/tcp_server.md delete mode 100755 zh/example/socket_network/udp_server.md delete mode 100755 zh/example/socket_network/upd_client.md create mode 100755 "zh/example/\344\272\272\350\204\270\346\243\200\346\265\213.md" create mode 100644 zh/images/canaan-cover.png:Zone.Identifier rename 
"zh/userguide/K230_CanMV_IDE\344\275\277\347\224\250\350\257\264\346\230\216.md" => "zh/userguide/IDE\344\275\277\347\224\250\350\257\264\346\230\216.md" (53%) rename "zh/userguide/K230_CanMV_nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" => "zh/userguide/nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" (53%) rename "zh/userguide/K230_CanMV ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" => "zh/userguide/ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" (62%) rename "zh/userguide/K230_CanMV\344\275\277\347\224\250\350\257\264\346\230\216.md" => "zh/userguide/\344\275\277\347\224\250\350\257\264\346\230\216.md" (66%) create mode 100644 "zh/\344\272\247\345\223\201\347\256\200\344\273\213.md" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b286822..816f68d 100755 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -119,6 +119,7 @@ markdownlint: - git config --global --add safe.directory $CI_PROJECT_DIR .sync_github_gitee: + stage: build timeout: 45m image: ai.b-bug.org:5000/k230_sdk:latest tags: @@ -151,14 +152,20 @@ build web docs: stage: build rules: - if: $CI_PIPELINE_SOURCE == "push" && ($CI_COMMIT_BRANCH == "dev" || $CI_COMMIT_BRANCH == "main") - image: ai.b-bug.org:5000/python:web-docs-v2 + image: ai.b-bug.org:5000/huangziyi/web-docs-builder:3d9c3f0b2be1fe67b3b2f4e5d47504f6c2662b76 tags: - k230 environment: name: dev url: https://ai.b-bug.org/k230/k230_canmv_docs/dev/ script: - - git fetch + - git fetch --tags + - git branch main origin/main + - git checkout main + - python3 preprocess.py + - git config --global user.email "auto@canaan-creative.com" + - git config --global user.name "GitLab CI" + - git commit "*.md" -m "remove cover" - git checkout dev - git reset --hard origin/dev - python3 preprocess.py diff --git a/.markdownlint.json b/.markdownlint.json index 59b5a4f..0fd4713 100644 --- a/.markdownlint.json +++ b/.markdownlint.json @@ -69,7 +69,8 @@ // MD033 "no-inline-html": { "allowed_elements": [ - "div" + "div", + "br" ] }, // MD035 diff --git a/Makefile b/Makefile index 3d6647d..0773a98 100644 --- a/Makefile +++ b/Makefile @@ -8,8 +8,7 @@ SPHINXBUILD ?= sphinx-build SPHINXMULTIVERSION ?= sphinx-multiversion SOURCEDIR = . BUILDDIR = _build -WEB_DOCS_BUILDER_USER ?= gitlab+deploy-token-8 -WEB_DOCS_BUILDER_TOKEN ?= _qsc99tPFsbcBhSbXH4S +WEB_DOCS_BUILDER_URL ?= https://ai.b-bug.org/~zhengshanshan/web-docs-builder # Put it first so that "make" without argument is like "make help". help: @@ -19,13 +18,23 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile _templates/layout.html +%: Makefile _static/init_mermaid.js _templates/versionsFlex.html _static/topbar.css _static/custom-theme.css @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -mhtml: _templates/layout.html +mhtml: _static/init_mermaid.js _templates/versionsFlex.html _static/topbar.css _static/custom-theme.css @$(SPHINXMULTIVERSION) "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -_templates/layout.html: - git clone --depth 1 https://$(WEB_DOCS_BUILDER_USER):$(WEB_DOCS_BUILDER_TOKEN)@g.a-bug.org/huangziyi/web-docs-builder.git - cp web-docs-builder/layout.html _templates/layout.html - rm -rf web-docs-builder +_templates: + mkdir $@ + +_static/init_mermaid.js: _templates + wget $(WEB_DOCS_BUILDER_URL)/$@ -O $@ + +_templates/versionsFlex.html: _templates + wget $(WEB_DOCS_BUILDER_URL)/$@ -O $@ + +_static/topbar.css: + wget $(WEB_DOCS_BUILDER_URL)/$@ -O $@ + +_static/custom-theme.css: + wget $(WEB_DOCS_BUILDER_URL)/$@ -O $@ diff --git a/_static/topbar.css b/_static/topbar.css deleted file mode 100644 index bd66107..0000000 --- a/_static/topbar.css +++ /dev/null @@ -1,177 +0,0 @@ -/* Customize for top bar (testing and outdated docs)*/ - -:target { - scroll-margin-top: 50px; -} - -.wy-nav-side -{ - min-height: calc(100% - 50px); - top: 50px; -} - -.wy-nav-content-wrap -{ - margin-top: 50px !important; -} - -/* The navigation bar */ -nav.navbar { - z-index: 3; - overflow: hidden; - background-color: #333; - position: fixed; /* Set the navbar to fixed position */ - top: 0; /* Position the navbar at the top of the page */ - width: 100%; /* Full width */ -} - -/* Links inside the navbar */ -nav.navbar a { - float: left; - display: block; - color: rgba(255,255,255,0.5); - text-align: center; - padding: 14px 16px; - text-decoration: none; - text-wrap: nowrap; -} - -.jn_container-view { - width: 100%; - height: 50px; - background: #0d0d0d; - position: fixed; - left: 0; - top: 0; - z-index: 1501; -} - -.jn_menu-container { - width: 100%; - max-width: 1100px; - height: 50px; - align-items: center; - margin: 0 auto; - display: flex; - position: relative -} - -.jn_menu-container .logo { - width: 157px; - height: 28px; - background: url(img/logo.2f7049e2.svg) no-repeat 100%; - background-size: auto 100%; -} - -.jn_menu-container .menu-list { - display: flex; - height: 100%; - font-size: 16px; - color: rgba(255,255,255,0.4); - margin-left: 78px; - max-width: 506px; -} - -.menu-list .menu-item { - margin-right: 30px; - height: 100%; - cursor: pointer; - display: flex; - align-items: center; -} - -@media (max-width: 768px) { - .logo { - display: none; - } - - .menu-list .menu-item { - margin-right: 0; - } - - .jn_menu-container .menu-list { - margin-left: 0; - justify-content: space-around; - flex-grow: 1; - } - - nav.navbar a { - padding: 3px; - } -} - -.jn_menu-container .menu-item.active a { - color: #fff -} - -.jn_menu-container .menu-item:last-child { - margin-right: 0 -} - -.jn_menu-container .active, -.jn_menu-container .menu-item a:hover { - color: #fff -} - -.jn_menu-container .menu-lang { - position: absolute; - right: 0; - top: 50%; - font-size: 14px; - color: hsla(0, 0%, 100%, .5); - transform: translateY(-50%) -} - -.jn_menu-container .menu-lang .lang-txt { - height: 20px; - padding-right: 28px; - background: url(img/ch-flag.a61145a4.svg) no-repeat 100%; - background-size: auto 70%; - display: block; - cursor: pointer -} - -.menu-doc a { - color: #f2f2f2 !important; -} - -.model-list { - position: absolute; - top: 50px; - background: #0d0d0d; - display: 
none; - cursor: pointer; - flex-direction: column; -} - -.menu-doc:hover > .model-list { - display: flex; - align-items: center; -} - -.document-select { - padding: 10px !important; - color: #fff; - text-align: start !important; - text-wrap: wrap !important; -} - -.document-select:hover { - background-color: #249ee8 !important; -} - -.expanded-menu { - position: relative; -} -.expanded-menu-container { - display: none; - flex-direction: column; - position: absolute; - top: 0; - right: -120px; - width: 120px; - background-color: #0d0d0d; -} -.expanded-menu:hover .expanded-menu-container { - display: flex; -} diff --git a/_templates/versions.html b/_templates/versions.html deleted file mode 100644 index 4f17cae..0000000 --- a/_templates/versions.html +++ /dev/null @@ -1,41 +0,0 @@ -{%- if current_version %} -
- - Versions - {{ current_version.name }} - - -
- - {%- if versions.tags %} -
-
Tags
- {%- for item in versions.tags %} -
{{ item.name }}
- {%- endfor %} -
- {%- endif %} - {%- if versions.branches %} -
-
Branches
- {%- for item in versions.branches %} -
{{ item.name }}
- {%- endfor %} -
-
-
External links
-
- K210 CanMV -
-
- CanMV Docs -
-
- {%- endif %} -
-
-{%- endif %} diff --git a/conf.py b/conf.py index 08c76b7..47a17b4 100644 --- a/conf.py +++ b/conf.py @@ -4,7 +4,6 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html import sys, os -import sphinx_rtd_theme import datetime sys.path.append(os.path.abspath('exts')) @@ -24,9 +23,17 @@ extensions = [ 'sphinx_copybutton', 'myst_parser', - 'sphinx_rtd_dark_mode', - 'sphinx_multiversion' + 'sphinx_multiversion', + 'sphinxcontrib.mermaid' ] +html_js_files = [ + 'https://cdnjs.cloudflare.com/ajax/libs/mermaid/8.13.8/mermaid.min.js', + 'init_mermaid.js', +] +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} templates_path = ['_templates'] exclude_patterns = [] @@ -36,7 +43,7 @@ # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -myst_heading_anchors = 4 +myst_heading_anchors = 6 suppress_warnings = ["myst.header"] html_copy_source = True @@ -47,29 +54,19 @@ # html_show_sphinx = False # html_theme = 'alabaster' -html_theme = "sphinx_rtd_theme" +html_theme = "sphinx_book_theme" html_static_path = ['_static'] # if want to add top nav for canann, enable this. -html_css_files = ['topbar.css'] +html_css_files = ['topbar.css', 'custom-theme.css'] -default_dark_mode = True locale_dirs = ['locale'] html_theme_options = { - # 'analytics_id': 'G-XXXXXXXXXX', # Provided by Google in your dashboard - # 'analytics_anonymize_ip': False, - # 'logo_only': False, - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, - 'vcs_pageview_mode': '', - # 'style_nav_header_background': '#2980B9', - # Toc options 'collapse_navigation': True, - 'sticky_navigation': True, + "repository_url": "https://github.com/kendryte/k230_canmv_docs", 'navigation_depth': 7, - 'includehidden': True, - 'titles_only': False + "use_repository_button": True, + "primary_sidebar_end": ["versionsFlex.html"], } diff --git a/index.rst b/index.rst index 0bdf5c6..64ae27d 100644 --- a/index.rst +++ b/index.rst @@ -8,11 +8,11 @@ Welcome to K230 CanMV's documentation! .. toctree:: :maxdepth: 2 - + zh/CanMV-K230快速入门指南.md zh/userguide/userguide.rst zh/example/example.rst zh/api/api.rst zh/CanMV-K230常见问题解答_MicroPython.md zh/CanMV-K230版本说明.md - zh/CanMV-K230_SDK_nncase版本对应关系.md \ No newline at end of file + zh/CanMV-K230_SDK_nncase.md \ No newline at end of file diff --git a/remote.py b/remote.py new file mode 100644 index 0000000..980f315 --- /dev/null +++ b/remote.py @@ -0,0 +1,30 @@ +import os +import glob +import re + +def remove_content_between_markers(text, start_pattern, end_pattern): + """ + 移除从start_pattern开始到end_pattern(不包括end_pattern)之间所有内容。 + """ + # 使用正则表达式匹配并移除指定范围内的内容,不包括end_pattern + pattern = re.escape(start_pattern) + '(.*?)(?=\n## ' + re.escape(end_pattern) + '|$)' + return re.sub(pattern, '', text, flags=re.DOTALL) + +def process_files(directory='.'): + """ + 遍历指定目录及其子目录下所有的Markdown文件,移除从'## 前言'到'## 1. 概述'(不包括该行)之间的所有内容。 + """ + markdown_files = glob.glob(os.path.join(directory, '**', '*.md'), recursive=True) + + for file_path in markdown_files: + with open(file_path, 'r', encoding='utf-8') as file: + content = file.read() + + # 应用函数移除指定内容 + cleaned_content = remove_content_between_markers(content, '## 前言', '1. 
概述') + + with open(file_path, 'w', encoding='utf-8') as file: + file.write(cleaned_content) + +# 从当前目录开始遍历 +process_files('.') \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 25e29e6..fbc0789 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,5 +28,7 @@ sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.6 sphinxcontrib-serializinghtml==1.1.9 +sphinxcontrib-mermaid +sphinx-book-theme urllib3==2.0.5 zipp==3.17.0 diff --git "a/zh/CanMV-K230_SDK_nncase\347\211\210\346\234\254\345\257\271\345\272\224\345\205\263\347\263\273.md" b/zh/CanMV-K230_SDK_nncase.md similarity index 95% rename from "zh/CanMV-K230_SDK_nncase\347\211\210\346\234\254\345\257\271\345\272\224\345\205\263\347\263\273.md" rename to zh/CanMV-K230_SDK_nncase.md index 088ff6d..9c007d5 100755 --- "a/zh/CanMV-K230_SDK_nncase\347\211\210\346\234\254\345\257\271\345\272\224\345\205\263\347\263\273.md" +++ b/zh/CanMV-K230_SDK_nncase.md @@ -1,4 +1,4 @@ -# CanMV-K230_SDK_nncase版本对应关系 +# 与K230_SDK、nncase版本对应关系 ![cover](images/canaan-cover.png) @@ -49,3 +49,4 @@ | 0.4.0 | 1.3.0 | 2.7.0 | - | | 0.5.0 | 1.4.0 | 2.8.0 | - | | 0.6.0 | 1.5.0 | 2.8.1 | - | +| 0.7.0 | 1.6.0 | 2.8.3 | - | diff --git "a/zh/CanMV-K230\345\270\270\350\247\201\351\227\256\351\242\230\350\247\243\347\255\224_MicroPython.md" "b/zh/CanMV-K230\345\270\270\350\247\201\351\227\256\351\242\230\350\247\243\347\255\224_MicroPython.md" index fd5749b..6a92feb 100644 --- "a/zh/CanMV-K230\345\270\270\350\247\201\351\227\256\351\242\230\350\247\243\347\255\224_MicroPython.md" +++ "b/zh/CanMV-K230\345\270\270\350\247\201\351\227\256\351\242\230\350\247\243\347\255\224_MicroPython.md" @@ -1,29 +1,4 @@ -# CanMV-k230常见问题解答(MicroPython) - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
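新增的 `remote.py` 会把每篇文档中从 `## 前言` 开始、到 `## 1. 概述`(不含该标题)之前的封面与声明内容整体移除。下面是一个最小示意(非仓库自带脚本,示例文本为假设内容),演示 `remove_content_between_markers` 的效果:

```python
import re

def remove_content_between_markers(text, start_pattern, end_pattern):
    # 与 remote.py 相同的实现:匹配 start_pattern 到 "\n## end_pattern" 之前的内容并删除
    pattern = re.escape(start_pattern) + '(.*?)(?=\n## ' + re.escape(end_pattern) + '|$)'
    return re.sub(pattern, '', text, flags=re.DOTALL)

# 假设的示例文本,仅用于演示
sample = "# 标题\n\n## 前言\n\n### 概述\n封面、免责声明等内容\n\n## 1. 概述\n正文从这里开始"
print(remove_content_between_markers(sample, '## 前言', '1. 概述'))
# "## 前言" 段落被整体移除,"## 1. 概述" 及其后的正文保留
```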
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] +# 常见问题解答 ## 1.硬件类问题解答 @@ -51,3 +26,17 @@ V0.5版本后的Canmv镜像支持虚拟U盘,即将板子的TF虚拟为U盘, ## 3.nncase类问题解答 ## 4.AI demo类问题解答 + +## 5.IDE 类问题解答 + +### 5.1 IDE 显示的图像帧率很低 + +IDE 显示图像默认来源是 VideoOutput 模块的回显,在使用HDMI时固定为1080P,由于USB传输速率限制,此时的帧率只能达到15~20FPS。 + +使用 image.compress_for_ide() 可以发送指定的图像,参考示例中的 camera_480p.py,使用硬件编码器发送 480P 图像时可以达到30FPS,需要注意硬件编码器对图像有一定要求,总的来说有一下几点 + +1. 图像来源必须是vb(通过 sensor.snapshot() 获得的图像满足这一要求) +1. 图像 buffer 的所有 planer 物理地址必须对齐 4096(通过 sensor.snapshot() 获得的图像满足这一要求) +1. 图像的格式必须为 YUV420SP/YUV422SP/ARGB8888/BGRA8888 这四种 + +如果图像不满足以上要求,那么 compress_for_ide 会使用 CPU 进行编码,此时帧率可能比较低。 diff --git "a/zh/CanMV-K230\345\277\253\351\200\237\345\205\245\351\227\250\346\214\207\345\215\227.md" "b/zh/CanMV-K230\345\277\253\351\200\237\345\205\245\351\227\250\346\214\207\345\215\227.md" index ab14193..3fbb02b 100755 --- "a/zh/CanMV-K230\345\277\253\351\200\237\345\205\245\351\227\250\346\214\207\345\215\227.md" +++ "b/zh/CanMV-K230\345\277\253\351\200\237\345\205\245\351\227\250\346\214\207\345\215\227.md" @@ -1,173 +1,148 @@ -# CanMV-K230 快速入门指南 - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
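针对上面 5.1 节的说明,这里给出一个最小示意(并非固件自带的 camera_480p.py;Sensor 的构造参数、`set_pixformat` 取值与 `run()` 等接口名为假设写法,请以 Sensor 模块 API 手册为准),仅演示 `sensor.snapshot()` 取图后用 `compress_for_ide()` 发送到 IDE 的组合方式:

```python
import time
from media.sensor import *    # 假设:Sensor 类所在模块
from media.display import *   # Display
from media.media import *     # MediaManager

sensor = Sensor(width=640, height=480)      # 假设的构造参数
sensor.reset()
sensor.set_pixformat(Sensor.YUV420SP)       # 四种可硬件编码格式之一

Display.init(Display.VIRT, width=640, height=480, to_ide=True)
MediaManager.init()
sensor.run()

while True:
    img = sensor.snapshot()      # 图像来自 vb,物理地址按 4096 对齐,满足硬件编码要求
    img.compress_for_ide()       # 满足要求时走硬件编码器,480P 约可达 30FPS
    time.sleep_ms(10)
```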
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 1. 开发板概况 - -CanMV-K230开发板采用的是嘉楠科技Kendryte®系列AIoT芯片中的最新一代SoC芯片K230。该芯片采用全新的多异构单元加速计算架构,集成了2个RISC-V高能效计算核心,内置新一代KPU(Knowledge Process Unit)智能计算单元,具备多精度AI算力,广泛支持通用的AI计算框架,部分典型网络的利用率超过了70%。 - -该芯片同时具备丰富多样的外设接口,以及2D、2.5D等多个标量、向量、图形等专用硬件加速单元,可以对多种图像、视频、音频、AI等多样化计算任务进行全流程计算加速,具备低延迟、高性能、低功耗、快速启动、高安全性等多项特性。 - -![K230_block_diagram](images/K230_block_diagram.png) - -CanMV-K230采用单板设计,扩展接口丰富,极大程度的发挥K230高性能的优势,可直接用于各种智能产品的开发,加速产品落地。 - -![board-front](images/CanMV-K230_front.png) -![board-behind](images/CanMV-K230_behind.png) - -## 2. CanMV-K230默认套件 - -CanMV-K230开发板默认套件包含以下物品: - -1、CanMV-K230主板 x 1 - -2、OV5647摄像头 x 1 - -3、Type-C数据线 x 1 (micropython需要2根Type-C数据线,请用户自行准备1根) - -另外,需要用户准备以下配件: - -1、TF卡, 用于烧写固件,启动系统(必须) - -2、带HDMI接口的显示器及HDMI连接线,显示器要求支持1080P30,否则无法显示 - -3、100M/1000M 以太网线缆,及有线路由器 - -4、Type-C数据线 - -## 3. 固件获取及烧录 - -### 3.1 固件获取 - -CanMV-K230 固件下载地址: 或者 - -CanMV源码下载地址如下: - -`https://github.com/kendryte/k230_canmv` - -`https://gitee.com/kendryte/k230_canmv` - -请下载“CanMV-K230_micropython”开头的gz压缩包,解压得到sysimage-sdcard.img文件,即为CanMV-K230的固件。 - -### 3.2 固件烧录 - -将固件通过电脑烧写至TF卡中。 - -#### 3.2.1 Linux下烧录 - -在TF卡插到宿主机之前,输入: - -`ls -l /dev/sd\*` - -查看当前的存储设备。 - -将TF卡插入宿主机后,再次输入: - -`ls -l /dev/sd\*` - -查看此时的存储设备,新增加的就是TF卡设备节点。 - -假设/dev/sdc就是TF卡设备节点,执行如下命令烧录TF卡: - -`sudo dd if=sysimage-sdcard.img of=/dev/sdc bs=1M oflag=sync` - -#### 3.2.2 Windows下烧录 - -Windows下可通过rufus工具对TF卡进行烧录(rufus工具下载地址 `http://rufus.ie/downloads/`)。 - -1)将TF卡插入PC,然后启动rufus工具,点击工具界面的"选择”按钮,选择待烧写的固件。 - -![rufus-flash-from-file](images/rufus_select.png) - -2)点击“开始”按钮开始烧写,烧写过程有进度条展示,烧写结束后会提示“准备就绪”。 - -![rufus-flash](images/rufus_start.png) -![rufus-sure](images/rufus_sure.png) -![rufus-warning](images/rufus_warning.png) -![rufus-finish](images/rufus_finish.png) - -## 4. USB连接 - -使用Type-C线连接CanMV-K230如下图的位置,线另一端连接至电脑。这时请注意把第3步烧录好的TF卡插到板子上。 - -![CanMV-K230-usbotg](images/CanMV-K230-usbotg.png) - -### 4.1 Windows - -查看设备管理器 - -![CanMV-K230-micropython-serial](images/CanMV-K230-micropython-serial.png) - -- `USB-Enhanced-SERIAL-A CH342(COM80)`为小核linux调试串口 -- `USB-Enhanced-SERIAL-B CH342(COM81)`为大核rt-smart调试串口 -- `USB串行设备(COM75)`为micropython REPL串口 是CanMV-IDE需要连接的串口。如果没有这个设备,请确定两个USB口都与电脑连接,TF卡烧录的固件是否为“CanMV-K230_micropython”开头的固件。 - -### 4.2 linux - -Linux串口显示如下: - -- `/dev/ttyACM0`为小核linux调试串口 -- `/dev/ttyACM1`为大核rt-smart调试串口 -- `/dev/ttyACM2`为micropython REPL串口 是CanMV-IDE需要连接的串口。如果没有这个设备,请确定两个USB口都与电脑连接,TF卡烧录的固件是否为“CanMV-K230_micropython”开头的固件。 - -## 5. CanMV-IDE下载 - -下载地址 : 下载最新的CanMV IDE - -## 6. 启动系统 - -将烧好固件的TF卡插入CanMV-K230 TF卡插槽,Type-C线连接电脑和板端的POWER口,板子即上电,系统开始启动。(**注意:这里两个USB口都要连接电脑,否则板子无法与IDE联通**) -![CanMV-K230-poweron](images/CanMV-K230-poweron.png) - -### 6.1 连接开发板 - -打开CanMV-IDE,连接开发板如下图所示 -![Canmv-link-board](images/Canmv-link-board.png) - -点击左下角红框按钮。 - -![canmv_connect_pass](images/canmv_connect_pass.png) - -左下角图标变为绿色三角,即为连接成功。 - -### 6.2 运行python - -在 下载测试程序,通过IDE加载。(**V0.5版本之前,0.5版本后建议使用虚拟U盘**) - -![canmv_open_py](images/canmv_open_py.png) - -点击打开按钮,选择下载的文件,打开。 点击左下角的绿色按钮运行,等待一会儿,显示器会显示sensor采集的图像,该程序为人脸检测程序,可以看到人脸被框出。 - -(**V0.5版本之后已经支持虚拟U盘功能,可以直接打开TF卡中的示例**) - -查看“我的电脑”或“此电脑”,在设备和驱动器中会出现“CanMV"设备。 -![virtual_Udisk](images/virtual_Udisk.png) - -建议使用虚拟U盘里面的示例。 -![open_Udisk](images/open_Udisk.png) -![face_detect_file](images/face_detect_file.png) - -![CanMV-K230-aidemo](images/CanMV-K230-aidemo.png) +# CanMV-K230 快速入门指南 + +## 1. 
开发板概况 + +CanMV-K230开发板采用的是嘉楠科技Kendryte®系列AIoT芯片中的最新一代SoC芯片K230。该芯片采用全新的多异构单元加速计算架构,集成了2个RISC-V高能效计算核心,内置新一代KPU(Knowledge Process Unit)智能计算单元,具备多精度AI算力,广泛支持通用的AI计算框架,部分典型网络的利用率超过了70%。 + +该芯片同时具备丰富多样的外设接口,以及2D、2.5D等多个标量、向量、图形等专用硬件加速单元,可以对多种图像、视频、音频、AI等多样化计算任务进行全流程计算加速,具备低延迟、高性能、低功耗、快速启动、高安全性等多项特性。 + +![K230_block_diagram](images/K230_block_diagram.png) + +CanMV-K230采用单板设计,扩展接口丰富,极大程度的发挥K230高性能的优势,可直接用于各种智能产品的开发,加速产品落地。 + +![board-front](images/CanMV-K230_front.png) +![board-behind](images/CanMV-K230_behind.png) + +## 2. CanMV-K230默认套件 + +CanMV-K230开发板默认套件包含以下物品: + +1、CanMV-K230主板 x 1 + +2、OV5647摄像头 x 1 + +3、Type-C数据线 x 1 (micropython需要2根Type-C数据线,请用户自行准备1根) + +另外,需要用户准备以下配件: + +1、TF卡, 用于烧写固件,启动系统(必须) + +2、带HDMI接口的显示器及HDMI连接线,显示器要求支持1080P30,否则无法显示 + +3、100M/1000M 以太网线缆,及有线路由器 + +4、Type-C数据线 + +## 3. 固件获取及烧录 + +### 3.1 固件获取 + +CanMV-K230 固件下载地址: 或者 + +CanMV源码下载地址如下: + +`https://github.com/kendryte/k230_canmv` + +`https://gitee.com/kendryte/k230_canmv` + +请下载“CanMV-K230_micropython”开头的gz压缩包,解压得到sysimage-sdcard.img文件,即为CanMV-K230的固件。 + +### 3.2 固件烧录 + +将固件通过电脑烧写至TF卡中。 + +#### 3.2.1 Linux下烧录 + +在TF卡插到宿主机之前,输入: + +`ls -l /dev/sd\*` + +查看当前的存储设备。 + +将TF卡插入宿主机后,再次输入: + +`ls -l /dev/sd\*` + +查看此时的存储设备,新增加的就是TF卡设备节点。 + +假设/dev/sdc就是TF卡设备节点,执行如下命令烧录TF卡: + +`sudo dd if=sysimage-sdcard.img of=/dev/sdc bs=1M oflag=sync` + +#### 3.2.2 Windows下烧录 + +Windows下可通过rufus工具对TF卡进行烧录(rufus工具下载地址 `http://rufus.ie/downloads/`)。 + +1)将TF卡插入PC,然后启动rufus工具,点击工具界面的"选择”按钮,选择待烧写的固件。 + +![rufus-flash-from-file](images/rufus_select.png) + +2)点击“开始”按钮开始烧写,烧写过程有进度条展示,烧写结束后会提示“准备就绪”。 + +![rufus-flash](images/rufus_start.png) +![rufus-sure](images/rufus_sure.png) +![rufus-warning](images/rufus_warning.png) +![rufus-finish](images/rufus_finish.png) + +## 4. USB连接 + +使用Type-C线连接CanMV-K230如下图的位置,线另一端连接至电脑。这时请注意把第3步烧录好的TF卡插到板子上。 + +![CanMV-K230-usbotg](images/CanMV-K230-usbotg.png) + +### 4.1 Windows + +查看设备管理器 + +![CanMV-K230-micropython-serial](images/CanMV-K230-micropython-serial.png) + +- `USB-Enhanced-SERIAL-A CH342(COM80)`为小核linux调试串口 +- `USB-Enhanced-SERIAL-B CH342(COM81)`为大核rt-smart调试串口 +- `USB串行设备(COM75)`为micropython REPL串口 是CanMV-IDE需要连接的串口。如果没有这个设备,请确定两个USB口都与电脑连接,TF卡烧录的固件是否为“CanMV-K230_micropython”开头的固件。 + +### 4.2 linux + +Linux串口显示如下: + +- `/dev/ttyACM0`为小核linux调试串口 +- `/dev/ttyACM1`为大核rt-smart调试串口 +- `/dev/ttyACM2`为micropython REPL串口 是CanMV-IDE需要连接的串口。如果没有这个设备,请确定两个USB口都与电脑连接,TF卡烧录的固件是否为“CanMV-K230_micropython”开头的固件。 + +## 5. CanMV-IDE下载 + +下载地址 : 下载最新的CanMV IDE + +## 6. 
启动系统 + +将烧好固件的TF卡插入CanMV-K230 TF卡插槽,Type-C线连接电脑和板端的POWER口,板子即上电,系统开始启动。(**注意:这里两个USB口都要连接电脑,否则板子无法与IDE联通**) +![CanMV-K230-poweron](images/CanMV-K230-poweron.png) + +### 6.1 连接开发板 + +打开CanMV-IDE,连接开发板如下图所示 +![Canmv-link-board](images/Canmv-link-board.png) + +点击左下角红框按钮。 + +![canmv_connect_pass](images/canmv_connect_pass.png) + +左下角图标变为绿色三角,即为连接成功。 + +### 6.2 运行python + +在 下载测试程序,通过IDE加载。(**V0.5版本之前,0.5版本后建议使用虚拟U盘**) + +![canmv_open_py](images/canmv_open_py.png) + +点击打开按钮,选择下载的文件,打开。 点击左下角的绿色按钮运行,等待一会儿,显示器会显示sensor采集的图像,该程序为人脸检测程序,可以看到人脸被框出。 + +(**V0.5版本之后已经支持虚拟U盘功能,可以直接打开TF卡中的示例**) + +查看“我的电脑”或“此电脑”,在设备和驱动器中会出现“CanMV"设备。 +![virtual_Udisk](images/virtual_Udisk.png) + +建议使用虚拟U盘里面的示例。 +![open_Udisk](images/open_Udisk.png) +![face_detect_file](images/face_detect_file.png) + +![CanMV-K230-aidemo](images/CanMV-K230-aidemo.png) diff --git "a/zh/CanMV-K230\347\211\210\346\234\254\350\257\264\346\230\216.md" "b/zh/CanMV-K230\347\211\210\346\234\254\350\257\264\346\230\216.md" index 91edf72..746d257 100755 --- "a/zh/CanMV-K230\347\211\210\346\234\254\350\257\264\346\230\216.md" +++ "b/zh/CanMV-K230\347\211\210\346\234\254\350\257\264\346\230\216.md" @@ -1,47 +1,4 @@ -# CanMV-K230 版本说明 - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍了K230 CanMV 版本发布相关的内容,包括当前版本支持的硬件、功能、使用限制等。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| --- | --- | +# 版本说明 ## 1. 版本信息 @@ -52,6 +9,7 @@ | K230 CanMV | V0.4.0 | 2024-01-26 | | K230 CanMV | V0.5.0 | 2024-03-15 | | K230 CanMV | V0.6.0 | 2024-04-30 | +| K230 CanMV | V0.7.0 | 2024-07-05 | ## 2. 支持的硬件 @@ -93,6 +51,8 @@ K230平台支持CanMV-K230等主板 | 24 | V0.6.0 | sensor | 增加sensor类 | | | 25 | V0.6.0 | lcd | 增加lcd类 | | | 25 | V0.6.0 | HDMI | 添加720P、480P分辨率 | | +| 26 | V0.7.0 | API | 修改Display和Sensor以及Media API | | +| 27 | V0.7.0 | 开发板 | 支持01Studio-CanMV及K230D-Zero开发板 | | ### 4.2 AI Demo diff --git a/zh/api/api.rst b/zh/api/api.rst index b2c6d90..a0827a5 100755 --- a/zh/api/api.rst +++ b/zh/api/api.rst @@ -1,4 +1,4 @@ -CanMV API 手册 +API 手册 =========== .. toctree:: :maxdepth: 1 diff --git "a/zh/api/cipher/K230_CanMV_Hashlib\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/cipher/K230_CanMV_Hashlib\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 27826fc..73d8370 100755 --- "a/zh/api/cipher/K230_CanMV_Hashlib\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/cipher/K230_CanMV_Hashlib\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Hashlib 模块API手册 +# 1.2 Hashlib 模块API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/cipher/K230_CanMV_Ucryptolib\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/cipher/K230_CanMV_Ucryptolib\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 018294c..1dff5e8 100755 --- "a/zh/api/cipher/K230_CanMV_Ucryptolib\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/cipher/K230_CanMV_Ucryptolib\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Ucryptolib 模块API手册 +# 1.1 Ucryptolib 模块API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/extmod/K230_CanMV_network\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/extmod/K230_CanMV_network\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 40541a7..9f090c3 100755 --- "a/zh/api/extmod/K230_CanMV_network\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/extmod/K230_CanMV_network\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV network 模块API手册 +# 2.2 network 模块API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/extmod/K230_CanMV_socket\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/extmod/K230_CanMV_socket\346\250\241\345\235\227API\346\211\213\345\206\214.md" index e177b00..603e7ff 100755 --- "a/zh/api/extmod/K230_CanMV_socket\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/extmod/K230_CanMV_socket\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV socket 模块API手册 +# 2.3 socket 模块API手册 ![cover](../images/canaan-cover.png) @@ -101,13 +101,13 @@ client() 详见`https://docs.micropython.org/en/latest/library/socket.html` -### 3.1定义 +### 3.1 定义 - *class*socket.socket(*af=AF_INET*, *type=SOCK_STREAM*, *proto=IPPROTO_TCP*, */*)[¶](https://docs.micropython.org/en/latest/library/socket.html#socket.socket) Create a new socket using the given address family, socket type and protocol number. Note that specifying *proto* in most cases is not required (and not recommended, as some MicroPython ports may omit `IPPROTO_*` constants). 
Instead, *type* argument will select needed protocol automatically:`# Create STREAM TCP socket socket(AF_INET, SOCK_STREAM) # Create DGRAM UDP socket socket(AF_INET, SOCK_DGRAM)` -### 3.3函数 +### 3.2 函数 - socket.close()[¶](https://docs.micropython.org/en/latest/library/socket.html#socket.socket.close) diff --git "a/zh/api/extmod/K230_CanMV_uctypes\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/extmod/K230_CanMV_uctypes\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 0f1f8a9..9e90494 100755 --- "a/zh/api/extmod/K230_CanMV_uctypes\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/extmod/K230_CanMV_uctypes\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV uctypes 模块API手册 +# 2.1 uctypes 模块API手册 ![cover](../images/canaan-cover.png) @@ -65,7 +65,7 @@ ## 2. 结构说明 -### 示例 +### 2.1 示例 ```python import uctypes @@ -125,7 +125,7 @@ WWDG.WWDG_CR.WDGA = 1 print("Current counter:", WWDG.WWDG_CR.T) ``` -### 定义结构布局 +### 2.2定义结构布局 结构布局由“描述符”定义 - 一个Python字典,它将字段名称编码为键,以及将它们作为关联值访问它们所需的其他属性: @@ -206,7 +206,7 @@ print("Current counter:", WWDG.WWDG_CR.T) ## 3. API描述 -### struct类 +### 3.1 struct类 ```python class uctypes.struct(addr, descriptor, layout_type=NATIVE) @@ -224,7 +224,7 @@ class uctypes.struct(addr, descriptor, layout_type=NATIVE) struct类实例 -### sizeof +### 3.2 sizeof ```python uctypes.sizeof(struct, layout_type=NATIVE) @@ -241,7 +241,7 @@ uctypes.sizeof(struct, layout_type=NATIVE) 数据结构的大小 -### addressof +### 3.3 addressof ```python uctypes.addressof(obj) @@ -257,7 +257,7 @@ uctypes.addressof(obj) 对象的地址 -### bytes_at +### 3.4 bytes_at ```python uctypes.bytes_at(addr, size) @@ -274,7 +274,7 @@ uctypes.bytes_at(addr, size) bytes对象 -### bytearray_at +### 3.5 bytearray_at ```python uctypes.bytearray_at(addr, size) @@ -291,7 +291,7 @@ uctypes.bytearray_at(addr, size) bytearray对象 -### string_at +### 3.6 string_at ```python uctypes.string_at(addr, size=1048576) @@ -310,31 +310,31 @@ str对象 ## 4. 常量定义 -### uctypes.LITTLE_ENDIAN +### 4.1 uctypes.LITTLE_ENDIAN little-endian压缩结构的布局类型。 (打包意味着每个字段占用描述符中定义的字节数,即对齐为1)。 -### uctypes.BIG_ENDIAN +### 4.2 uctypes.BIG_ENDIAN big-endian压缩结构的布局类型。 -### uctypes.NATIVE +### 4.3 uctypes.NATIVE 本机结构的布局类型 - 数据字节顺序和对齐符合运行MicroPython的系统的ABI。 -### uctypes.UINT8 uctypes.INT8 uctypes.UINT16 uctypes.INT16 uctypes.UINT32 uctypes.INT32 uctypes.UINT64 uctypes.INT64 +### 4.4 uctypes.UINT8 uctypes.INT8 uctypes.UINT16 uctypes.INT16 uctypes.UINT32 uctypes.INT32 uctypes.UINT64 uctypes.INT64 结构描述符的整数类型。 提供了8,16,32和64位类型的常量,包括有符号和无符号。 -### uctypes.FLOAT32 uctypes.FLOAT64 +### 4.5 uctypes.FLOAT32 uctypes.FLOAT64 结构描述符的浮点类型。 -### uctypes.VOID +### 4.6 uctypes.VOID VOID是UINT8的别名,用于方便地定义C的void指针:(uctypes.PTR,uctypes.VOID)。 -### uctypes.PTR uctypes.ARRAY +### 4.7 uctypes.PTR uctypes.ARRAY 输入指针和数组的常量。 请注意,结构没有显式常量,它是隐式的:没有PTR或ARRAY标志的聚合类型是一种结构。 diff --git a/zh/api/image_video.rst b/zh/api/image_video.rst index e0c7dc3..09982d2 100644 --- a/zh/api/image_video.rst +++ b/zh/api/image_video.rst @@ -1,18 +1,12 @@ -图像多媒体 +3.图像多媒体 =========== .. 
toctree:: :maxdepth: 1 - mpp/K230_CanMV_Audio模块API手册.md - mpp/K230_CanMV_Camera模块API手册.md mpp/K230_CanMV_Sensor模块API手册.md mpp/K230_CanMV_Display模块API手册.md - mpp/K230_CanMV_Lcd模块API手册.md + mpp/K230_CanMV_Audio模块API手册.md mpp/K230_CanMV_Media模块API手册.md - mpp/K230_CanMV_MP4模块API手册.md - mpp/K230_CanMV_VDEC模块API手册.md - mpp/K230_CanMV_VENC模块API手册.md - mpp/K230_CanMV_播放器模块API手册.md mpp/K230_CanMV_PM模块API手册.md openmv/image.md lvgl/lvgl.md diff --git a/zh/api/lvgl/lvgl.md b/zh/api/lvgl/lvgl.md index c5cb872..d794468 100644 --- a/zh/api/lvgl/lvgl.md +++ b/zh/api/lvgl/lvgl.md @@ -1,4 +1,4 @@ -# LVGL 使用手册 +# 3.10 LVGL 使用手册 lvgl绑定micropython请参考 [lv_binding_micropython](https://github.com/lvgl/lv_binding_micropython/blob/update_micropython_v1.20/README.md) diff --git a/zh/api/machine.rst b/zh/api/machine.rst index 26caeb3..38bdad8 100644 --- a/zh/api/machine.rst +++ b/zh/api/machine.rst @@ -1,4 +1,4 @@ -Micropython特有库 +2.Micropython特有库 =========== .. toctree:: :maxdepth: 1 @@ -8,9 +8,9 @@ Micropython特有库 extmod/K230_CanMV_socket模块API手册.md machine/K230_CanMV_ADC模块API手册.md machine/K230_CanMV_FFT模块API手册.md - machine/K230_CanMV_GPIO模块API手册.md + machine/K230_CanMV_Pin模块API手册.md machine/K230_CanMV_I2C模块API手册.md - machine/K230_CanMV_IOMUX_API手册.md + machine/K230_CanMV_FPIOA模块API手册.md machine/K230_CanMV_PWM模块API手册.md machine/K230_CanMV_SPI模块API手册.md machine/K230_CanMV_Timer模块API手册.md diff --git "a/zh/api/machine/K230_CanMV_ADC\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_ADC\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 9a740a8..a39be14 100755 --- "a/zh/api/machine/K230_CanMV_ADC\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_ADC\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV ADC 模块API手册 +# 2.4 ADC 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ K230内部包含一个ADC硬件模块,有6个通道,采样分辨率为12bit( ADC类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import ADC @@ -70,7 +70,7 @@ print(adc.read_u16()) print(adc.read_uv(), "uV") ``` -### 构造函数 +### 2.2 构造函数 ```python adc = ADC(channel) @@ -80,7 +80,7 @@ adc = ADC(channel) - channel: ADC通道号,取值:[0,5] -### read_u16 +### 2.3 read_u16 ```python ADC.read_u16() @@ -96,7 +96,7 @@ ADC.read_u16() 返回当前ADC通道采样值,[0-4095] -### read_uv +### 2.4 read_uv ```python ADC.read_uv() diff --git "a/zh/api/machine/K230_CanMV_FFT\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_FFT\346\250\241\345\235\227API\346\211\213\345\206\214.md" index b26c6bb..be3dd00 100755 --- "a/zh/api/machine/K230_CanMV_FFT\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_FFT\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV FFT API手册 +# 2.5 FFT API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/machine/K230_CanMV_FPIOA\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_FPIOA\346\250\241\345\235\227API\346\211\213\345\206\214.md" index c555d3d..14ca740 100755 --- "a/zh/api/machine/K230_CanMV_FPIOA\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_FPIOA\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV FPIOA 模块API手册 +# 2.8 K230 CanMV FPIOA 模块API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/machine/K230_CanMV_I2C\346\250\241\345\235\227API\346\211\213\345\206\214.md" 
"b/zh/api/machine/K230_CanMV_I2C\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 66239c8..7a24e53 100755 --- "a/zh/api/machine/K230_CanMV_I2C\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_I2C\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV I2C 模块API手册 +# 2.7 I2C 模块API手册 ![cover](../images/canaan-cover.png) @@ -59,7 +59,7 @@ K230内部包含五个I2C硬件模块,支持标准100kb/s,快速400kb/s模 I2C类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import I2C @@ -83,7 +83,7 @@ i2c.writeto_mem(addr, memaddr, buf, mem_size=8) i2c.deinit() ``` -### 构造函数 +### 2.2 构造函数 ```python i2c = I2C(id, freq=100000) @@ -94,7 +94,7 @@ i2c = I2C(id, freq=100000) - id: I2C ID, [0~4] (I2C.I2C0~I2C.I2C4) - freq: I2C时钟频率 -### scan +### 2.3 scan ```python i2c.scan() @@ -110,7 +110,7 @@ i2c.scan() list 对象, 包含了所有扫描到的从机地址 -### readfrom +### 2.4 readfrom ```python i2c.readfrom(addr, len, True) @@ -128,7 +128,7 @@ i2c.readfrom(addr, len, True) 读取到的数据,bytes 类型 -### readfrom_into +### 2.5 readfrom_into ```python i2c.readfrom_into(addr, buf, True) @@ -146,7 +146,7 @@ i2c.readfrom_into(addr, buf, True) 无 -### writeto +### 2.6 writeto ```python i2c.writeto(addr, buf, True) @@ -164,7 +164,7 @@ i2c.writeto(addr, buf, True) 成功发送的字节数 -### readfrom_mem +### 2.7readfrom_mem ```python i2c.readfrom_mem(addr, memaddr, nbytes, mem_size=8) @@ -183,7 +183,7 @@ i2c.readfrom_mem(addr, memaddr, nbytes, mem_size=8) 返回bytes类型的读取到的数据 -### readfrom_mem_into +### 2.8 readfrom_mem_into ```python i2c.readfrom_mem_into(addr, memaddr, buf, mem_size=8) @@ -202,7 +202,7 @@ i2c.readfrom_mem_into(addr, memaddr, buf, mem_size=8) 无 -### writeto_mem +### 2.9 writeto_mem ```python i2c.writeto_mem(addr, memaddr, buf, mem_size=8) @@ -221,7 +221,7 @@ i2c.writeto_mem(addr, memaddr, buf, mem_size=8) 无 -### deinit +### 2.10 deinit ```python i2c.deinit() diff --git "a/zh/api/machine/K230_CanMV_PWM\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_PWM\346\250\241\345\235\227API\346\211\213\345\206\214.md" index ada16c3..e158e4a 100755 --- "a/zh/api/machine/K230_CanMV_PWM\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_PWM\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV PWM 模块API手册 +# 2.9 PWM 模块API手册 ![cover](../images/canaan-cover.png) @@ -59,7 +59,7 @@ K230内部包含两个PWM硬件模块,每个模块有3个输出通道,模块 PWM类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import PWM @@ -77,7 +77,7 @@ pwm0.enable(True) pwm0.deinit() ``` -### 构造函数 +### 2.2 构造函数 ```python pwm = PWM(channel, freq, duty=50, enable=False) @@ -90,7 +90,7 @@ pwm = PWM(channel, freq, duty=50, enable=False) - duty: PWM通道输出占空比,指高电平占整个周期的百分比,取值:[0,100],可选参数,默认50 - enable: PWM通道输出立即使能,可选参数,默认False -### freq +### 2.3 freq ```python PWM.freq([freq]) @@ -106,7 +106,7 @@ PWM.freq([freq]) 返回空或当前PWM通道输出频率 -### duty +### 2.4 duty ```python PWM.duty([duty]) @@ -122,7 +122,7 @@ PWM.duty([duty]) 返回空或当前PWM通道输出占空比 -### enable +### 2.5 enable ```python PWM.enable(enable) @@ -138,7 +138,7 @@ PWM.enable(enable) 无 -### deinit +### 2.6 deinit ```python PWM.deinit() diff --git "a/zh/api/machine/K230_CanMV_Pin\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_Pin\346\250\241\345\235\227API\346\211\213\345\206\214.md" index f4201cf..11880ba 100755 --- "a/zh/api/machine/K230_CanMV_Pin\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_Pin\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ 
-1,4 +1,4 @@ -# K230 CanMV Pin 模块API手册 +# 2.6 K230 CanMV Pin 模块API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/machine/K230_CanMV_SPI\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_SPI\346\250\241\345\235\227API\346\211\213\345\206\214.md" index fcea737..7f97ef9 100755 --- "a/zh/api/machine/K230_CanMV_SPI\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_SPI\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV SPI 模块API手册 +# 2.10 SPI 模块API手册 ![cover](../images/canaan-cover.png) @@ -59,7 +59,7 @@ K230内部包含三个SPI硬件模块,片选的极性可配置;支持时钟 SPI类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import SPI @@ -73,7 +73,7 @@ spi.write_readinto(write_buf, read_buf) spi.deinit() ``` -### 构造函数 +### 2.2 构造函数 ```python spi = machine.SPI(id, baudrate=20, polarity=0, phase=0, bits=8) @@ -87,7 +87,39 @@ spi = machine.SPI(id, baudrate=20, polarity=0, phase=0, bits=8) - phase: 相位 - bits: 数据位宽 -### write +### 2.3 read + +```python +spi.read(nbytes) +``` + +读取指定的字节数 + +【参数】 + +- nbytes: 读取长度 + +【返回值】 + +返回bytes对象 + +### 2.4 readinto + +```python +spi.readinto(buf) +``` + +读入指定的缓冲区 + +【参数】 + +- buf: bytearray类型的缓冲区 + +【返回值】 + +无 + +### 2.5 write ```python spi.write(buf) @@ -103,7 +135,7 @@ spi.write(buf) 无 -### write_readinto +### 2.6 write_readinto ```python spi.write_readinto(write_buf, read_buf) @@ -120,7 +152,7 @@ spi.write_readinto(write_buf, read_buf) 无 -### deinit +### 2.7 deinit ```python spi.deinit() diff --git "a/zh/api/machine/K230_CanMV_Timer\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_Timer\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 9645880..058990f 100755 --- "a/zh/api/machine/K230_CanMV_Timer\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_Timer\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Timer 模块API手册 +# 2.11 Timer 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ K230内部包含6个Timer硬件模块,最小定时周期为1us。 Timer类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import Timer @@ -69,7 +69,7 @@ tim.init(period=1000, mode=Timer.PERIODIC, callback=lambda t:print(2)) tim.deinit() ``` -### 构造函数 +### 2.2 构造函数 ```python timer = Timer(index, mode=Timer.PERIODIC, freq=-1, period=-1, callback=None, arg=None) @@ -84,7 +84,9 @@ timer = Timer(index, mode=Timer.PERIODIC, freq=-1, period=-1, callback=None, arg - callback: 超时回调函数,必须设置,要带一个参数 - arg: 超时回调函数参数,可选参数 -### init +**注意:** [0-5]硬件Timer暂不可用 + +### 2.3 init ```python Timer.init(mode=Timer.PERIODIC, freq=-1, period=-1, callback=None, arg=None) @@ -104,7 +106,7 @@ Timer.init(mode=Timer.PERIODIC, freq=-1, period=-1, callback=None, arg=None) 无 -### deinit +### 2.4 deinit ```python Timer.deinit() diff --git "a/zh/api/machine/K230_CanMV_UART\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_UART\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 623584b..281355b 100755 --- "a/zh/api/machine/K230_CanMV_UART\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_UART\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV UART 模块API手册 +# 2.13 UART 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ UART IO配置参考IOMUX模块。 UART类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import UART @@ -77,7 +77,7 @@ r = u1.readinto(b) u1.deinit() ``` -### 构造函数 +### 2.2 构造函数 ```python uart = 
UART(id, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE) @@ -91,7 +91,7 @@ uart = UART(id, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, s - parity: 奇偶校验,有效值 PARITY_NONE、PARITY_ODD、PARITY_EVEN,可选参数,默认PARITY_NONE - stop: 停止位的数目,有效值 STOPBITS_ONE、STOPBITS_TWO,可选参数,默认STOPBITS_ONE -### init +### 2.3 init ```python UART.init(baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE) @@ -107,7 +107,7 @@ UART.init(baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UA 无 -### read +### 2.4 read ```python UART.read([nbytes]) @@ -123,7 +123,7 @@ UART.read([nbytes]) 一个包括读入字节的字节对象 -### readline +### 2.5 readline ```python UART.readline() @@ -139,7 +139,7 @@ UART.readline() 一个包括读入字节的字节对象 -### readinto +### 2.6 readinto ```python UART.readinto(buf[, nbytes]) @@ -156,7 +156,7 @@ UART.readinto(buf[, nbytes]) 读取并存入buf的字节数 -### write +### 2.7 write ```python UART.write(buf) @@ -172,7 +172,7 @@ UART.write(buf) 写入的字节数 -### deinit +### 2.8 deinit ```python UART.deinit() diff --git "a/zh/api/machine/K230_CanMV_WDT\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_WDT\346\250\241\345\235\227API\346\211\213\345\206\214.md" index b3e4079..b27a20a 100755 --- "a/zh/api/machine/K230_CanMV_WDT\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_WDT\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV WDT 模块API手册 +# 2.12 WDT 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ K230内部包含两个WDT硬件模块,用于在应用程序崩溃且最终进 WDT类位于machine模块下 -### 示例 +### 2.1 示例 ```python from machine import WDT @@ -68,18 +68,20 @@ wdt1 = WDT(1,3) wdt1.feed() ``` -### 构造函数 +### 2.2 构造函数 ```python -wdt = WDT(index, timeout) +wdt = WDT(id=1, timeout=5) ``` 【参数】 -- index: WDT号,取值:[0,1] -- timeout: 超时值,单位s +- id: WDT号,取值:[0,1],默认1 +- timeout: 超时值,单位s,默认5 -### feed +**注意:** WDT0暂不可用 + +### 2.3 feed ```python WDT.feed() diff --git "a/zh/api/machine/K230_CanMV_machine\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/machine/K230_CanMV_machine\346\250\241\345\235\227API\346\211\213\345\206\214.md" index e39e03b..f170229 100644 --- "a/zh/api/machine/K230_CanMV_machine\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/machine/K230_CanMV_machine\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV machine 模块API手册 +# 2.14 machine 模块API手册 ![cover](../images/canaan-cover.png) @@ -56,7 +56,7 @@ ## 2. 
API描述 -### reset +### 2.1 reset ```python machine.reset() @@ -71,3 +71,21 @@ machine.reset() 【返回值】 无 + +### 2.2 mem_copy + +```python +machine.mem_copy(dst, src, size) +``` + +内存拷贝 + +【参数】 + +- dst: 目标地址 +- src: 源地址 +- size: 字节数 + +【返回值】 + +无 diff --git "a/zh/api/mpp/K230_CanMV_Audio\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_Audio\346\250\241\345\235\227API\346\211\213\345\206\214.md" index ed967c2..43dcd63 100755 --- "a/zh/api/mpp/K230_CanMV_Audio\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_Audio\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Audio 模块API手册 +# 3.3 Audio 模块API手册 ![cover](../images/canaan-cover.png) diff --git "a/zh/api/mpp/K230_CanMV_Camera\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_Camera\346\250\241\345\235\227API\346\211\213\345\206\214.md" deleted file mode 100755 index d211393..0000000 --- "a/zh/api/mpp/K230_CanMV_Camera\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ /dev/null @@ -1,669 +0,0 @@ -# K230 CanMV Camera 模块API手册 - -![cover](../images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
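对于上面新增的 `machine.mem_copy`,下面是一个最小示意(非手册原有示例),假设 `dst`、`src` 为整数形式的内存地址,可配合 uctypes 一章中的 `addressof` 取得缓冲区地址:

```python
import machine
import uctypes

src = bytearray(b"0123456789abcdef")
dst = bytearray(16)

# 取得两个缓冲区的地址,拷贝 16 字节
machine.mem_copy(uctypes.addressof(dst), uctypes.addressof(src), 16)
print(dst)   # bytearray(b'0123456789abcdef')
```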
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](../images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍K230 CanMV平台Camera模块 API使用说明及应用示例。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -|--------------------|--------------------------------------------------------| -| VICAP | Video Input Capture,图像输入采集模块 | -| MCM | Multi Camera Management ,多摄像头管理 | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ---------- | ---------- | -| V1.0 | 初版 | 汪成根 | 2023-09-18 | -| V1.1 | 添加多sensor MCM 相关的API | 赵忠祥 | 2024-03-11 | - -## 1. 概述 - -`该模块将在V1.0版本后废弃,请使用sensor模块` - -​ K230 CanMV平台Camera模块负责图像采集处理任务。本模块提供了一系列Highe Levl的API,应用开发者可以不用关注底层硬件细节,仅通过该模块提供的API即可获取不同格式和尺寸的图像。 - -​ K230 CanMV平台Camera模块包括三个独立的能力完全相同的camera设备,每个camera设备均可独立完成图像数据采集捕获处理,并可以同时输出3路图像数据。如下图所示: - -![cover](../images/k230-canmv-camera-top.png) - -sensor 0,sensor 1,sensor 2表示三个图像传感器;Camera Device 0,Camera Device 1,Camera Device 2表示三个camera设备;output channel 0,output channel 1,output channel 2表示camera设备的三个输出通道。三个图像传感器可以通过软件配置映射到不同的camera 设备。 - -## 2. API描述 - -K230 CanMV平台Camera模块提供camera静态类,该类提供以下章节描述的方法。 - -### 2.1 sensor_init - -【描述】 - -根据指定的camera设备和sensor类型执行初始化 - -【语法】 - -```python -def sensor_init(cls, dev_num, type) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| dev_num | camera设备号 | | -| sensor_type | sensor类型,CanMV平台定义的已经支持的各类sensor | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 -这是使用camera模块需要调用的第一个方法。 - -用户不调用该方法,默认初始化camera设备0及sensor OV5647 - -【举例】 - -```python -# 初始化camera设备0以及sensor OV5647 -camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) -``` - -【相关主题】 - -无 - -### 2.2 set_inbufs - -【描述】 - -设置指定camera设备使用的输入缓冲区个数 - -【语法】 - -```python -def set_inbufs(cls, dev_num, num) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| -------- | -------------- | --------- | -| dev_num | camera设备号 | 输入 | -| num | 输入缓冲区个数 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 -该方法仅在用户需要同时使用多个camera设备捕获图像时才有效。 - -当用户使用多个camera设备而不调用该方法时,将使用默认配置参数。这有可能存在默认配置输入缓冲区个数较少而引起丢帧问题。 - -【举例】 - -```python -# 配置camera设备0 使用4个输入缓冲区 -camera.set_inbufs(CAM_DEV_ID_0,4) - -# 配置camera设备1 使用6个输入缓冲区 -camera.set_inbufs(CAM_DEV_ID_1,6) -``` - -【相关主题】 - -无 - -### 2.3 set_outbufs - -【描述】 - -设置指定camera设备和通道的输出缓冲区个数 - -【语法】 - -```python -def set_outbufs(cls, dev_num, chn_num, num) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| -------- | ---------------- | --------- | -| dev_num | camera设备号 | 输入 | -| chn_num | camera输出通道号 | 输入 | -| num | 输出缓冲区个数 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 -如果用户不调用方法,将使用默认配置。 - -【举例】 - -```python -# 配置camera设备0,输出通道0, 使用4个输出缓冲区 -camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_0, 4) - -# 配置camera设备0,输出通道1, 使用4个输出缓冲区 -camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_1, 4) -``` - -【相关主题】 - -无 - -### 2.4 set_outsize - -【描述】 - -设置指定camera设备和通道的输出图像尺寸 - -【语法】 - -```python -def set_outsize(cls, dev_num, chn_num, width, height) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| -------- | ---------------- | --------- | -| dev_num | camera设备号 | 输入 | -| chn_num | camera输出通道号 | 输入 | -| width | 输出图像宽度 | 输入 | -| height | 输出图像高度 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -输出图像尺寸不能超过输入图像尺寸。 - -不同输出通道最大可输出图像尺寸由硬件限制。 - -用户不调用该方法,默认输出图像尺寸与输入图像一致。 - -【举例】 - -```python -# 配置camera设备0,输出通道0, 输出图尺寸为640x480 -camera.set_outsize(CAM_DEV_ID_0, 
CAM_CHN_ID_0, 640, 480) - -# 配置camera设备0,输出通道1, 输出图尺寸为320x240 -camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, 320, 240) -``` - -【相关主题】 - -无 - -### 2.5 set_outfmt - -【描述】 - -设置指定camera设备和通道的输出图像格式 - -【语法】 - -```python -def set_outfmt(cls, dev_num, chn_num, pix_format) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| ---------- | ---------------- | --------- | -| dev_num | camera设备号 | 输入 | -| chn_num | camera输出通道号 | 输入 | -| pix_format | 输出图像格式 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 -用户不调用方法,将使用默认配置。 - -【举例】 - -```python -# 配置camera设备0,输出通道0, 输出NV12格式 -camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, CAM_OUT_NV12) - -# 配置camera设备0,输出通道1, 输出RGB888格式 -camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, CAM_OUT_RGB888) -``` - -【相关主题】 - -无 - -### 2.5 start_stream - -【描述】 - -启动camera数据流 - -【语法】 - -```python -def start_stream(cls, dev_num) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| -------- | ------------ | --------- | -| dev_num | camera设备号 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 -用户不调用方法,将使用默认配置。 - -【举例】 - -```python -# 启动camera设备0输出数据流 -camera.start_stream(CAM_DEV_ID_0) -``` - -【相关主题】 - -无 - -### 2.6 stop_stream - -【描述】 - -停止camera数据流 - -【语法】 - -```python -def stop_stream(cls, dev_num) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| -------- | ------------ | --------- | -| dev_num | camera设备号 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 -用户不调用方法,将使用默认配置。 - -【举例】 - -```python -# 停止camera设备0输出数据流 -camera.stop_stream(CAM_DEV_ID_0) -``` - -【相关主题】 - -无 - -### 2.7 capture_image - -【描述】 - -从指定camera设备的支持输出通道中捕获一帧图像数据 - -【语法】 - -```python -def capture_image(cls, dev_num, chn_num) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -| -------- | ---------------- | --------- | -| dev_num | camera设备号 | 输入 | -| chn_num | camera输出通道号 | | - -【返回值】 - -| 返回值 | 描述 | -| --------- | ---- | -| image对象 | 成功 | -| 其他 | 失败 | - -【注意】 - -该方法捕获的图像格式由set_outfmt方法指定。 - -【举例】 - -```python -# 从camera设备0的通道0输出捕获一帧图像数据 -camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_0) -``` - -【相关主题】 - -无 - -### 2.7 start_mcm_stream - -【描述】 - -多sensor时,启动camera数据流。多sensor时一定要先配置好各个sensor的参数,然后调该函数启动数据流。 - -【语法】 - -```python -def start_mcm_stream(cls) -``` - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【举例】 - -```python -# 启动camera设备0输出数据流 -camera.start_mcm_stream() -``` - -【相关主题】 - -无 - -### 2.8 stop_mcm_stream - -【描述】 - -多sensor时,停止camera数据流。与start_mcm_stream配套使用。 - -【语法】 - -```python -def stop_mcm_stream(cls, dev_num) -``` - -【返回值】 - -| 返回值 | 描述 | -| ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【举例】 - -```python -# 停止camera设备0输出数据流 -camera.stop_mcm_stream() -``` - -【相关主题】 - -无 - -## 3. 
数据结构描述 - -K230 CanMV平台Camera模块包含如下描述的各个数据定义。 - -### 3.1 sensor类型 - -【说明】 - -下面是目前Canmv-K230板micropython支持的Sensor。 -其中CSI1/2是可以使用树莓派的ov5647模组,如果使用Canmv-K230 V1.0/1.1版的板子,要修改该模组的电压。 - -【定义】 - -```python -CAM_IMX335_2LANE_1920X1080_30FPS_12BIT_USEMCLK_LINEAR # Imx335 CSI0 -CAM_OV5647_1920X1080_30FPS_10BIT_USEMCLK_LINEAR # OV5647 CSI0 -CAM_OV5647_1920X1080_CSI1_30FPS_10BIT_USEMCLK_LINEAR # OV5647 CSI1 -CAM_OV5647_1920X1080_CSI2_30FPS_10BIT_USEMCLK_LINEAR # OV5647 CSI2 -# the default sensor type -CAM_DEFAULT_SENSOR = CAM_OV5647_1920X1080_30FPS_10BIT_USEMCLK_LINEAR # 默认的sensor使用OV5647 CSI0 -``` - -【注意事项】 - -Canmv-K230 V1.0/1.1版的板子外设接口为1.8V,不能直接使用树莓派的ov5647模组,必须修改电压为1.8V。 - -![ov5647_v1.8](../../images/ov5647_v1.8.jpg) - -【相关数据类型及接口】 - -### 3.2 输出图像尺寸 - -【说明】 - -定义各个输出通道能够支持的输出图像最大尺寸和最小尺寸 - -【定义】 - -```python -CAM_CHN0_OUT_WIDTH_MAX = 3072 -CAM_CHN0_OUT_HEIGHT_MAX = 2160 - -CAM_CHN1_OUT_WIDTH_MAX = 1920 -CAM_CHN1_OUT_HEIGHT_MAX = 1080 - -CAM_CHN2_OUT_WIDTH_MAX = 1920 -CAM_CHN2_OUT_HEIGHT_MAX = 1080 - -CAM_OUT_WIDTH_MIN = 64 -CAM_OUT_HEIGHT_MIN = 64 -``` - -【注意事项】 - -无 - -【相关数据类型及接口】 - -## 4. 示例程序 - -### 例程 - -```python -# 本示例程序包括以下内容: -# 1. 配置camera设备0同时输出三路图像数据 -# 2. 通道0输出YUV格式用于预览显示,通道1、2输出RGB888P -# 3. 抓取三路输出的图像各100张 -# - -from media.camera import * #导入camera模块,使用camera相关接口 -from media.display import * #导入display模块,使用display相关接口 -from media.media import * #导入media模块,使用meida相关接口 -from time import * #导入time模块,使用time相关接口 -import time -import image #导入image模块,使用image相关接口 - -def canmv_camera_test(): - print("canmv_camera_test") - - #初始化HDMI显示 - display.init(LT9611_1920X1080_30FPS) - - #初始化默认sensor配置(OV5647) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - #camera.sensor_init(CAM_DEV_ID_0, CAM_IMX335_2LANE_1920X1080_30FPS_12BIT_LINEAR) - - out_width = 1920 - out_height = 1080 - # 设置输出宽度16字节对齐 - out_width = ALIGN_UP(out_width, 16) - - #设置通道0输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - #设置通道0输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - #创建媒体数据源设备 - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - #创建媒体数据接收设备 - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - #创建媒体链路,数据从源设备流到接收设备 - media.create_link(media_source, media_sink) - #设置显示输出平面的属性 - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - out_width = 640 - out_height = 480 - out_width = ALIGN_UP(out_width, 16) - - #设置通道1输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, out_width, out_height) - #设置通道1输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - - #设置通道2输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_2, out_width, out_height) - #设置通道2输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - - #初始化媒体缓冲区 - ret = media.buffer_init() - if ret: - print("canmv_camera_test, buffer init failed") - return ret - - #启动摄像头数据流 - camera.start_stream(CAM_DEV_ID_0) - time.sleep(15) - - capture_count = 0 - while capture_count < 100: - time.sleep(1) - for dev_num in range(CAM_DEV_ID_MAX): - if not camera.cam_dev[dev_num].dev_attr.dev_enable: - continue - - for chn_num in range(CAM_CHN_ID_MAX): - if not camera.cam_dev[dev_num].chn_attr[chn_num].chn_enable: - continue - - print(f"canmv_camera_test, dev({dev_num}) chn({chn_num}) capture frame.") - #从指定设备和通道捕获图像 - img = camera.capture_image(dev_num, chn_num) - if img == -1: - print("camera.capture_image failed") - continue - - if img.format() == 
image.YUV420: - suffix = "yuv420sp" - elif img.format() == image.RGB888: - suffix = "rgb888" - elif img.format() == image.RGBP888: - suffix = "rgb888p" - else: - suffix = "unkown" - - filename = f"/sdcard/dev_{dev_num:02d}_chn_{chn_num:02d}_{img.width()}x{img.height()}_{capture_count:04d}.{suffix}" - print("save capture image to file:", filename) - - with open(filename, "wb") as f: - if f: - img_data = uctypes.bytearray_at(img.virtaddr(), img.size()) - # save yuv data to sdcard. - #f.write(img_data) - else: - print(f"capture_image, open dump file failed({filename})") - - time.sleep(1) - #释放捕获的图像数据 - camera.release_image(dev_num, chn_num, img) - - capture_count += 1 - - #停止摄像头输出 - camera.stop_stream(CAM_DEV_ID_0) - - #去初始化显示设备 - display.deinit() - - #销毁媒体链路 - media.destroy_link(media_source, media_sink) - - time.sleep(1) - #去初始化媒体缓冲区资源 - ret = media.buffer_deinit() - if ret: - print("camera test, media_buffer_deinit failed") - return ret - - print("camera test exit") - return 0 - - -canmv_camera_test() -``` diff --git "a/zh/api/mpp/K230_CanMV_Display\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_Display\346\250\241\345\235\227API\346\211\213\345\206\214.md" index f2592cf..de8693a 100755 --- "a/zh/api/mpp/K230_CanMV_Display\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_Display\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Display模块API手册 +# 3.2 Display模块API手册 ![cover](../images/canaan-cover.png) @@ -29,7 +29,7 @@ ### 概述 -此文档介绍CanMV Display模块,用以指导开发人员如何调用MicroPython API实现图像输出功能。 +此文档介绍CanMV Display模块,用以指导开发人员如何调用MicroPython API实现图像显示功能。 ### 读者对象 @@ -50,12 +50,13 @@ | 文档版本号 | 修改说明 | 修改者 | 日期 | | ---------- | -------- | ---------- | ---------- | | V1.0 | 初版 | 王权 | 2023-09-15 | +| V2.0 | 重构API | xel | 2024-06-11 | ## 1. 概述 -`该模块将在V1.0版本后废弃,请使用lcd模块` +**`该模块在固件版本V0.6之后有较大改变,若使用V0.6之前固件请参考旧版本的文档`** -此文档介绍CanMV Display模块,用以指导开发人员如何调用Micro Python API实现图像输出功能。 +此文档介绍CanMV Display模块,用以指导开发人员如何调用Micro Python API实现图像显示功能。 ## 2. API描述 @@ -65,24 +66,30 @@ 初始化整个Display通路,包括VO模块、DSI模块、LCD/HDMI +**`必须在MediaManager.init()之前调用`** + 【语法】 ```python -def init(type) +def init(type = None, width = None, height = None, osd_num = 1, to_ide = False, fps = None) ``` 【参数】 -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| type | 输出接口参数 | 输入 | +| 参数名称 | 描述 | 输入/输出 | 说明 | +|-----------------|-------------------------------|-----------|-----| +| type | [显示设备类型](#31-type) | 输入 | 必选 | +| width | 分辨率宽度 | 输入 | 默认值根据`type`决定 | +| height | 分辨率高度 | 输入 | 默认值根据`type`决定 | +| osd_num | 在[show_image](#22-show_image)时可以支持的LAYER数量 | 输入 |越大占用内存越多 | +| to_ide | 是否将屏幕显示传输到IDE显示 | 输入 | 开启会占用更多内存 | +| fps | 显示帧率 | 输入 | 仅支持`VIRT`类型 | 【返回值】 | 返回值 | 描述 | |---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 @@ -96,79 +103,39 @@ def init(type) 无 -### 2.2 set_backlight +### 2.2 show_image 【描述】 -设置LCD背光 +在屏幕上显示图像 【语法】 ```python -def set_backlight(level) +def show_image(img, x = 0, y = 0, layer = None, alpha = 255, flag = 0) ``` 【参数】 -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| level | 0:关闭LCD背光;1:打开LCD背光 | 输入 | +| 参数名称 | 描述 | 输入/输出 | 说明 | +|-----------------|-------------------------------|-----------|------| +| img | 显示的图像 | 输入 | | +| x | 起始坐标的x值 | 输入 | | +| y | 起始坐标的y值 | 输入 | | +| layer | 显示到[指定层](#32-layer) | 输入 | 仅支持`OSD`层
若需要多层请设置[init](#21-init)参数中的`osd_num`| +| alpha | 图层混合alpha | 输入 | | +| flag | 显示[标志](#33-flag) | 输入 | | 【返回值】 | 返回值 | 描述 | |---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 -set_backlight仅适用于LCD输出 - -【举例】 - -无 - -【相关主题】 - 无 -### 2.3 set_plane - -【描述】 - -设置VO通道参数,set_plane方法主要用来设置和Camera、vdec、DPU、AI2D绑定的VO通道 - -【语法】 - -```python -def set_plane(x, y, width, height, pixelformat, rotate, mirror, chn) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| x | 起始坐标的x值 | 输入 | -| y | 起始坐标的y值 | 输入 | -| width | 宽度 | 输入 | -| height | 高度 | 输入 | -| pixelformat | 像素格式 | 输入 | -| rotate | 顺时针旋转功能 | 输入 | -| mirror | 水平方向和垂直方向镜像翻转功能 | 输入 | -| chn | VO通道 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -只有DISPLAY_CHN_VIDEO1通道支持rotate功能和mirror功能 - 【举例】 无 @@ -177,33 +144,26 @@ def set_plane(x, y, width, height, pixelformat, rotate, mirror, chn) 无 -### 2.4 show_image +### 2.3 deinit 【描述】 -输出image到VO通道 +执行反初始化,deinit方法会关闭整个Display通路,包括VO模块、DSI模块、LCD/HDMI + +**`必须在MediaManager.deinit()之前调用`** +**`必须在sensor.stop()之后调用`** 【语法】 ```python -def show_image(image, x, y, chn) +def deinit() ``` -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| image | 待输出的图像 | 输入 | -| x | 起始坐标的x值 | 输入 | -| y | 起始坐标的y值 | 输入 | -| chn | VO通道 | 输入 | - 【返回值】 | 返回值 | 描述 | |---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 @@ -217,61 +177,37 @@ def show_image(image, x, y, chn) 无 -### 2.5 disable_plane +### 2.4 bind_layer 【描述】 -关闭VO通道 +绑定`sensor`或`vdec`模块输出到屏幕显示 +不需要用户手动参与即可将图像持续显示到屏幕 + +**`必须在init之前调用`** 【语法】 ```python -def disable_plane(chn) +def bind_layer(src=(mod, dev, layer), dstlayer, rect = (x, y, w, h), pix_format, alpha, flag) ``` 【参数】 -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| chn | VO通道 | 输入 | +| 参数名称 | 描述 | 输入/输出 | 说明 | +|-----------------|-------------------------------|-----------|------| +| src | `sensor`或`vdec` 输出信息 | 输入 | 可通过`sensor.bind_info()`获取 | +| dstlayer | 绑定到Display的[显示层](#32-layer) | 输入 | 可绑定到`video`或`osd`层 | +| rect | 显示区域 | 输入 | 可通过`sensor.bind_info()`获取 | +| pix_format | 图像像素格式 | 输入 | 可通过`sensor.bind_info()`获取 | +| alpha | 图层混合alpha | 输入 | | +| flag | 显示[标志](#33-flag) | 输入 | `LAYER_VIDEO1`不支持 | 【返回值】 | 返回值 | 描述 | |---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -通过disable_plane方法关闭的VO通道,可以通过set_plane方法或者show_image方法重新打开 - -【举例】 - -无 - -【相关主题】 - -无 - -### 2.6 deinit - -【描述】 - -执行反初始化,deinit方法会关闭整个Display通路,包括VO模块、DSI模块、LCD/HDMI - -【语法】 - -```python -def deinit() -``` - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 @@ -289,129 +225,71 @@ def deinit() ### 3.1 type -【说明】 - -输出接口参数 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| HX8377_1080X1920_30FPS | VO和DSI模块输出1080X1920 30FPS时序到LCD | -| ST7701_V1_MIPI_2LAN_480X800_30FPS | VO和DSI模块输出480x800 30FPS时序到LCD | -| LT9611_1920X1080_30FPS | VO和DSI模块输出1920X1080 30FPS时序到HDMI | -| LT9611_MIPI_4LAN_1920X1080_60FPS | VO和DSI模块输出1920X1080 60FPS时序到HDMI | -| LT9611_MIPI_4LAN_1280X720_60FPS | VO和DSI模块输出1280x720 60FPS时序到HDMI | -| LT9611_MIPI_4LAN_1280X720_50FPS | VO和DSI模块输出1280x720 50FPS时序到HDMI | -| LT9611_MIPI_4LAN_1280X720_30FPS | VO和DSI模块输出1280x720 30FPS时序到HDMI | -| LT9611_MIPI_4LAN_640X480_60FPS | VO和DSI模块输出640x480 
30FPS时序到HDMI | - -【注意事项】 - -无 - -### 3.2 chn - -【说明】 - -VO通道 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_CHN_VIDEO1 | VO模块video 1 通道,支持DISPLAY_OUT_NV12输出 | -| DISPLAY_CHN_VIDEO2 | VO模块video 2 通道,支持DISPLAY_OUT_NV12输出 | -| DISPLAY_CHN_OSD0 | VO模块OSD 0 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | -| DISPLAY_CHN_OSD1 | VO模块OSD 1 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | -| DISPLAY_CHN_OSD2 | VO模块OSD 2 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | -| DISPLAY_CHN_OSD3 | VO模块OSD 3 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | - -【注意事项】 - -只有DISPLAY_CHN_VIDEO1通道支持rotate功能和mirror功能 - -### 3.3 pixelformat - -【说明】 - -像素格式 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_OUT_NV12 | 输出NV12 | -| DISPLAY_OUT_ARGB8888 | 输出ARGB8888 | -| DISPLAY_OUT_RGB888 | 输出RGB888 | -| DISPLAY_OUT_RGB565 | 输出RGB565 | - -【注意事项】 - -无 - -### 3.4 rotate - -【说明】 - -顺时针旋转功能 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_ROTATE_0 | 不进行旋转 | -| DISPLAY_ROTATE_90 | 顺时针旋转90度 | -| DISPLAY_ROTATE_180 | 顺时针旋转180度 | -| DISPLAY_ROTATE_270 | 顺时针旋转270度 | - -【注意事项】 - -无 - -### 3.5 mirror - -【说明】 - -水平方向和垂直方向镜像翻转功能 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_MIRROR_NONE | 不进行翻转 | -| DISPLAY_MIRROR_HOR | 水平方向翻转180度 | -| DISPLAY_MIRROR_VER | 垂直方向翻转180度 | -| DISPLAY_MIRROR_BOTH | 水平方向和垂直方向都翻转180度 | - -【注意事项】 - -无 +| 类型 | 分辨率
(width x height @ fps) | 备注 | +|------|----------|----------| +| LT9611 | 1920x1080@30 | *默认值* | +| | 1280x720@30 | | +| | 640x480@60 | | +| HX8377 | 1080x1920@30 | *默认值* | +| ST7701 | 800x480@30 | *默认值*
可设置为竖屏480x800 | +| | 854x480@30 | 可设置为竖屏480x854 | +| VIRT | 640x480@90 | *默认值*
| +| | | `IDE`调试专用,不在外接屏幕上显示内容
用户可自定义分辨率(64x64)-(4096x4096)和帧率(1-200)
| + +### 3.2 layer + +| 显示层 | 说明 | 备注 | +| -- | -- | -- | +| LAYER_VIDEO1 | | 仅[bind_layer](#24-bind_layer)可用 | +| LAYER_VIDEO2 | | 仅[bind_layer](#24-bind_layer)可用 | +| LAYER_OSD0 | | 支持[show_image](#22-show_image)和[bind_layer](#24-bind_layer)使用 | +| LAYER_OSD1 | | 支持[show_image](#22-show_image)和[bind_layer](#24-bind_layer)使用 | +| LAYER_OSD2 | | 支持[show_image](#22-show_image)和[bind_layer](#24-bind_layer)使用 | +| LAYER_OSD3 | | 支持[show_image](#22-show_image)和[bind_layer](#24-bind_layer)使用 | + +### 3.3 flag + +| 标志 | 说明 | 备注 | +|---------|--------|-------| +| FLAG_ROTATION_0 | 旋转`0`度 | | +| FLAG_ROTATION_90 | 旋转`90`度 | | +| FLAG_ROTATION_180 | 旋转`180`度 | | +| FLAG_ROTATION_270 |旋转`270`度 | | +| FLAG_MIRROR_NONE | 不镜像| | +| FLAG_MIRROR_HOR |水平镜像 | | +| FLAG_MIRROR_VER |垂直镜像 | | +| FLAG_MIRROR_BOTH |水平与垂直镜像 | | ## 4. 示例程序 例程 ```python - from media.display import * #导入display模块,使用display相关接口 -import image #导入image模块,使用image相关接口 - -display.init(LT9611_1920X1080_30FPS) #初始化HDMI显示 - -img = image.open("test.jpg") -img.draw_rectangle(20, 20, 80, 80, color=White) #画框 -display.show_image(img, 0, 0, DISPLAY_CHN_OSD2) #显示 - -display.deinit() +from media.media import * #导入display模块,使用display相关接口 +import os, time, image #导入image模块,使用image相关接口 + +# use lcd as display output +Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) +# init media manager +MediaManager.init() + +# create image for drawing +img = image.Image(800, 480, image.RGB565) +img.clear() +img.draw_string_advanced(0,0,32, "Hello World!,你好世界!!!", color = (255, 0, 0)) + +Display.show_image(img) + +try: + while True: + time.sleep(1) + os.exitpoint() +except KeyboardInterrupt as e: + print("user stop: ", e) +except BaseException as e: + print(f"Exception {e}") + +Display.deinit() +MediaManager.deinit() ``` diff --git "a/zh/api/mpp/K230_CanMV_Lcd\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_Lcd\346\250\241\345\235\227API\346\211\213\345\206\214.md" deleted file mode 100755 index f1e1eff..0000000 --- "a/zh/api/mpp/K230_CanMV_Lcd\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ /dev/null @@ -1,455 +0,0 @@ -# K230 CanMV Lcd模块API手册 - -![cover](../images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](../images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -此文档介绍CanMV lcd模块,用以指导开发人员如何调用MicroPython API实现图像输出功能。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| ---- | ---- | -| VO | Video Output | -| DSI | Display Serial Interface | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ---------- | ---------- | -| V1.0 | 初版 | 赵忠祥 | 2024-04-25 | - -## 1. 概述 - -此文档介绍CanMV lcd模块,用以指导开发人员如何调用Micro Python API实现图像输出功能。 - -## 2. API描述 - -### 2.1 lcd.init - -【描述】 - -初始化整个lcd通路,包括VO模块、DSI模块、LCD/HDMI - -【语法】 - -```python -lcd.init(type) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| type | 输出接口参数 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -无 - -【举例】 - -无 - -【相关主题】 - -无 - -### 2.2 lcd.set_backlight - -【描述】 - -设置LCD背光 - -【语法】 - -```python -lcd.set_backlight(level) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| level | 0:关闭LCD背光;1:打开LCD背光 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -set_backlight仅适用于LCD输出 - -【举例】 - -无 - -【相关主题】 - -无 - -### 2.3 lcd.set_plane - -【描述】 - -设置VO通道参数,set_plane方法主要用来设置和Camera、vdec、DPU、AI2D绑定的VO通道 - -【语法】 - -```python -lcd.set_plane(x, y, width, height, pixelformat, rotate, chn, mirror) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| x | 起始坐标的x值 | 输入 | -| y | 起始坐标的y值 | 输入 | -| width | 宽度 | 输入 | -| height | 高度 | 输入 | -| pixelformat | 像素格式 | 输入 | -| rotate | 顺时针旋转功能 | 输入 | -| chn | VO通道 | 输入 | -| mirror | 翻转功能 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -只有DISPLAY_CHN_VIDEO1通道支持rotate功能和mirror功能 - -【举例】 - -无 - -【相关主题】 - -无 - -### 2.4 lcd.display - -【描述】 - -输出image到VO通道 - -【语法】 - -```python -lcd.display(image, x, y, chn) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| image | 待输出的图像 | 输入 | -| x | 起始坐标的x值 | 输入 | -| y | 起始坐标的y值 | 输入 | -| chn | VO通道 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -无 - -【举例】 - -无 - -【相关主题】 - -无 - -### 2.5 lcd.disable_plane - -【描述】 - -关闭VO通道 - -【语法】 - -```python -lcd.disable_plane(chn) -``` - -【参数】 - -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| chn | VO通道 | 输入 | - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -通过disable_plane方法关闭的VO通道,可以通过set_plane方法或者display方法重新打开 - -【举例】 - -无 - -【相关主题】 - -无 - -### 2.6 lcd.deinit - -【描述】 - -执行反初始化,deinit方法会关闭整个lcd通路,包括VO模块、DSI模块、LCD/HDMI - -【语法】 - -```python -lcd.deinit() -``` - -【返回值】 - -| 返回值 | 描述 | -|---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | - -【注意】 - -无 - -【举例】 - -无 - -【相关主题】 - -无 - -## 3. 
数据结构描述 - -### 3.1 type - -【说明】 - -输出接口参数 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| HX8377_1080X1920_30FPS | VO和DSI模块输出1080X1920 30FPS时序到LCD | -| ST7701_V1_MIPI_2LAN_480X800_30FPS | VO和DSI模块输出480x800 30FPS时序到LCD | -| LT9611_1920X1080_30FPS | VO和DSI模块输出1920X1080 30FPS时序到HDMI | -| LT9611_MIPI_4LAN_1920X1080_60FPS | VO和DSI模块输出1920X1080 60FPS时序到HDMI | -| LT9611_MIPI_4LAN_1280X720_60FPS | VO和DSI模块输出1280x720 60FPS时序到HDMI | -| LT9611_MIPI_4LAN_1280X720_50FPS | VO和DSI模块输出1280x720 50FPS时序到HDMI | -| LT9611_MIPI_4LAN_1280X720_30FPS | VO和DSI模块输出1280x720 30FPS时序到HDMI | -| LT9611_MIPI_4LAN_640X480_60FPS | VO和DSI模块输出640x480 30FPS时序到HDMI | - -【注意事项】 - -无 - -### 3.2 chn - -【说明】 - -VO通道 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_CHN_VIDEO1 | VO模块video 1 通道,支持DISPLAY_OUT_NV12输出 | -| DISPLAY_CHN_VIDEO2 | VO模块video 2 通道,支持DISPLAY_OUT_NV12输出 | -| DISPLAY_CHN_OSD0 | VO模块OSD 0 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | -| DISPLAY_CHN_OSD1 | VO模块OSD 1 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | -| DISPLAY_CHN_OSD2 | VO模块OSD 2 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | -| DISPLAY_CHN_OSD3 | VO模块OSD 3 通道,支持DISPLAY_OUT_ARGB8888、DISPLAY_OUT_RGB888、DISPLAY_OUT_RGB565输出 | - -【注意事项】 - -只有DISPLAY_CHN_VIDEO1通道支持rotate功能和mirror功能 - -### 3.3 pixelformat - -【说明】 - -像素格式 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_OUT_NV12 | 输出NV12 | -| DISPLAY_OUT_ARGB8888 | 输出ARGB8888 | -| DISPLAY_OUT_RGB888 | 输出RGB888 | -| DISPLAY_OUT_RGB565 | 输出RGB565 | - -【注意事项】 - -无 - -### 3.4 mirror - -【说明】 - -水平方向和垂直方向镜像翻转功能 - -【定义】 - -【成员】 - -| 成员名称 | 描述 | -|---------|---------------------------------| -| DISPLAY_ROTATE_0 | 不进行旋转 | -| DISPLAY_ROTATE_90 | 顺时针旋转90度 | -| DISPLAY_ROTATE_180 | 顺时针旋转180度 | -| DISPLAY_ROTATE_270 | 顺时针旋转270度 | -| DISPLAY_MIRROR_NONE | 不进行翻转 | -| DISPLAY_MIRROR_HOR | 水平方向翻转180度 | -| DISPLAY_MIRROR_VER | 垂直方向翻转180度 | -| DISPLAY_MIRROR_BOTH | 水平方向和垂直方向都翻转180度 | - -【注意事项】 - -无 - -## 4. 示例程序 - -例程 - -```python - -from media.lcd import * -from media.media import * -import time, os, urandom, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -def lcd_test(): - print("lcd test") - # use hdmi for lcd - lcd.init(LT9611_1920X1080_30FPS) // 实始化lcd - # config vb for osd layer - config = k_vb_config() // 配置内存 - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - lcd.display(osd_img, 0, 0, DISPLAY_CHN_OSD0) // 显示 - try: - while True: - img.clear() - for i in range(10): - x = (urandom.getrandbits(11) % img.width()) - y = (urandom.getrandbits(11) % img.height()) - r = (urandom.getrandbits(8)) - g = (urandom.getrandbits(8)) - b = (urandom.getrandbits(8)) - # If the first argument is a scaler then this method expects - # to see x, y, and text. 
Otherwise, it expects a (x,y,text) tuple. - # Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees. - img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False, - char_rotation = 0, char_hmirror = False, char_vflip = False, - string_rotation = 0, string_hmirror = False, string_vflip = False) // 绘图 - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - - # deinit lcd - lcd.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # deinit media buffer - media.buffer_deinit() - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - lcd_test() - -``` diff --git "a/zh/api/mpp/K230_CanMV_MP4\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_MP4\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 81c091e..2dd8640 100755 --- "a/zh/api/mpp/K230_CanMV_MP4\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_MP4\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV MP4 模块API手册 +# 3.6 MP4 模块API手册 ![cover](../images/canaan-cover.png) @@ -59,7 +59,7 @@ 提供MP4Container类,该类提供如下方法: -### MP4Container.Create +### 2.1 MP4Container.Create 【描述】 @@ -95,7 +95,7 @@ MP4Container.Create(mp4Cfg) 无 -### MP4Container.Start +### 2.2 MP4Container.Start 【描述】 @@ -126,7 +126,7 @@ MP4Container.Start() 无 -### MP4Container.Process +### 2.3 MP4Container.Process 【描述】 @@ -157,7 +157,7 @@ MP4Container.Process() 无 -### MP4Container.Stop +### 2.4 MP4Container.Stop 【描述】 @@ -188,7 +188,7 @@ MP4Container.Stop() 无 -### MP4Container.Destroy +### 2.5 MP4Container.Destroy 【描述】 @@ -221,7 +221,7 @@ MP4Container.Destroy() ## 3. 数据结构描述 -### Mp4CfgStr +### 3.1 Mp4CfgStr 【说明】 @@ -259,7 +259,7 @@ class Mp4CfgStr: MP4Container.Create -### MuxerCfgStr +### 3.2 MuxerCfgStr 【说明】 @@ -295,7 +295,7 @@ class MuxerCfgStr: MP4Container.Create -### MP4Container类型 +### 3.3 MP4Container类型 【说明】 @@ -308,7 +308,7 @@ MP4Container类型枚举 | MP4_CONFIG_TYPE_MUXER | muxer类型 | | MP4_CONFIG_TYPE_DEMUXER | demuxer类型,目前不支持 | -### video_payload_type +### 3.4 video_payload_type 【说明】 @@ -321,7 +321,7 @@ MP4Container类型枚举 | MP4_CODEC_ID_H264 | h264视频编码类型 | | MP4_CODEC_ID_H265 | h265视频编码类型 | -### audio_payload_type +### 3.5 audio_payload_type 【说明】 @@ -336,7 +336,7 @@ MP4Container类型枚举 ## 4. 示例程序 -### 例程1 +### 4.1 例程1 ```python diff --git "a/zh/api/mpp/K230_CanMV_Media\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_Media\346\250\241\345\235\227API\346\211\213\345\206\214.md" index af3e2ac..222bc86 100755 --- "a/zh/api/mpp/K230_CanMV_Media\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_Media\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Media模块API手册 +# 3.8 Media模块API手册 ![cover](../images/canaan-cover.png) @@ -29,7 +29,7 @@ ### 概述 -本文档主要介绍K230 CanMV平台Meida模块 API使用说明及应用示例。 +本文档主要介绍K230 CanMV平台media模块 API使用说明及应用示例。 ### 读者对象 @@ -42,50 +42,46 @@ | 简称 | 说明 | | ---- | ---- | -| XXX | xx | -| XXX | xx | +| | | ### 修订记录 | 文档版本号 | 修改说明 | 修改者 | 日期 | | ---------- | -------- | ---------- | ---------- | | V1.0 | 初版 | 汪成根 | 2023-09-25 | -| V1.1 | | | | -| V1.2 | | | | +| V2.0 | 重构API | xel | 2024-06-11 | ## 1. 
概述 -​ K230 CanMV平台Meida模块是一个软件抽象模块,主要是针对K230 CanMV平台媒体数据链路以及媒体缓冲区相关操作的封装。 +​ K230 CanMV平台media模块是一个软件抽象模块,主要是针对K230 CanMV平台媒体数据链路以及媒体缓冲区相关操作的封装。 ## 2. API描述 -K230 CanMV平台Meida模块提供meida静态类,该类提供以下章节描述的方法。 +K230 CanMV平台media模块提供MediaManager静态类,该类提供以下章节描述的方法。 -### 2.1 create_link +### 2.1 init 【描述】 -创建由参数source和sink指定的媒体链路,链路创建成功后,数据流会自动从souce流入sink,无需用户干预。 +用户[配置](#23-_config)完`buffer`之后,调用`init`进行初始化,必须在最后进行调用 【语法】 ```python -def create_link(cls, souce, sink) +MediaManager.init() ``` 【参数】 | 参数名称 | 描述 | 输入/输出 | |-----------------|-------------------------------|-----------| -| souce | 媒体数据源 | 输入 | -| sink | 媒体数据接收者 | 输入 | +| 无 | | | 【返回值】 | 返回值 | 描述 | |---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 @@ -97,31 +93,29 @@ def create_link(cls, souce, sink) 无 -### 2.2 destroy_link +### 2.2 deinit 【描述】 -销毁已经创建的媒体链路 +销毁所有申请的`buffer` 【语法】 ```python -def destroy_link(cls, souce, sink) +MediaManager.deinit() ``` 【参数】 | 参数名称 | 描述 | 输入/输出 | | -------- | -------------- | --------- | -| souce | 媒体数据源 | 输入 | -| sink | 媒体数据接收者 | 输入 | +| 无 | | | 【返回值】 | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 @@ -133,7 +127,7 @@ def destroy_link(cls, souce, sink) 无 -### 2.3 buffer_config +### 2.3 _config 【描述】 @@ -142,7 +136,7 @@ def destroy_link(cls, souce, sink) 【语法】 ```python -def buffer_config(cls, config) +MediaManager._config(config) ``` 【参数】 @@ -169,30 +163,34 @@ def buffer_config(cls, config) 无 -### 2.4 buffer_init +### 2.4 link 【描述】 -初始化K230 CanMV平台媒体缓冲区。 +为不同模块的通道建立连接,数据自动流转,无需用户手动操作 + +`Display`可使用`bind_layer`自动创建`link` 【语法】 ```python -def buffer_init(cls) +MediaManager.link(src=(mod,dev,chn), dst = (mod,dev,chn)) ``` -【参数】无 +【参数】 + +| 参数名称 | 描述 | 输入/输出 | +|-----------------|-------------------------------|-----------| +| 无 | | | 【返回值】 | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| `MediaManager.linker`类 | | 【注意】 - -该方法只能调用一次,应用开发者进行应用媒体开发时,需要先初始化所需要的各个子模块,并在启动媒体数据流之前调用一次该方法!!! +该方法仅提供给K230 CanMV平台各媒体子模块(例如:camera,video encode等)封装本模块接口时内部使用。上层应用开发者无需关注! 【举例】 @@ -202,29 +200,65 @@ def buffer_init(cls) 无 -### 2.5 buffer_deinit +### 2.5 Buffer 管理 + +#### 2.5.1 get 【描述】 -去初始化K230 CanMV平台媒体缓冲区。 +用户在[_config](#23-_config)之后,可通过`MediaManager.Buffer.get`获取`buffer` + +**必须在[init](#21-init)执行之后才能获取** 【语法】 ```python -def buffer_deinit(cls): +MediaManager.Buffer.get(size) ``` -【参数】无 +【参数】 + +| 参数名称 | 描述 | 输入/输出 | +|-----------------|-------------------------------|-----------| +| size | 想要获取的`buffer`大小,不能超过`_config`中配置的 | 输入 | 【返回值】 | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| `MediaManager.Buffer` 类 | 成功 | -【注意】 -该方法必须要做退出媒体应用之前调用,如果去初始化媒体缓冲区失败,则会导致下次启动媒体应用时缓冲区初始化失败! +【举例】 + +无 + +【相关主题】 + +无 + +#### 2.5.2 释放内存 + +【描述】 + +用户手动释放获取到的`buffer` + +【语法】 + +```python +buffer.__del__() +``` + +【参数】 + +| 参数名称 | 描述 | 输入/输出 | +|-----------------|-------------------------------|-----------| +| 无 | | | + +【返回值】 + +| 返回值 | 描述 | +| ------ | ---------------------- | +| 无 | | 【举例】 @@ -354,28 +388,37 @@ DISPLAY_CHN_ID_6 = K_VO_DISPLAY_CHN_ID6 【相关数据类型及接口】 -### 3.3 图像格式 - -【说明】 +无 -K230 CanMV平台当前定义的各种图像格式 +## 4. 
示例程序 -【定义】 +### 例程 ```python -#TODO -``` +from media.media import * -【注意事项】 +config = k_vb_config() +config.max_pool_cnt = 1 +config.comm_pool[0].blk_size = 1024 +config.comm_pool[0].blk_cnt = 1 +config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE -无 +ret = MediaManager._config(config) +if not ret: + raise RuntimeError(f"configure buffer failed.") -【相关数据类型及接口】 +MediaManager.init() -## 4. 示例程序 +buffer = MediaManager.Buffer.get(1024) -### 例程 +print(buffer) -```python -# 参考Camera模块API手册:示例程序 +buffer.__del__() + +MediaManager.deinit() + +''' +buffer pool : 1 +MediaManager.Buffer: handle 0, size 1024, poolId 0, phyAddr 268439552, virtAddr 100424000 +''' ``` diff --git "a/zh/api/mpp/K230_CanMV_PM\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_PM\346\250\241\345\235\227API\346\211\213\345\206\214.md" index fd54830..bb8f75f 100644 --- "a/zh/api/mpp/K230_CanMV_PM\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_PM\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV PM 模块API手册 +# 3.9 PM 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ PM模块是功耗管理模块,具体可参考SDK中关于PM框架的描述([K2 pm类位于mpp模块下,模块内部包含了两个实例化对象cpu, kpu -### 示例 +### 2.1 示例 ```python from mpp import pm @@ -70,7 +70,7 @@ pm.cpu.list_profiles() pm.cpu.set_profile(1) ``` -### get_freq +### 2.2 get_freq ```python pm.pm_domain.get_freq() @@ -86,7 +86,7 @@ pm.pm_domain.get_freq() 指定域频率 -### list_profiles +### 2.3 list_profiles ```python pm.pm_domain.list_profiles() @@ -102,7 +102,7 @@ pm.pm_domain.list_profiles() 指定域支持的频率列表 -### set_profile +### 2.4 set_profile ```python pm.pm_domain.set_profile(index) diff --git "a/zh/api/mpp/K230_CanMV_Sensor\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_Sensor\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 00dfd8c..e7c579f 100755 --- "a/zh/api/mpp/K230_CanMV_Sensor\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_Sensor\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV Sensor 模块API手册 +# 3.1 Sensor 模块API手册 ![cover](../images/canaan-cover.png) @@ -29,7 +29,9 @@ ### 概述 -本文档主要介绍K230 CanMV平台Camera模块 API使用说明及应用示例。 +**`该模块在固件版本V0.6之后有较大改变,若使用V0.6之前固件请参考旧版本的文档`** + +本文档主要介绍K230 CanMV平台Sensor模块 API使用说明及应用示例。 ### 读者对象 @@ -42,14 +44,14 @@ | 简称 | 说明 | |--------------------|--------------------------------------------------------| -| VICAP | Video Input Capture,图像输入采集模块 | -| MCM | Multi Camera Management ,多摄像头管理 | +||| ### 修订记录 | 文档版本号 | 修改说明 | 修改者 | 日期 | | ---------- | -------- | ---------- | ---------- | | V1.0 | 初版 | 赵忠祥 | 2024-04-24 | +| V2.0 | 重构API | xel | 2024-06-11 | ## 1. 概述 @@ -63,52 +65,84 @@ sensor 0,sensor 1,sensor 2表示三个图像传感器;Camera Device 0,Ca ## 2. 
API描述 -K230 CanMV平台sensor模块提供sensor静态类,该类提供以下章节描述的方法。 - -### 2.1 sensor.reset +### 构造函数 【描述】 -根据指定的sensor设备和sensor类型执行初始化 +根据`csi id`和摄像头类型构建`Sensor`对象 + +用户需要先构建`Sensor`对象再继续操作 + +目前已实现自动探测摄像头,用户可选择输出图像的最大分辨率和帧率,参考[摄像头列表](#4-摄像头列表) + +用户设置目标分辨率和帧率之后,如果底层驱动不支持该设置,则会进行自动匹配出最佳配置 + +具体使用的配置可参考日志,如`use sensor 23, output 640x480@90` 【语法】 ```python -sensor.reset(dev_num, type) +sensor = Sensor(id, [width, height, fps]) ``` 【参数】 -| 参数名称 | 描述 | 输入/输出 | -|-----------------|-------------------------------|-----------| -| dev_num | sensor设备号 | | -| sensor_type | sensor类型,CanMV平台定义的已经支持的各类sensor | 输入 | +| 参数名称 | 描述 | 输入/输出 | 说明 | +|-----------------|-------------------------------|-----------| --- | +| id | `csi` 端口, 支持`0-2` | 输入 | 必选 | +| width | `sensor`最大输出图像宽度 | 输入 | 可选,默认`1920` | +| height | `sensor`最大输出图像高度 | 输入 | 可选,默认`1080` | +| fps | `sensor`最大输出图像帧率 | 输入 | 可选,默认`30` | 【返回值】 | 返回值 | 描述 | |---------|---------------------------------| -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| Sensor 对象 | | -sensor_type: +【举例】 -CAM_DEFAULT_SENSOR/OV_OV5647_MIPI_CSI0_1920X1080_30FPS_10BIT_LINEAR +```python +sensor = Sensor(id = 0) +sensor = Sensor(id = 0, witdh = 1280, height = 720, fps = 60) +sensor = Sensor(id = 0, witdh = 640, height = 480) +``` -CAM_OV5647_1920X1080_CSI1_30FPS_10BIT_USEMCLK_LINEAR/OV_OV5647_MIPI_CSI1_1920X1080_30FPS_10BIT_LINEAR +【相关主题】 -CAM_OV5647_1920X1080_CSI2_30FPS_10BIT_USEMCLK_LINEAR/OV_OV5647_MIPI_CSI2_1920X1080_30FPS_10BIT_LINEAR +无 -【注意】 -这是使用sensor模块需要调用的第一个方法。 +### 2.1 sensor.reset + +【描述】 + +复位`sensor` + +在构造`Sensor`对象之后,必须调用本函数才能继续其他操作 + +【语法】 + +```python +sensor.reset() +``` + +【参数】 + +| 参数名称 | 描述 | 输入/输出 | +|-----------------|-------------------------------|-----------| +| 无 | | | + +【返回值】 -用户不调用该方法,默认初始化sensor设备0及sensor OV5647 +| 返回值 | 描述 | +|---------|---------------------------------| +| 无 | | 【举例】 ```python # 初始化sensor设备0以及sensor OV5647 -sensor.reset(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +sensor.reset() ``` 【相关主题】 @@ -119,46 +153,47 @@ sensor.reset(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) 【描述】 -设置指定sensor设备和通道的输出图像尺寸 +设置指定通道的输出图像尺寸 + +用户可使用`framesize`或通过指定`width`&`height`来设置输出图像尺寸 + +***宽度会自动对齐到16像素宽*** 【语法】 ```python -sensor.set_framesize(dev_num, chn_num, width, height) +sensor.set_framesize(framesize = FRAME_SIZE_INVAILD, chn = CAM_CHN_ID_0, alignment=0, **kwargs) ``` 【参数】 | 参数名称 | 描述 | 输入/输出 | | -------- | ---------------- | --------- | -| dev_num | sensor设备号 | 输入 | -| chn_num | sensor输出通道号 | 输入 | -| width | 输出图像宽度 | 输入 | -| height | 输出图像高度 | 输入 | +| framesize | sensor[输出图像尺寸](#31-frame_size) | 输入 | +| chn | sensor输出[通道号](#33-channel) | 输入 | +| width | 输出图像宽度,*kw_arg* | 输入 | +| height | 输出图像高度,*kw_arg* | 输入 | 【返回值】 | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 -输出图像尺寸不能超过输入图像尺寸。 +输出图像尺寸不能超过摄像头实际输出。 不同输出通道最大可输出图像尺寸由硬件限制。 -用户不调用该方法,默认输出图像尺寸与输入图像一致。 - 【举例】 ```python # 配置sensor设备0,输出通道0, 输出图尺寸为640x480 -sensor.set_framesize(CAM_DEV_ID_0, CAM_CHN_ID_0, 640, 480) +sensor.set_framesize(chn = CAM_CHN_ID_0, width = 640, height = 480) # 配置sensor设备0,输出通道1, 输出图尺寸为320x240 -sensor.set_framesize(CAM_DEV_ID_0, CAM_CHN_ID_1, 320, 240) +sensor.set_framesize(chn = CAM_CHN_ID_1, width = 320, height = 240) ``` 【相关主题】 @@ -174,59 +209,120 @@ sensor.set_framesize(CAM_DEV_ID_0, CAM_CHN_ID_1, 320, 240) 【语法】 ```python -sensor.set_pixformat(dev_num, chn_num, pix_format) +sensor.set_pixformat(pix_format, chn = CAM_CHN_ID_0) ``` 【参数】 | 参数名称 | 描述 | 输入/输出 | | ---------- | ---------------- | --------- | -| dev_num | sensor设备号 | 输入 | -| chn_num | sensor输出通道号 | 
输入 | -| pix_format | 输出图像格式 | 输入 | +| pix_format | [输出图像格式](#32-pixel_format) | 输入 | +| chn_num | sensor输出[通道号](#33-channel) | 输入 | + +【返回值】 -pix_format: +| 返回值 | 描述 | +| ------ | ---------------------- | +| 无 | | -sensor.YUV420SP: NV12 +【举例】 -sensor.RGB888: RGB888 interleave +```python +# 配置sensor设备0,输出通道0, 输出NV12格式 +sensor.set_pixformat(sensor.YUV420SP, chn = CAM_CHN_ID_0) -sensor.RGB888P: RGB888 planer +# 配置sensor设备0,输出通道1, 输出RGB888格式 +sensor.set_pixformat(sensor.RGB888, chn = CAM_CHN_ID_1) +``` + +【相关主题】 + +无 + +### 2.4 sensor.set_hmirror + +【描述】 + +设置摄像头水平镜像 + +【语法】 + +```python +sensor.set_hmirror(enable) +``` + +【参数】 + +| 参数名称 | 描述 | 输入/输出 | +| ---------- | ---------------- | --------- | +| enable | `True` 表示开启水平镜像
`False`表示关闭水平镜像 | 输入 | 【返回值】 | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | 【注意】 -用户不调用方法,将使用默认配置。 【举例】 ```python -# 配置sensor设备0,输出通道0, 输出NV12格式 -sensor.set_pixformat(CAM_DEV_ID_0, CAM_CHN_ID_0, sensor.YUV420SP) +sensor.set_hmirror(True) +``` -# 配置sensor设备0,输出通道1, 输出RGB888格式 -sensor.set_pixformat(CAM_DEV_ID_0, CAM_CHN_ID_1, sensor.RGB888) +【相关主题】 + +无 + +### 2.5 sensor.set_vflip + +【描述】 + +设置摄像头垂直翻转 + +【语法】 + +```python +sensor.set_vflip(enable) +``` + +【参数】 + +| 参数名称 | 描述 | 输入/输出 | +| ---------- | ---------------- | --------- | +| enable | `True` 表示开启垂直翻转
`False` 表示关闭垂直翻转 | 输入 | + +【返回值】 + +| 返回值 | 描述 | +| ------ | ---------------------- | +| 无 | | + +【注意】 + +【举例】 + +```python +sensor.set_vflip(True) ``` 【相关主题】 无 -### 2.4 sensor.start_stream +### 2.6 sensor.run 【描述】 -启动sensor数据流 +摄像头开始输出 + +**`必须在MediaManager.init()之前调用`** 【语法】 ```python -sensor.start_stream() +sensor.run() ``` 【参数】 @@ -235,30 +331,35 @@ sensor.start_stream() | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | + +【注意】 + +如果同时使用多个摄像头(最多3个),只需要其中一个执行`run`即可 【举例】 ```python # 启动sensor设备输出数据流 -sensor.start_stream() +sensor.run() ``` 【相关主题】 无 -### 2.5 sensor.stop_stream +### 2.7 sensor.stop 【描述】 -停止sensor数据流 +停止sensor输出 + +**`必须在MediaManager.deinit()之前调用`** 【语法】 ```python -sensor.stop_stream() +sensor.stop() ``` 【参数】 @@ -267,21 +368,24 @@ sensor.stop_stream() | 返回值 | 描述 | | ------ | ---------------------- | -| 0 | 成功。 | -| 非 0 | 失败,其值为\[错误码\] | +| 无 | | + +【注意】 + +如果同时使用多个摄像头(最多3个),**需要每一个都执行`stop`** 【举例】 ```python # 停止sensor设备0输出数据流 -sensor.stop_stream(CAM_DEV_ID_0) +sensor.stop() ``` 【相关主题】 无 -### 2.6 sensor.snapshot +### 2.8 sensor.snapshot 【描述】 @@ -290,15 +394,14 @@ sensor.stop_stream(CAM_DEV_ID_0) 【语法】 ```python -sensor.snapshot(dev_num, chn_num) +sensor.snapshot(chn = CAM_CHN_ID_0) ``` 【参数】 | 参数名称 | 描述 | 输入/输出 | | -------- | ---------------- | --------- | -| dev_num | sensor设备号 | 输入 | -| chn_num | sensor输出通道号 | | +| chn_num | sensor输出[通道号](#33-channel) | | 【返回值】 @@ -309,210 +412,192 @@ sensor.snapshot(dev_num, chn_num) 【注意】 -该方法捕获的图像格式由set_outfmt方法指定。 - 【举例】 ```python # 从sensor设备0的通道0输出捕获一帧图像数据 -sensor.snapshot(CAM_DEV_ID_0, CAM_CHN_ID_0) +sensor.snapshot() ``` 【相关主题】 无 -## 3. 数据结构描述 +### 2.9 sensor.bind_info -K230 CanMV平台Camera模块包含如下描述的各个数据定义。 - -### 3.1 sensor类型 - -【说明】 +【描述】 -下面是目前Canmv-K230板micropython支持的Sensor。 -其中CSI1/2是可以使用树莓派的ov5647模组,如果使用Canmv-K230 V1.0/1.1版的板子,要修改该模组的电压。 +在`Display.bind_layer`时使用,获取绑定信息 -【定义】 +【语法】 ```python -CAM_IMX335_2LANE_1920X1080_30FPS_12BIT_USEMCLK_LINEAR # Imx335 CSI0 -CAM_OV5647_1920X1080_30FPS_10BIT_USEMCLK_LINEAR # OV5647 CSI0 -CAM_OV5647_1920X1080_CSI1_30FPS_10BIT_USEMCLK_LINEAR # OV5647 CSI1 -CAM_OV5647_1920X1080_CSI2_30FPS_10BIT_USEMCLK_LINEAR # OV5647 CSI2 -# the default sensor type -CAM_DEFAULT_SENSOR = CAM_OV5647_1920X1080_30FPS_10BIT_USEMCLK_LINEAR # 默认的sensor使用OV5647 CSI0 +sensor.bind_info(x = 0, y = 0, chn = CAM_CHN_ID_0) ``` -【注意事项】 - -Canmv-K230 V1.0/1.1版的板子外设接口为1.8V,不能直接使用树莓派的ov5647模组,必须修改电压为1.8V。 - -![ov5647_v1.8](../../images/ov5647_v1.8.jpg) +【参数】 -【相关数据类型及接口】 +| 参数名称 | 描述 | 输入/输出 | +| -------- | ---------------- | --------- | +| x | 将`sensor`指定通道输出图像绑定到`Display`或`Venc`模块的指定坐标 | | +| y | 将`sensor`指定通道输出图像绑定到`Display`或`Venc`模块的指定坐标 | | +| chn_num | sensor输出[通道号](#33-channel) | | -### 3.2 输出图像尺寸 +【返回值】 -【说明】 +| 返回值 | 描述 | +| --------- | ---- | +| 无 | | -定义各个输出通道能够支持的输出图像最大尺寸和最小尺寸 +【注意】 -【定义】 +【举例】 ```python -CAM_CHN0_OUT_WIDTH_MAX = 3072 -CAM_CHN0_OUT_HEIGHT_MAX = 2160 - -CAM_CHN1_OUT_WIDTH_MAX = 1920 -CAM_CHN1_OUT_HEIGHT_MAX = 1080 -CAM_CHN2_OUT_WIDTH_MAX = 1920 -CAM_CHN2_OUT_HEIGHT_MAX = 1080 - -CAM_OUT_WIDTH_MIN = 64 -CAM_OUT_HEIGHT_MIN = 64 ``` -【注意事项】 +【相关主题】 无 -【相关数据类型及接口】 +## 3. 数据结构描述 -## 4. 
示例程序 +### 3.1 frame_size + +| 图像帧尺寸 | 分辨率 | +| -- | -- | +| QQCIF | 88x72 | +| QCIF | 176x144 | +| CIF | 352x288 | +| QSIF | 176x120 | +| SIF | 352x240 | +| QQVGA | 160x120 | +| QVGA | 320x240 | +| VGA | 640x480 | +| HQQVGA | 120x80 | +| HQVGA | 240x160 | +| HVGA | 480x320 | +| B64X64 | 64x64 | +| B128X64 | 128x64 | +| B128X128 | 128x128 | +| B160X160 | 160x160 | +| B320X320 | 320x320 | +| QQVGA2 | 128x160 | +| WVGA | 720x480 | +| WVGA2 | 752x480 | +| SVGA | 800x600 | +| XGA | 1024x768 | +| WXGA | 1280x768 | +| SXGA | 1280x1024 | +| SXGAM | 1280x960 | +| UXGA | 1600x1200 | +| HD | 1280x720 | +| FHD | 1920x1080 | +| QHD | 2560x1440 | +| QXGA | 2048x1536 | +| WQXGA | 2560x1600 | +| WQXGA2 | 2592x1944 | + +### 3.2 pixel_format + +| 像素格式 | | +| -- | -- | +| RGB565 | | +| RGB888 | | +| RGBP888 | | +| YUV420SP | NV12 | +| GRAYSCALE | | + +### 3.3 channel + +| 通道号 | | +| -- | -- | +| CAM_CHN_ID_0 | 通道0 | +| CAM_CHN_ID_1 | 通道1 | +| CAM_CHN_ID_2 | 通道2 | +| CAM_CHN_ID_MAX | 非法通道 | + +## 4. 摄像头列表 + +| 摄像头型号 | 分辨率
Width x Height | 帧率 | +| -- | -- | -- | +| OV5647 | 1920x1080 | 30 | +| | 1280x960 | 60 | +| | 1280x720 | 60 | +| | 640x480 | 90 | + +## 5. 示例程序 ### 例程 ```python -# 本示例程序包括以下内容: -# 1. 配置sensor设备0同时输出三路图像数据 -# 2. 通道0输出YUV格式用于预览显示,通道1、2输出RGB888P -# 3. 抓取三路输出的图像各100张 -# - -from media.sensor import * #导入sensor模块,使用sensor相关接口 -from media.display import * #导入display模块,使用display相关接口 -from media.media import * #导入media模块,使用meida相关接口 -from time import * #导入time模块,使用time相关接口 -import time -import image #导入image模块,使用image相关接口 - -def canmv_sensor_test(): - print("canmv_sensor_test") - - #初始化HDMI显示 - display.init(LT9611_1920X1080_30FPS) - - #初始化默认sensor配置(OV5647) - sensor.reset(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - - out_width = 1920 - out_height = 1080 - # 设置输出宽度16字节对齐 - out_width = ALIGN_UP(out_width, 16) - - #设置通道0输出尺寸 - sensor.set_framesize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - #设置通道0输出格式 - sensor.set_pixformat(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - #创建媒体数据源设备 - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - #创建媒体数据接收设备 - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - #创建媒体链路,数据从源设备流到接收设备 - media.create_link(media_source, media_sink) - #设置显示输出平面的属性 - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - out_width = 640 - out_height = 480 - out_width = ALIGN_UP(out_width, 16) - - #设置通道1输出尺寸 - sensor.set_framesize(CAM_DEV_ID_0, CAM_CHN_ID_1, out_width, out_height) - #设置通道1输出格式 - sensor.set_pixformat(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - - #设置通道2输出尺寸 - sensor.set_framesize(CAM_DEV_ID_0, CAM_CHN_ID_2, out_width, out_height) - #设置通道2输出格式 - sensor.set_pixformat(CAM_DEV_ID_0, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - - #初始化媒体缓冲区 - ret = media.buffer_init() - if ret: - print("canmv_sensor_test, buffer init failed") - return ret - - #启动摄像头数据流 - sensor.start_stream(CAM_DEV_ID_0) - time.sleep(15) - - capture_count = 0 - while capture_count < 100: - time.sleep(1) - for dev_num in range(CAM_DEV_ID_MAX): - if not sensor.cam_dev[dev_num].dev_attr.dev_enable: - continue - - for chn_num in range(CAM_CHN_ID_MAX): - if not sensor.cam_dev[dev_num].chn_attr[chn_num].chn_enable: - continue - - print(f"canmv_sensor_test, dev({dev_num}) chn({chn_num}) capture frame.") - #从指定设备和通道捕获图像 - img = sensor.capture_image(dev_num, chn_num) - if img == -1: - print("sensor.capture_image failed") - continue - - if img.format() == image.YUV420: - suffix = "yuv420sp" - elif img.format() == image.RGB888: - suffix = "rgb888" - elif img.format() == image.RGBP888: - suffix = "rgb888p" - else: - suffix = "unkown" - - filename = f"/sdcard/dev_{dev_num:02d}_chn_{chn_num:02d}_{img.width()}x{img.height()}_{capture_count:04d}.{suffix}" - print("save capture image to file:", filename) - - with open(filename, "wb") as f: - if f: - img_data = uctypes.bytearray_at(img.virtaddr(), img.size()) - # save yuv data to sdcard. 
- #f.write(img_data) - else: - print(f"capture_image, open dump file failed({filename})") - - time.sleep(1) - #释放捕获的图像数据 - sensor.release_image(dev_num, chn_num, img) - - capture_count += 1 - - #停止摄像头输出 - sensor.stop_stream(CAM_DEV_ID_0) - - #去初始化显示设备 - display.deinit() - - #销毁媒体链路 - media.destroy_link(media_source, media_sink) - - time.sleep(1) - #去初始化媒体缓冲区资源 - ret = media.buffer_deinit() - if ret: - print("sensor test, media_buffer_deinit failed") - return ret - - print("sensor test exit") - return 0 - - -canmv_sensor_test() +# Camera Example +import time, os, sys + +from media.sensor import * +from media.display import * +from media.media import * + +def camera_test(): + print("camera_test") + + # construct a Sensor object with default configure + sensor = Sensor() + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + + # set chn0 output size, 1920x1080 + sensor.set_framesize(Sensor.FHD) + # set chn0 output format + sensor.set_pixformat(Sensor.YUV420SP) + # bind sensor chn0 to display layer video 1 + bind_info = sensor.bind_info() + Display.bind_layer(**bind_info, layer = Display.LAYER_VIDEO1) + + # set chn1 output format + sensor.set_framesize(width = 640, height = 480, chn = CAM_CHN_ID_1) + sensor.set_pixformat(Sensor.RGB888, chn = CAM_CHN_ID_1) + + # set chn2 output format + sensor.set_framesize(width = 640, height = 480, chn = CAM_CHN_ID_2) + sensor.set_pixformat(Sensor.RGB565, chn = CAM_CHN_ID_2) + + # use hdmi as display output + Display.init(Display.LT9611, to_ide = True, osd_num = 2) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + try: + while True: + os.exitpoint() + + img = sensor.snapshot(chn = CAM_CHN_ID_1) + Display.show_image(img, alpha = 128) + + img = sensor.snapshot(chn = CAM_CHN_ID_2) + Display.show_image(img, x = 1920 - 640, layer = Display.LAYER_OSD1) + + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + print(f"Exception {e}") + # sensor stop run + sensor.stop() + # deinit display + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + MediaManager.deinit() + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + camera_test() ``` diff --git "a/zh/api/mpp/K230_CanMV_VDEC\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_VDEC\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 951d565..70ce868 100755 --- "a/zh/api/mpp/K230_CanMV_VDEC\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_VDEC\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV VDEC 模块API手册 +# 3.4 VDEC 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ 提供Decoder类,该类提供如下方法: -### Decoder.\_\_init__ +### 2.1 Decoder.\_\_init__ 【描述】 @@ -95,7 +95,7 @@ VDEC最多支持4路解码 无 -### Decoder.Create +### 2.2 Decoder.Create 【描述】 @@ -127,7 +127,7 @@ decoder.create() 无 -### Decoder.destroy +### 2.3 Decoder.destroy 【描述】 @@ -159,7 +159,7 @@ decoder.destroy() 无 -### Decoder.Start +### 2.4 Decoder.Start 【描述】 @@ -189,7 +189,7 @@ decoder.Start() 无 -### Decoder.decode +### 2.5 Decoder.decode 【描述】 @@ -223,7 +223,7 @@ decoder.decode(stream_data) 无 -### Decoder.stop +### 2.6 Decoder.stop 【描述】 @@ -257,7 +257,7 @@ decoder.stop() ## 3. 数据结构描述 -### StreamData +### 3.1 StreamData 【说明】 @@ -285,7 +285,7 @@ class StreamData: ## 4. 
示例程序 -### 例程1 +### 4.1 例程1 ```python from media.media import * diff --git "a/zh/api/mpp/K230_CanMV_VENC\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_VENC\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 420223f..1255f08 100755 --- "a/zh/api/mpp/K230_CanMV_VENC\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_VENC\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV VENC 模块API手册 +# 3.5 VENC 模块API手册 ![cover](../images/canaan-cover.png) @@ -58,7 +58,7 @@ 提供Encoder类,该类提供如下方法: -### Encoder.Create +### 2.1 Encoder.Create 【描述】 @@ -95,7 +95,7 @@ VENC最多支持4路编码,编码通道号取值范围[0, 3],其中第4路 无 -### Encoder.SetOutBufs +### 2.2 Encoder.SetOutBufs 【描述】 @@ -131,7 +131,7 @@ Encoder.SetOutBufs(chn, buf_num, width, height) 无 -### Encoder.Start +### 2.3 Encoder.Start 【描述】 @@ -164,7 +164,7 @@ Encoder.Start(chn) 无 -### Encoder.GetStream +### 2.4 Encoder.GetStream 【描述】 @@ -198,7 +198,7 @@ Encoder.GetStream(chn, streamData) 无 -### Encoder.ReleaseStream +### 2.5 Encoder.ReleaseStream 【描述】 @@ -232,7 +232,7 @@ Encoder.ReleaseStream(chn, streamData) 无 -### Encoder.Stop +### 2.6 Encoder.Stop 【描述】 @@ -258,7 +258,7 @@ Encoder.Stop(chn) 无 -### Encoder.Destroy +### 2.7 Encoder.Destroy 【描述】 @@ -286,7 +286,7 @@ Encoder.Destroy(chn) ## 3. 数据结构描述 -### ChnAttrStr +### 3.1 ChnAttrStr 【说明】 @@ -322,7 +322,7 @@ class ChnAttrStr: Encoder.Create -### StreamData +### 3.2 StreamData 【说明】 @@ -357,7 +357,7 @@ VENC_PACK_CNT_MAX是码流结构体中pack的最大个数,目前设置为12 Encoder.GetStream Encoder.ReleaseStream -### payload_type +### 3.3 payload_type 【描述】 @@ -370,7 +370,7 @@ Encoder.ReleaseStream | PAYLOAD_TYPE_H264 | h264编码格式 | | PAYLOAD_TYPE_H265| h265编码格式 | -### profile +### 3.4 profile 【描述】 @@ -385,7 +385,7 @@ Encoder.ReleaseStream | H264_PROFILE_HIGH | h264 high profile | | H265_PROFILE_MAIN | h265 main profile | -### stream_type +### 3.5 stream_type 【描述】 @@ -401,7 +401,7 @@ Encoder.ReleaseStream ## 4. 示例程序 -### 例程1 +### 4.1 例程1 ```python from media.vencoder import * diff --git "a/zh/api/mpp/K230_CanMV_\346\222\255\346\224\276\345\231\250\346\250\241\345\235\227API\346\211\213\345\206\214.md" "b/zh/api/mpp/K230_CanMV_\346\222\255\346\224\276\345\231\250\346\250\241\345\235\227API\346\211\213\345\206\214.md" index 7e44e73..20446d9 100755 --- "a/zh/api/mpp/K230_CanMV_\346\222\255\346\224\276\345\231\250\346\250\241\345\235\227API\346\211\213\345\206\214.md" +++ "b/zh/api/mpp/K230_CanMV_\346\222\255\346\224\276\345\231\250\346\250\241\345\235\227API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV 播放器 模块API手册 +# 3.7 播放器 模块API手册 ![cover](../images/canaan-cover.png) @@ -52,7 +52,7 @@ 提供Player类,该类提供如下方法: -### Player.load +### 2.1 Player.load 【描述】 @@ -90,7 +90,7 @@ player.load("test.mp4") 无 -### Player.start +### 2.2 Player.start 【描述】 @@ -122,7 +122,7 @@ player.start() 无 -### Player.pause +### 2.3 Player.pause 【描述】 @@ -154,7 +154,7 @@ player.pause() 无 -### Player.resume +### 2.4 Player.resume 【描述】 @@ -186,7 +186,7 @@ player.resume() 无 -### Player.stop +### 2.5 Player.stop 【描述】 @@ -218,7 +218,7 @@ player.stop() 无 -### Player.set_event_callback +### 2.6 Player.set_event_callback 【描述】 @@ -262,7 +262,7 @@ player.set_event_callback(callback=player_event) ## 3. 数据结构描述 -### playe_event_type +### 3.1 playe_event_type 【描述】 @@ -277,7 +277,7 @@ player.set_event_callback(callback=player_event) ## 4. 
示例程序 -### 例程1 +### 4.1 例程1 ```python from media.player import * diff --git a/zh/api/nncase.rst b/zh/api/nncase.rst index 2d5f3b8..9a9c392 100755 --- a/zh/api/nncase.rst +++ b/zh/api/nncase.rst @@ -1,4 +1,4 @@ -nncase +4.nncase =========== .. toctree:: :maxdepth: 1 diff --git "a/zh/api/nncase_runtime/K230_CanMV_nncase_runtime_API\346\211\213\345\206\214.md" "b/zh/api/nncase_runtime/K230_CanMV_nncase_runtime_API\346\211\213\345\206\214.md" index 43ba948..e2a7d6c 100755 --- "a/zh/api/nncase_runtime/K230_CanMV_nncase_runtime_API\346\211\213\345\206\214.md" +++ "b/zh/api/nncase_runtime/K230_CanMV_nncase_runtime_API\346\211\213\345\206\214.md" @@ -1,4 +1,4 @@ -# K230 CanMV nncase_runtime 模块API手册 +# 4.1 K230 CanMV nncase_runtime 模块API手册 ![cover](../images/canaan-cover.png) @@ -316,8 +316,8 @@ get_input_desc(index) 【返回值】 -| 返回值 | 描述 | -| :---------- | -------------------------------------------------- | +| 返回值 | 描述 | +| :---------- | -------------------------------------------- | | MemoryRange | 第index个输入信息:`dtype`, `start`, `size`。 | #### 2.3.10 get_output_desc @@ -340,8 +340,8 @@ get_output_desc(index) 【返回值】 -| 返回值 | 描述 | -| :---------- | -------------------------------------------------- | +| 返回值 | 描述 | +| :---------- | -------------------------------------------- | | MemoryRange | 第index个输出信息:`dtype`, `start`, `size`。 | ### 2.4 nncase_runtime.ai2d @@ -488,7 +488,7 @@ set_pad_param(pad_flag, paddings, pad_mode, pad_val) 【定义】 ```Python -set_resize_param(resize_flag, interp_method, interp_mode) +set_resize_param(resize_flag, ai2d_interp_method, ai2d_interp_mode) ``` 【参数】 @@ -496,8 +496,8 @@ set_resize_param(resize_flag, interp_method, interp_mode) | 名称 | 类型 | 描述 | | ------------- | ------------------ | ------------------ | | resize_flag | bool | 是否开启resize功能 | -| interp_method | ai2d_interp_method | resize插值方法 | -| interp_mode | ai2d_interp_mode | resize模式 | +| ai2d_interp_method | interp_method | resize插值方法 | +| ai2d_interp_mode | interp_mode | resize模式 | #### 2.4.8 set_affine_param @@ -508,7 +508,7 @@ set_resize_param(resize_flag, interp_method, interp_mode) 【定义】 ```Python -set_affine_param(affine_flag, interp_method, cord_round, bound_ind, bound_val, bound_smooth, M) +set_affine_param(affine_flag, ai2d_interp_method, cord_round, bound_ind, bound_val, bound_smooth, M) ``` 【参数】 @@ -516,13 +516,62 @@ set_affine_param(affine_flag, interp_method, cord_round, bound_ind, bound_val, b | 名称 | 类型 | 描述 | | ------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------ | | affine_flag | bool | 是否开启affine功能 | -| interp_method | ai2d_interp_method | Affine采用的插值方法 | +| ai2d_interp_method | interp_method | Affine采用的插值方法 | | cord_round | uint32_t | 整数边界0或者1 | | bound_ind | uint32_t | 边界像素模式0或者1 | | bound_val | uint32_t | 边界填充值 | | bound_smooth | uint32_t | 边界平滑0或者1 | | M | list | 仿射变换矩阵对应的vector,仿射变换为Y=\[a_0, a_1; a_2, a_3\] \cdot X + \[b_0, b_1\] $, 则 M=[a_0,a_1,b_0,a_2,a_3,b_1 ] | +#### 2.4.9 ai2d_format + +【描述】 + +ai2d_format用于配置输入输出的可选数据格式. + +【定义】 + +```Python +class ai2d_format + YUV420_NV12 = 0 + YUV420_NV21 = 1 + YUV420_I420 = 2 + NCHW_FMT = 3 + RGB_packed = 4 + RAW16 = 5 +``` + +#### 2.4.10 interp_method + +【描述】 + +interp_method用于配置可选的插值方式. + +【定义】 + +```Python +class interp_method: + tf_nearest = 0 + tf_bilinear = 1 + cv2_nearest = 2 + cv2_bilinear = 3 +``` + +#### 2.4.11 interp_mode + +【描述】 + +interp_mode 用于配置可选的插值模式. 
+ +【定义】 + +```Python +class interp_mode: + none = 0 + align_corner = 1 + half_pixel = 2 +``` + ### 2.5 shrink_memory_pool 【描述】 diff --git a/zh/api/openmv/image.md b/zh/api/openmv/image.md index 82ab608..c26da50 100644 --- a/zh/api/openmv/image.md +++ b/zh/api/openmv/image.md @@ -1,8 +1,8 @@ -# image 图像处理 API手册 +# 3.9 image 图像处理 API手册 移植于`openmv`,与`openmv`功能相同,详细请参考[官方文档](https://docs.openmv.io/library/omv.image.html),以下仅列出与官方API的差异部分及新增API。 -## 类 `Image` +## 1. 类 `Image` 图像对象是机器视觉操作的基本对象。 image支持从micropython gc,mmz,heap,vb区域创建,REF为在参考对象内存上直接生成image。 @@ -30,7 +30,7 @@ image支持的分配区域: - ALLOC_VB:视频缓冲区 - ALLOC_REF:不分配内存,使用参考对象内存 -### 构造函数 +### 1.1 构造函数 ```python class image.Image(path, alloc=ALLOC_MMZ, cache=True, phyaddr=0, virtaddr=0, poolid=0, data=None) @@ -69,11 +69,11 @@ del img gc.collect() ``` -### 函数 +### 1.2 函数 -#### 新增函数 +#### 1.2.1 新增函数 -##### `phyaddr` +##### 1.2.2 `phyaddr` ```python image.phyaddr() @@ -81,7 +81,7 @@ image.phyaddr() 获取image数据的物理内存地址。 -##### `virtaddr` +##### 1.2.3 `virtaddr` ```python image.virtaddr() @@ -89,7 +89,7 @@ image.virtaddr() 获取image数据的虚拟内存地址。 -##### `poolid` +##### 1.3.4 `poolid` ```python image.poolid() @@ -97,7 +97,7 @@ image.poolid() 获取image数据的VB poolid。 -##### `to_rgb888` +##### 1.2.5 `to_rgb888` ```python image.to_rgb888(x_scale=1.0[, y_scale=1.0[, roi=None[, rgb_channel=-1[, alpha=256[, color_palette=None[, alpha_palette=None[, hint=0[, alloc=ALLOC_MMZ, cache=True, phyaddr=0, virtaddr=0, poolid=0]]]]]]]]) @@ -106,7 +106,7 @@ image.to_rgb888(x_scale=1.0[, y_scale=1.0[, roi=None[, rgb_channel=-1[, alpha=25 转换图像格式为RGB888,返回一个新图像对象。 除原生支持格式外,额外添加`RGB888`格式支持,其它格式不支持。 -##### `copy_from` +##### 1.2.6 `copy_from` ```python image.copy_from(src_img) @@ -114,7 +114,7 @@ image.copy_from(src_img) 拷贝src_img到img。 -##### `copy_to` +##### 1.2.7 `copy_to` ```python image.copy_to(dst_img) @@ -122,7 +122,7 @@ image.copy_to(dst_img) 拷贝img到dst_img。 -##### `to_numpy_ref` +##### 1.2.8 `to_numpy_ref` ```python image.to_numpy_ref() @@ -130,9 +130,17 @@ image.to_numpy_ref() 将image类转换成numpy类,转换完成后numpy与image指向的是同一块数据,在numpy使用完成之前不能删除image,支持格式`GRAYSCALE`、`RGB565`、`ARGB8888`、`RGB888`、`RGBP888`。 -#### 差异函数 +##### 1.2.9 `draw_string_advanced` -##### 图像变换API +```python +image.draw_string_advanced(x,y,char_size,str,[color, font]) +``` + +`draw_string`的升级版,支持中文显示,用户可通过`font`自定义字体文件 + +#### 1.3 差异函数 + +##### 1.3.1 图像变换API - `to_bitmap` - `to_grayscale` @@ -152,18 +160,18 @@ image.to_numpy_ref() 除原生支持格式外,额外添加`RGB888`格式支持,其它格式不支持。 -##### 画图API +##### 1.3.2 画图API 除原生支持格式外,额外添加`ARGB8888`、`RGB888`格式支持,其它格式不支持。 -##### BINARY API +##### 1.3.3 BINARY API `binary`新增分配方式参数,仅在copy=True时有效。 -##### POOL API +##### 1.3.4 POOL API `mean_pooled`和`midpoint_pooled`新增分配方式参数。 -#### 其它图像算法API +#### 1.4 其它图像算法API 只支持原生格式,`RGB888`格式需要经过转换后才能使用。 diff --git a/zh/api/python_micropython.rst b/zh/api/python_micropython.rst index 92715bd..052095e 100644 --- a/zh/api/python_micropython.rst +++ b/zh/api/python_micropython.rst @@ -1,4 +1,4 @@ -Python 标准库和 Micropython 标准微库 +1.Python 标准库和 Micropython 标准微库 =========== .. toctree:: :maxdepth: 1 diff --git a/zh/api/stdlib/utime.md b/zh/api/stdlib/utime.md index 69c2f6e..aecbeb8 100644 --- a/zh/api/stdlib/utime.md +++ b/zh/api/stdlib/utime.md @@ -1,4 +1,4 @@ -# utime 时间相关的功能 API手册 +# 1.3 utime 时间相关的功能 API手册 该模块实现了相应CPython模块的子集,如下所述。有关更多信息,请参阅原始CPython文档: [time](https://docs.python.org/3.5/library/time.html#module-time). 
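
下面给出一个简单的计时用法示意(仅为示意,假设固件按 MicroPython 惯例将 `utime` 以 `time` 名称暴露,本文其他示例中的 `time.sleep_ms` 等用法即基于此):

```python
import time  # MicroPython 中 utime 通常可直接以 time 导入

start = time.ticks_ms()                             # 获取毫秒级时间戳
time.sleep_ms(500)                                  # 休眠 500 毫秒
elapsed = time.ticks_diff(time.ticks_ms(), start)   # 计算耗时,自动处理计数回绕
print("elapsed:", elapsed, "ms")
```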
diff --git "a/zh/example/AI_Demo\350\257\264\346\230\216\346\226\207\346\241\243.md" "b/zh/example/AI_Demo\350\257\264\346\230\216\346\226\207\346\241\243.md" new file mode 100644 index 0000000..d56741e --- /dev/null +++ "b/zh/example/AI_Demo\350\257\264\346\230\216\346\226\207\346\241\243.md" @@ -0,0 +1,8826 @@ +# AI Demo说明文档 + +## 1. AI Demo开发框架介绍 + +### 1.1. AI Demo开发框架 + +为了帮助用户简化AI部分的开发,基于K230_CanMV提供的API接口,搭建了配套的AI 开发框架。框架结构如下图所示: + +![开发框架](./images/framework.png) + +Camera默认出两路图像,一路格式为YUV420,直接给到Display显示;另一路格式为RGB888,给到AI部分进行处理。AI主要实现任务的前处理、推理和后处理流程,得到后处理结果后将其绘制在osd image实例上,并送给Display叠加显示。 + +### 1.2. 接口介绍 + +#### 1.2.1. PipeLine + +我们将Media部分的代码封装在PipeLine类型中,通过固定的接口实现整个流程操作。 + +其中PipeLine类提供的接口包括: + +- 初始化参数包括: + + (1)rgb888p_size:list类型,预设给到AI部分的图像分辨率;如rgb888p_size=[1920,1080]。 + + (2)display_size:list类型,显示部分Display的分辨率;如display_size=[1920,1080]。 + + (3)display_mode:str类型,显示模式,包括”hdmi“和”lcd“;如display_mode=”hdmi“。 + + (4)debug_mode:int类型,耗时调试模式,如果大于0,打印操作耗时;如debug_mode=0。 + +- create(sensor=None,hmirror=None,vfilp=None): + + (1)sensor:参数为可选参数,类型为Sensor对象,可自主配置现有CanMV、01Studio和k230d zero开发板实现了自动探测,可以默认使用create()实现。 + + (2)hmirror:默认为None,当主动设置时为bool类型(True/False),表示是否实现水平方向镜像显示。 + + (3)vflip: 默认为None,当主动设置时为bool类型(True/False),表示是否实现垂直方向翻转。 + +- get_frame():返回一帧ulab.numpy.ndarray类型图像数据,分辨率为rgb888p_size,排布为CHW。 + +- show_image():PipeLine实例中预设一帧OSD图像,该接口将成员变量osd_img显示在屏幕上。 + +- destroy():销毁PipeLine实例。 + +下面给出无AI部分的示例代码: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from media.media import * +import gc +import sys,os + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=[1920,1080], display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + print(img.shape) + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + pl.destroy() # 销毁PipeLine实例 +``` + +上述代码中,通过`pl.get_frame()`接口获取一帧分辨率为rgb888p_size的图像,类型为ulab.numpy.ndarray,排布为CHW。基于上面的代码得到了一帧图像给AI处理,您可以只关注AI推理部分的操作。 + +图像AI开发过程包括:图像预处理、模型推理、输出后处理的过程,我们将整个过程封装在Ai2d类和AIBase类中。 + +#### 1.2.2. 
Ai2d + +对于Ai2d类,我们给出了常见的几种预处理方法,包括crop/shift/pad/resize/affine。该类别提供的接口包括: + +- 初始化参数包括: + +​ (1)debug_mode:int类型,耗时调试模式,如果大于0,打印操作耗时;如debug_mode=0。 + +- set_ai2d_dtype(input_format,output_format,input_type,output_type) + +​ (1)input_format:ai2d预处理输入格式。 + +​ (2)output_format:ai2d预处理输出格式。 + +输入输出格式支持如下所示: + +```c++ +enum class ai2d_format +{ + YUV420_NV12 = 0, + YUV420_NV21 = 1, + YUV420_I420 = 2, + NCHW_FMT = 3, + RGB_packed = 4, + RAW16 = 5, +} +``` + +| 输入格式 | 输出格式 | 备注 | +| ---------------- | ---------------------- | --------------------- | +| YUV420_NV12 | RGB_planar/YUV420_NV12 | | +| YUV420_NV21 | RGB_planar/YUV420_NV21 | | +| YUV420_I420 | RGB_planar/YUV420_I420 | | +| YUV400 | YUV400 | | +| NCHW(RGB_planar) | NCHW(RGB_planar) | | +| RGB_packed | RGB_planar/RGB_packed | | +| RAW16 | RAW16/8 | 深度图,执行shift操作 | + +​ (3)input_type:输入数据类型。 + +​ (4)output_type:输出数据类型。 + +下面是接口调用示例: + +```python +from libs.AI2D import Ai2d +import nncase_runtime as nn + +my_ai2d=Ai2d(debug_mode=1) +my_ai2d.set_ai2d_type(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) +my_ai2d.set_ai2d_type(nn.ai2d_format.RGB_packed, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) +``` + +- crop(start_x,start_y,width,height):预处理crop函数。 + + (1)start_x:宽度方向的起始像素,int类型; + + (2)start_y: 高度方向的起始像素,int类型; + + (3)width: 宽度方向的crop长度,int类型; + + (4)height: 高度方向的crop长度,int类型; + +```python +my_ai2d.crop(0,0,200,300) +``` + +- shift(shift_val):预处理shift函数。 + +​ (1)shift_val:右移的比特数,int类型; + +```python +my_ai2d.shift(2) +``` + +- pad(paddings,pad_mode,pad_val):预处理padding函数。 + +​ (1)paddings:list类型,各维度两侧padding的大小,对于4维的图像(NCHW),该参数包含8个值,分别表示N/C/H/W四个维度两侧的padding大小,一般只在后两个维度做padding; + +​ (2)pad_mode:只支持constant padding,直接设为0; + +​ (3)pad_val:list类型,每个像素位置填充的值,比如[114,114,114]、[0,0,0] + +```python +my_ai2d.pad([0,0,0,0,5,5,15,15],0,[114,114,114]) +``` + +- resize(interp_method,interp_mode):预处理resize函数。 + +​ (1)interp_method:resize插值方法,ai2d_interp_method类型,包括:nn.interp_method.tf_nearest、nn.interp_method.tf_bilinear、nn.interp_method.cv2_nearest、nn.interp_method.cv2_bilinear; + +​ (2)interp_mode:resize模式,ai2d_interp_mode类型,包括:nn.interp_mode.none、nn.interp_mode.align_corner、nn.interp_mode.half_pixel; + +```python +my_ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) +``` + +- affine(interp_method,crop_round,bound_ind,bound_val,bound_smooth,M):预处理affine函数。 + +​ (1)interp_method:Affine采用的插值方法,ai2d_interp_method类型,包括:nn.interp_method.tf_nearest、nn.interp_method.tf_bilinear、nn.interp_method.cv2_nearest、nn.interp_method.cv2_bilinear; + +​ (2)cord_round:整数边界0或者1,uint32_t类型; + +​ (3)bound_ind:边界像素模式0或者1,uint32_t类型; + +​ (4)bound_val:边界填充值,uint32_t类型; + +​ (5)bound_smooth:边界平滑0或者1,uint32_t类型; + +​ (6)M:仿射变换矩阵对应的vector,仿射变换为Y=[a_0, a_1; a_2, a_3] \cdot X + [b_0, b_1] $, 则 M=[a_0,a_1,b_0,a_2,a_3,b_1 ],list类型。 + +```python +affine_matrix=[0.2159457, -0.031286, -59.5312, 0.031286, 0.2159457, -35.30719] +my_ai2d.affine(nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) +``` + +- build(ai2d_input_shape,ai2d_output_shape):ai2d构造函数,前面配置的预处理方法起作用。 + +​ (1)ai2d_input_shape:ai2d输入shape,list类型; + +​ (2)ai2d_output_shape:ai2d输出shape,list类型; + +```python +my_ai2d.build([1,3,224,224],[1,3,512,512]) +``` + +- run(input_np):调用配置好的ai2d进行预处理的函数,返回一个tensor类型数据,可以直接给模型使用,也可以通过to_numpy()转换成ulab.numpy.ndarray类型的数据。 + +​ (1)input_np:ulab.numpy.ndarray类型,ai2d预处理的输入数据,shape和build函数中设置的ai2d_input_shape一致。 + +> 注意: +> +> (1) Affine和Resize功能是互斥的,不能同时开启; +> (2) Shift功能的输入格式只能是Raw16; +> (3) Pad value是按通道配置的,对应的list元素个数要与channel数相等; +> (4) 
当配置了多个功能时,执行顺序是Crop->Shift->Resize/Affine->Pad, 配置参数时注意要匹配;如果不符合该顺序,需要初始化多个Ai2d实例实现预处理过程; + +下面是一个完整的示例: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AI2D import Ai2d +from media.media import * +import nncase_runtime as nn +import gc +import sys,os + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=[512,512], display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + my_ai2d=Ai2d(debug_mode=0) #初始化Ai2d实例 + # 配置resize预处理方法 + my_ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理过程 + my_ai2d.build([1,3,512,512],[1,3,640,640]) + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + print(img.shape) # 原图shape为[1,3,512,512] + ai2d_output_tensor=my_ai2d.run(img) # 执行resize预处理 + ai2d_output_np=ai2d_output_tensor.to_numpy() # 类型转换 + print(ai2d_output_np.shape) # 预处理后的shape为[1,3,640,640] + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + pl.destroy() # 销毁PipeLine实例 +``` + +#### 1.2.3. AIBase + +AIBase部分封装了实现模型推理的主要接口,也是进行AI开发主要关注的部分。用户需要按照自己demo的要求实现前处理和后处理部分。 + +AIBase提供的接口包括: + +- 初始化参数包括: + +​ (1)kmodel_path:str类型,kmodel路径,用于初始化kpu对象并加载kmodel; + +​ (2)model_input_size:list类型,可选,模型输入分辨率,在单输入时起作用,格式为[width,height],如:model_input_size=[512,512]; + +​ (3)rgb888p_size:list类型,可选,AI得到的图像的分辨率,在单输入时起作用,格式为[width,height],如:rgb888p_size=[640,640]; + +​ (4)debug_mode:int类型,耗时调试模式,如果大于0,打印操作耗时;如debug_mode=0。 + +- get_kmodel_inputs_num():返回当前模型的输入个数; +- get_kmodel_outputs_num():返回当前模型的输出个数; +- preprocess(input_np):使用ai2d对input_np做预处理,**如果不使用单个ai2d实例做预处理,需要在子类重写该函数**。 + +​ (1)input_np:ulab.numpy.ndarray类型,ai2d预处理输入数据; + +​ (2)返回tensor列表;**如果该方法重写,请注意返回类型:tensor类型的列表;** + +- inference(tensors):对预处理后得到的kmodel的输入(类型为tensor)进行推理,得到多个输出(类型为ulab.numpy.ndarray); + +​ (1)tensors:列表类型,模型的输入,可以是一个可以是多个; + +​ (2)返回ulab.numpy.ndarray类型的列表; + +> **Tips:** +> +> Image对象转ulab.numpy.ndarray: +> +> ```python +> import image +> img.to_rgb888().to_numpy_ref() #返回的array是HWC排布 +> ``` +> +> ulab.numpy.ndarray转Image对象: +> +> ```python +> import ulab.numpy as np +> import image +> img_np = np.zeros((height,width,4),dtype=np.uint8) +> img = image.Image(width, height, image.ARGB8888, alloc=image.ALLOC_REF,data =img_np) +> ``` +> +> ulab.numpy.ndarray转tensor类型: +> +> ```python +> import ulab.numpy as np +> import nncase_runtime as nn +> img_np = np.zeros((height,width,4),dtype=np.uint8) +> tensor = nn.from_numpy(img_np) +> ``` +> +> tensor 类型转ulab.numpy.ndarray: +> +> ```python +> import ulab.numpy as np +> import nncase_runtime as nn +> img_np=tensor.to_numpy() +> ``` + +- postprocess(results):模型输出后处理函数,该函数需要用户在任务子类重写,因为不同AI任务的后处理是不同的。 + +​ (1)results:list类型,list元素是ulab.numpy.ndarray类型,模型的推理输出。 + +- run(input_np):模型的前处理、推理、后处理流程,适用于单ai2d实例能解决的前处理的AI任务,其他任务需要用户在子类重写。 + +​ (1)input_np:ulab.numpy.ndarray类型,ai2d预处理输入数据;该数据通过ai2d预处理输出1个tensor,tensor通过模型推理得到输出列表results,results经过后处理过程得到AI结果。 + +- deinit():AIBase销毁函数。 + +#### 1.2.4. ScopedTiming + +ScopedTiming 类在PipeLine.py模块内,是一个用来测量代码块执行时间的上下文管理器。上下文管理器通过定义包含 `__enter__` 和 `__exit__` 方法的类来创建。当在 with 语句中使用该类的实例时,`__enter__` 在进入 with 块时被调用,`__exit__` 在离开时被调用。 + +```python +from libs.PipeLine import ScopedTiming + +def test_time(): + with ScopedTiming("test",1): + #####代码##### + # ... + ############## +``` + +### 1.3. 
应用方法和示例 + +#### 1.3.1. 概述 + +用户可根据具体的AI场景自写任务类继承AIBase,可以将任务分为如下四类:单模型任务、多模型任务,自定义预处理任务、无预处理任务。不同任务需要编写不同的代码实现,具体如下图所示: + +![不同任务类型](./images/task_diff.png) + +关于不同任务的介绍: + +| 任务类型 | 任务描述 | 代码说明 | +| ---------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| 单模型任务 | 该任务只有一个模型,只需要关注该模型的前处理、推理、后处理过程,此类任务的前处理使用Ai2d实现,可能使用一个Ai2d实例,也可能使用多个Ai2d实例,后处理基于场景自定义。 | 编写自定义任务类,主要关注任务类的config_preprocess、postprocess、以及该任务需要的其他方法如:draw_result等。
如果该任务包含多个Ai2d实例,则需要重写preprocess,按照预处理的顺序设置预处理阶段的计算过程。 | +| 自定义预处理任务 | 该任务只有一个模型,只需要关注该模型的前处理、推理、后处理过程,此类任务的前处理不使用Ai2d实现,可以使用ulab.numpy自定义,后处理基于场景自定义。 | 编写自定义任务类,主要关注任务类的preprocess、postprocess、以及该任务需要的其他方法如:draw_result等 | +| 无预处理任务 | 该任务只有一个模型且不需要预处理,只需要关注该模型的推理和后处理过程,此类任务一般作为多模型任务的一部分,直接将前一个模型的输出作为输入进行推理,后处理基于需求自定义。 | 编写自定义任务类,主要关注任务类的run(模型推理的整个过程,包括preprocess、inference、postprocess中的全部或某些步骤)、postprocess、以及该任务需要的其他方法如:draw_result等 | +| 多模型任务 | 该任务包含多个模型,可能是串联,也可能是其他组合方式。对于每个模型基本上属于前三种模型中的一种,最后通过一个完整的任务类将上述模型子任务统一起来。 | 编写多个子模型任务类,不同子模型任务参照前三种任务定义。不同任务关注不同的方法。
编写多模型任务类,将子模型任务类统一起来实现整个场景。 | + +#### 1.3.2. 单模型任务 + +单模型任务的伪代码结构如下: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +from media.media import * +import nncase_runtime as nn +import ulab.numpy as np +import image +import gc +import sys + +# 自定义AI任务类,继承自AIBase基类 +class MyAIApp(AIBase): + def __init__(self, kmodel_path, model_input_size, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + # 调用基类的构造函数 + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) + # 模型文件路径 + self.kmodel_path = kmodel_path + # 模型输入分辨率 + self.model_input_size = model_input_size + # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + # 是否开启调试模式 + self.debug_mode = debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 配置resize预处理方法 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,results是模型输出array列表,需要根据实际任务重写 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + pass + + # 绘制结果到画面上,需要根据任务自己写 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + pass + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径,这里要替换成当前任务模型 + kmodel_path = "example_test.kmodel" + rgb888p_size = [1920, 1080] + ###### 其它参数######## + ... 
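    # 示意:此处可按具体任务补充参数,例如置信度阈值 confidence_threshold、NMS阈值 nms_threshold、anchors 等(可参考下文人脸检测示例)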
+ ###################### + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义AI任务实例 + my_ai = MyAIApp(kmodel_path, model_input_size=[320, 320],rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + my_ai.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = my_ai.run(img) # 推理当前帧 + my_ai.draw_result(pl, res) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + my_ai.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 + +``` + +下面以人脸检测为例给出示例代码: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aidemo + +# 自定义人脸检测类,继承自AIBase基类 +class FaceDetectionApp(AIBase): + def __init__(self, kmodel_path, model_input_size, anchors, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.model_input_size = model_input_size # 模型输入分辨率 + self.confidence_threshold = confidence_threshold # 置信度阈值 + self.nms_threshold = nms_threshold # NMS(非极大值抑制)阈值 + self.anchors = anchors # 锚点数据,用于目标检测 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] # 显示分辨率,并对宽度进行16的对齐 + self.debug_mode = debug_mode # 是否开启调试模式 + self.ai2d = Ai2d(debug_mode) # 实例化Ai2d,用于实现模型预处理 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) # 设置Ai2d的输入输出格式和类型 + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): # 计时器,如果debug_mode大于0则开启 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + top, bottom, left, right = self.get_padding_param() # 获取padding参数 + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [104, 117, 123]) # 填充边缘 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) # 缩放图像 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) # 构建预处理流程 + + # 自定义当前任务的后处理,results是模型输出array列表,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + post_ret = aidemo.face_det_post_process(self.confidence_threshold, self.nms_threshold, self.model_input_size[1], self.anchors, self.rgb888p_size, results) + if len(post_ret) == 0: + return post_ret + else: + return post_ret[0] + + # 绘制检测结果到画面上 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + if dets: + pl.osd_img.clear() # 清除OSD图像 + for det in dets: + # 将检测框的坐标转换为显示分辨率下的坐标 + x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) + x = x * self.display_size[0] // self.rgb888p_size[0] + y = y * self.display_size[1] // 
self.rgb888p_size[1] + w = w * self.display_size[0] // self.rgb888p_size[0] + h = h * self.display_size[1] // self.rgb888p_size[1] + pl.osd_img.draw_rectangle(x, y, w, h, color=(255, 255, 0, 255), thickness=2) # 绘制矩形框 + else: + pl.osd_img.clear() + + # 获取padding参数 + def get_padding_param(self): + dst_w = self.model_input_size[0] # 模型输入宽度 + dst_h = self.model_input_size[1] # 模型输入高度 + ratio_w = dst_w / self.rgb888p_size[0] # 宽度缩放比例 + ratio_h = dst_h / self.rgb888p_size[1] # 高度缩放比例 + ratio = min(ratio_w, ratio_h) # 取较小的缩放比例 + new_w = int(ratio * self.rgb888p_size[0]) # 新宽度 + new_h = int(ratio * self.rgb888p_size[1]) # 新高度 + dw = (dst_w - new_w) / 2 # 宽度差 + dh = (dst_h - new_h) / 2 # 高度差 + top = int(round(0)) + bottom = int(round(dh * 2 + 0.1)) + left = int(round(0)) + right = int(round(dw * 2 - 0.1)) + return top, bottom, left, right + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径和其他参数 + kmodel_path = "/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 其它参数 + confidence_threshold = 0.5 + nms_threshold = 0.2 + anchor_len = 4200 + det_dim = 4 + anchors_path = "/sdcard/app/tests/utils/prior_data_320.bin" + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len, det_dim)) + rgb888p_size = [1920, 1080] + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义人脸检测实例 + face_det = FaceDetectionApp(kmodel_path, model_input_size=[320, 320], anchors=anchors, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + face_det.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = face_det.run(img) # 推理当前帧 + face_det.draw_result(pl, res) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + face_det.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 +``` + +多个Ai2d实例时的伪代码如下: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +from media.media import * +import nncase_runtime as nn +import ulab.numpy as np +import image +import gc +import sys + +# 自定义AI任务类,继承自AIBase基类 +class MyAIApp(AIBase): + def __init__(self, kmodel_path, model_input_size, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + # 调用基类的构造函数 + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) + # 模型文件路径 + self.kmodel_path = kmodel_path + # 模型输入分辨率 + self.model_input_size = model_input_size + # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + # 是否开启调试模式 + self.debug_mode = debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d_resize = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d_resize.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + # 实例化Ai2d,用于实现模型预处理 + self.ai2d_resize = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d_resize.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + # 实例化Ai2d,用于实现模型预处理 + self.ai2d_crop = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + 
self.ai2d_crop.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 配置resize预处理方法 + self.ai2d_resize.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d_resize.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,640,640]) + # 配置crop预处理方法 + self.ai2d_crop.crop(0,0,320,320) + # 构建预处理流程 + self.ai2d_crop.build([1,3,640,640],[1,3,320,320]) + + # 假设该任务需要crop和resize预处理,顺序是先resize再crop,该顺序不符合ai2d的处理顺序,因此需要设置两个Ai2d实例分别处理 + def preprocess(self,input_np): + resize_tensor=self.ai2d_resize.run(input_np) + resize_np=resize_tensor.to_numpy() + crop_tensor=self.ai2d_crop.run(resize_np) + return [crop_tensor] + + + # 自定义当前任务的后处理,results是模型输出array列表,需要根据实际任务重写 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + pass + + # 绘制结果到画面上,需要根据任务自己写 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + pass + + # 重写deinit,释放多个ai2d资源 + def deinit(self): + with ScopedTiming("deinit",self.debug_mode > 0): + del self.ai2d_resize + del self.ai2d_crop + super().deinit() + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径,这里要替换成当前任务模型 + kmodel_path = "example_test.kmodel" + rgb888p_size = [1920, 1080] + ###### 其它参数######## + ... + ###################### + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义AI任务实例 + my_ai = MyAIApp(kmodel_path, model_input_size=[320, 320],rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + my_ai.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = my_ai.run(img) # 推理当前帧 + my_ai.draw_result(pl, res) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + my_ai.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 +``` + +#### 1.3.3. 
自定义预处理任务 + +对于需要重写前处理(不使用提供的ai2d类,自己手动写预处理)的AI任务伪代码如下: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +from media.media import * +import nncase_runtime as nn +import ulab.numpy as np +import image +import gc +import sys + +# 自定义AI任务类,继承自AIBase基类 +class MyAIApp(AIBase): + def __init__(self, kmodel_path, model_input_size, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + # 调用基类的构造函数 + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) + # 模型文件路径 + self.kmodel_path = kmodel_path + # 模型输入分辨率 + self.model_input_size = model_input_size + # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + # 是否开启调试模式 + self.debug_mode = debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 对于不使用ai2d完成预处理的AI任务,使用封装的接口或者ulab.numpy实现预处理,需要在子类中重写该函数 + def preprocess(self,input_np): + ############# + #注意自定义预处理过程 + ############# + return [tensor] + + # 自定义当前任务的后处理,results是模型输出array列表,需要根据实际任务重写 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + pass + + # 绘制结果到画面上,需要根据任务自己写 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + pass + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径,这里要替换成当前任务模型 + kmodel_path = "example_test.kmodel" + rgb888p_size = [1920, 1080] + ###### 其它参数######## + ... 
+ ###################### + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义AI任务实例 + my_ai = MyAIApp(kmodel_path, model_input_size=[320, 320],rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + my_ai.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = my_ai.run(img) # 推理当前帧 + my_ai.draw_result(pl, res) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + my_ai.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 + +``` + +以关键词唤醒keyword_spotting为例: + +```python +from libs.PipeLine import ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +from media.pyaudio import * # 音频模块 +from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 +import media.wave as wave # wav音频处理模块 +import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 +import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 +import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 +import time # 时间统计 +import struct # 字节字符转换模块 +import gc # 垃圾回收模块 +import os,sys # 操作系统接口模块 + +# 自定义关键词唤醒类,继承自AIBase基类 +class KWSApp(AIBase): + def __init__(self, kmodel_path, threshold, debug_mode=0): + super().__init__(kmodel_path) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.threshold=threshold + self.debug_mode = debug_mode # 是否开启调试模式 + self.cache_np = np.zeros((1, 256, 105), dtype=np.float) + + # 自定义预处理,返回模型输入tensor列表 + def preprocess(self,pcm_data): + pcm_data_list=[] + # 获取音频流数据 + for i in range(0, len(pcm_data), 2): + # 每两个字节组织成一个有符号整数,然后将其转换为浮点数,即为一次采样的数据,加入到当前一帧(0.3s)的数据列表中 + int_pcm_data = struct.unpack(" 0): + logits_np = results[0] + self.cache_np= results[1] + max_logits = np.max(logits_np, axis=1)[0] + max_p = np.max(max_logits) + idx = np.argmax(max_logits) + # 如果分数大于阈值,且idx==1(即包含唤醒词),播放回复音频 + if max_p > self.threshold and idx == 1: + return 1 + else: + return 0 + + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + nn.shrink_memory_pool() + # 设置模型路径和其他参数 + kmodel_path = "/sdcard/app/tests/kmodel/kws.kmodel" + # 其它参数 + THRESH = 0.5 # 检测阈值 + SAMPLE_RATE = 16000 # 采样率16000Hz,即每秒采样16000次 + CHANNELS = 1 # 通道数 1为单声道,2为立体声 + FORMAT = paInt16 # 音频输入输出格式 paInt16 + CHUNK = int(0.3 * 16000) # 每次读取音频数据的帧数,设置为0.3s的帧数16000*0.3=4800 + reply_wav_file = "/sdcard/app/tests/utils/wozai.wav" # kws唤醒词回复音频路径 + + # 初始化音频预处理接口 + fp = aidemo.kws_fp_create() + # 初始化音频流 + p = PyAudio() + p.initialize(CHUNK) + MediaManager.init() #vb buffer初始化 + # 用于采集实时音频数据 + input_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,input=True,frames_per_buffer=CHUNK) + # 用于播放回复音频 + output_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,output=True,frames_per_buffer=CHUNK) + # 初始化自定义关键词唤醒实例 + kws = KWSApp(kmodel_path,threshold=THRESH,debug_mode=0) + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + pcm_data=input_stream.read() + res=kws.run(pcm_data) + if res: + print("====Detected XiaonanXiaonan!====") + wf = wave.open(reply_wav_file, "rb") + wav_data = wf.read_frames(CHUNK) + while wav_data: + output_stream.write(wav_data) + wav_data = wf.read_frames(CHUNK) + time.sleep(1) # 时间缓冲,用于播放回复声音 + wf.close() + else: + print("Deactivated!") + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + input_stream.stop_stream() + 
output_stream.stop_stream() + input_stream.close() + output_stream.close() + p.terminate() + MediaManager.deinit() #释放vb buffer + aidemo.kws_fp_destroy(fp) + kws.deinit() # 反初始化 +``` + +#### 1.3.4. 无预处理任务 + +对于不需要预处理(直接输入推理)的AI任务伪代码如下: + +```python + from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +from media.media import * +import nncase_runtime as nn +import ulab.numpy as np +import image +import gc +import sys + +# 自定义AI任务类,继承自AIBase基类 +class MyAIApp(AIBase): + def __init__(self, kmodel_path, model_input_size, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + # 调用基类的构造函数 + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) + # 模型文件路径 + self.kmodel_path = kmodel_path + # 模型输入分辨率 + self.model_input_size = model_input_size + # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + # 是否开启调试模式 + self.debug_mode = debug_mode + + # 自定义当前任务的后处理,results是模型输出array列表,需要根据实际任务重写 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + pass + + # 对于用预处理的AI任务,需要在子类中重写该函数 + def run(self,inputs_np): + # 先将ulab.numpy.ndarray列表转换成tensor列表 + tensors=[] + for input_np in inputs_np: + tensors.append(nn.from_numpy(input_np)) + # 调用AIBase内的inference函数进行模型推理 + results=self.inference(tensors) + # 调用当前子类的postprocess方法进行自定义后处理 + outputs=self.postprocess(results) + return outputs + + # 绘制结果到画面上,需要根据任务自己写 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + pass + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径,这里要替换成当前任务模型 + kmodel_path = "example_test.kmodel" + rgb888p_size = [1920, 1080] + ###### 其它参数######## + ... 
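    # 示意:无预处理任务的其它参数通常由前级模型决定,例如阈值 thresh、前级输出的尺寸等(可参考下文 nanotracker 的 TrackerApp)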
+ ###################### + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义AI任务实例 + my_ai = MyAIApp(kmodel_path, model_input_size=[320, 320],rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + my_ai.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = my_ai.run(img) # 推理当前帧 + my_ai.draw_result(pl, res) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + my_ai.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 + +``` + +比如单目标跟踪(nanotracker.py)中的追踪模块,只需要对模版模型和实时推理模型的输出作为追踪模型的输入,不需要预处理: + +```python +class TrackerApp(AIBase): + def __init__(self,kmodel_path,crop_input_size,thresh,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # crop模型的输入尺寸 + self.crop_input_size=crop_input_size + # 跟踪框阈值 + self.thresh=thresh + # 跟踪框宽、高调整系数 + self.CONTEXT_AMOUNT = 0.5 + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 可以不定义 + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 可以不定义 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + pass + + # 重写run函数,因为没有预处理过程,所以原来run操作中包含的preprocess->inference->postprocess不合适,这里只包含inference->postprocess + def run(self,input_np_1,input_np_2,center_xy_wh): + input_tensors=[] + input_tensors.append(nn.from_numpy(input_np_1)) + input_tensors.append(nn.from_numpy(input_np_2)) + results=self.inference(input_tensors) + return self.postprocess(results,center_xy_wh) + + + # 自定义后处理,results是模型输出array的列表,这里使用了aidemo的nanotracker_postprocess列表 + def postprocess(self,results,center_xy_wh): + with ScopedTiming("postprocess",self.debug_mode > 0): + det = aidemo.nanotracker_postprocess(results[0],results[1],[self.rgb888p_size[1],self.rgb888p_size[0]],self.thresh,center_xy_wh,self.crop_input_size[0],self.CONTEXT_AMOUNT) + return det +``` + +#### 1.3.5. 
多模型任务 + +这里以双模型串联推理为例,给出的伪代码如下: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +from media.media import * +import nncase_runtime as nn +import ulab.numpy as np +import image +import gc +import sys + +# 自定义AI任务类,继承自AIBase基类 +class MyAIApp_1(AIBase): + def __init__(self, kmodel_path, model_input_size, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + # 调用基类的构造函数 + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) + # 模型文件路径 + self.kmodel_path = kmodel_path + # 模型输入分辨率 + self.model_input_size = model_input_size + # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + # 是否开启调试模式 + self.debug_mode = debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 配置resize预处理方法 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,results是模型输出array列表,需要根据实际任务重写 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + pass + + +# 自定义AI任务类,继承自AIBase基类 +class MyAIApp_2(AIBase): + def __init__(self, kmodel_path, model_input_size, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + # 调用基类的构造函数 + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) + # 模型文件路径 + self.kmodel_path = kmodel_path + # 模型输入分辨率 + self.model_input_size = model_input_size + # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + # 是否开启调试模式 + self.debug_mode = debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 配置resize预处理方法 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,results是模型输出array列表,需要根据实际任务重写 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + pass + + +class MyApp: + def __init__(kmodel1_path,kmodel2_path,kmodel1_input_size,kmodel2_input_size,rgb888p_size,display_size,debug_mode): + # 创建两个模型推理的实例 + self.app_1=MyApp_1(kmodel1_path,kmodel1_input_size,rgb888p_size,display_size,debug_mode) + 
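        # 注意:第二个模型的预处理参数依赖第一个模型的输出,因此不在此处调用 config_preprocess,而是在下面的 run() 中针对每个输出动态配置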
self.app_2=MyApp_2(kmodel2_path,kmodel2_input_size,rgb888p_size,display_size,debug_mode) + self.app_1.config_preprocess() + + # 编写run函数,具体代码根据AI任务的需求编写,此处只是给出一个示例 + def run(self,input_np): + outputs_1=self.app_1.run(input_np) + outputs_2=[] + for out in outputs_1: + self.app_2.config_preprocess(out) + out_2=self.app_2.run(input_np) + outputs_2.append(out_2) + return outputs_1,outputs_2 + + # 绘制 + def draw_result(self,pl,outputs_1,outputs_2): + pass + + ######其他函数######## + ... + #################### + + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + rgb888p_size = [1920, 1080] + # 设置模型路径,这里要替换成当前任务模型 + kmodel1_path = "test_kmodel1.kmodel" + kmdoel1_input_size=[320,320] + kmodel2_path = "test_kmodel2.kmodel" + kmodel2_input_size=[48,48] + + ###### 其它参数######## + ... + ###################### + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义AI任务实例 + my_ai = MyApp(kmodel1_path,kmodel2_path, kmodel1_input_size,kmodel2_input_size,rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + my_ai.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + outputs_1,outputs_2 = my_ai.run(img) # 推理当前帧 + my_ai.draw_result(pl, outputs_1,outputs_2) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + my_ai.app_1.deinit() # 反初始化 + my_ai.app_2.deinit() + pl.destroy() # 销毁PipeLine实例 +``` + +下面以车牌检测为例给出示例代码: + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义车牌检测类 +class LicenceDetectionApp(AIBase): + # 初始化函数,设置车牌检测应用的参数 + def __init__(self, kmodel_path, model_input_size, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的初始化函数 + self.kmodel_path = kmodel_path # 模型路径 + # 模型输入分辨率 + self.model_input_size = model_input_size + # 分类阈值 + self.confidence_threshold = confidence_threshold + self.nms_threshold = nms_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + self.debug_mode = debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self, results): + with ScopedTiming("postprocess", 
self.debug_mode > 0): + # 对检测结果进行后处理 + det_res = aidemo.licence_det_postprocess(results, [self.rgb888p_size[1], self.rgb888p_size[0]], self.model_input_size, self.confidence_threshold, self.nms_threshold) + return det_res + +# 自定义车牌识别任务类 +class LicenceRecognitionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 车牌字符字典 + self.dict_rec = ["挂", "使", "领", "澳", "港", "皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"] + self.dict_size = len(self.dict_rec) + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + output_data=results[0].reshape((-1,self.dict_size)) + max_indices = np.argmax(output_data, axis=1) + result_str = "" + for i in range(max_indices.shape[0]): + index = max_indices[i] + if index > 0 and (i == 0 or index != max_indices[i - 1]): + result_str += self.dict_rec[index - 1] + return result_str + +# 车牌识别任务类 +class LicenceRec: + def __init__(self,licence_det_kmodel,licence_rec_kmodel,det_input_size,rec_input_size,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # 车牌检测模型路径 + self.licence_det_kmodel=licence_det_kmodel + # 车牌识别模型路径 + self.licence_rec_kmodel=licence_rec_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸姿态模型输入分辨率 + self.rec_input_size=rec_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.licence_det=LicenceDetectionApp(self.licence_det_kmodel,model_input_size=self.det_input_size,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.licence_rec=LicenceRecognitionApp(self.licence_rec_kmodel,model_input_size=self.rec_input_size,rgb888p_size=self.rgb888p_size) + self.licence_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行车牌检测 + det_boxes=self.licence_det.run(input_np) + # 将车牌部分抠出来 + 
imgs_array_boxes = aidemo.ocr_rec_preprocess(input_np,[self.rgb888p_size[1],self.rgb888p_size[0]],det_boxes) + imgs_array = imgs_array_boxes[0] + boxes = imgs_array_boxes[1] + rec_res = [] + for img_array in imgs_array: + # 对每一个检测到的车牌进行识别 + self.licence_rec.config_preprocess(input_image_size=[img_array.shape[3],img_array.shape[2]]) + licence_str=self.licence_rec.run(img_array) + rec_res.append(licence_str) + gc.collect() + return det_boxes,rec_res + + # 绘制车牌检测识别效果 + def draw_result(self,pl,det_res,rec_res): + pl.osd_img.clear() + if det_res: + point_8 = np.zeros((8),dtype=np.int16) + for det_index in range(len(det_res)): + for i in range(4): + x = det_res[det_index][i * 2 + 0]/self.rgb888p_size[0]*self.display_size[0] + y = det_res[det_index][i * 2 + 1]/self.rgb888p_size[1]*self.display_size[1] + point_8[i * 2 + 0] = int(x) + point_8[i * 2 + 1] = int(y) + for i in range(4): + pl.osd_img.draw_line(point_8[i * 2 + 0],point_8[i * 2 + 1],point_8[(i+1) % 4 * 2 + 0],point_8[(i+1) % 4 * 2 + 1],color=(255, 0, 255, 0),thickness=4) + pl.osd_img.draw_string_advanced( point_8[6], point_8[7] + 20, 40,rec_res[det_index] , color=(255,255,153,18)) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 车牌检测模型路径 + licence_det_kmodel_path="/sdcard/app/tests/kmodel/LPD_640.kmodel" + # 车牌识别模型路径 + licence_rec_kmodel_path="/sdcard/app/tests/kmodel/licence_reco.kmodel" + # 其它参数 + rgb888p_size=[640,360] + licence_det_input_size=[640,640] + licence_rec_input_size=[220,32] + confidence_threshold=0.2 + nms_threshold=0.2 + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + lr=LicenceRec(licence_det_kmodel_path,licence_rec_kmodel_path,det_input_size=licence_det_input_size,rec_input_size=licence_rec_input_size,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_res,rec_res=lr.run(img) # 推理当前帧 + lr.draw_result(pl,det_res,rec_res) # 绘制当前帧推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + lr.licence_det.deinit() + lr.licence_rec.deinit() + pl.destroy() +``` + +### 1.4. 参考文档 + +#### 1.4.1. k230 canmv文档 + +文档链接:[Welcome to K230 CanMV’s documentation! — K230 CanMV 文档 (canaan-creative.com)](https://developer.canaan-creative.com/k230_canmv/dev/index.html) + +#### 1.4.2. Ulab库支持 + +链接: [ulab – Manipulate numeric data similar to numpy — Adafruit CircuitPython 9.1.0-beta.3 documentation](https://docs.circuitpython.org/en/latest/shared-bindings/ulab/index.html) + +github链接:[v923z/micropython-ulab: a numpy-like fast vector module for micropython, circuitpython, and their derivatives (github.com)](https://github.com/v923z/micropython-ulab) + +## 2. 
AI Demo + +AI Demo分为两种类型:单模型、多模型,涵盖物体、人脸、人手、人体、车牌、OCR、音频(KWS、TTS)等方向;参考该文档,k230用户可以更快上手K230 AI应用的开发,实现预期效果。 + +| Demo名称 | 场景 | 任务类型 | +| ----------------------- | --------------- | ---------- | +| dynamic_gesture | 动态手势识别 | 多模型任务 | +| eye_gaze | 注视估计 | 多模型任务 | +| face_detection | 人脸检测 | 单模型任务 | +| face_landmark | 人脸关键部位 | 多模型任务 | +| face_mesh | 人脸3D网格 | 多模型任务 | +| face_parse | 人脸解析 | 多模型任务 | +| face_pose | 人脸姿态 | 多模型任务 | +| face_recognition | 人脸识别 | 多模型任务 | +| face_registration | 人脸注册 | 多模型任务 | +| falldown_detection | 跌倒检测 | 单模型任务 | +| finger_guessing | 猜拳游戏 | 多模型任务 | +| hand_detection | 手掌检测 | 单模型任务 | +| hand_keypoint_class | 手掌关键点分类 | 多模型任务 | +| hand_keypoint_detection | 手掌关键点检测 | 多模型任务 | +| hand_recognition | 手势识别 | 多模型任务 | +| keyword_spotting | 关键词唤醒 | 单模型任务 | +| licence_det | 车牌检测 | 单模型任务 | +| licence_det_rec | 车牌识别 | 多模型任务 | +| nanotracker | 单目标跟踪 | 多模型任务 | +| object_detect_yolov8n | yolov8n目标检测 | 单模型任务 | +| ocr_det | OCR检测 | 单模型任务 | +| ocr_rec | OCR识别 | 多模型任务 | +| person_detection | 人体检测 | 单模型任务 | +| person_kp_detect | 人体关键点检测 | 多模型任务 | +| puzzle_game | 拼图游戏 | 多模型任务 | +| segment_yolov8n | yolov8分割 | 单模型任务 | +| self_learning | 自学习 | 单模型任务 | +| space_resize | 局部放大器 | 多模型任务 | +| tts_zh | 中文文本转语音 | 多模型任务 | + +### 2.1. 动态手势识别 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +from random import randint +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测标签 + self.labels=labels + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # 检测锚框 + self.anchors=anchors + self.strides = strides # 特征下采样倍数 + self.nms_option = nms_option # NMS选项,如果为True做类间NMS,如果为False做类内NMS + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了padding和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 
自定义后处理过程,这里使用了aicube的anchorbasedet_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势关键点分类任务类 +class HandKPClassApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 手掌关键点模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # crop参数列表 + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 如果input_image_size为None,使用视频出图大小,否则按照自定义设置 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算crop参数 + self.crop_params = self.get_crop_param(det) + # 设置crop预处理过程 + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + # 设置resize预处理过程 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build预处理过程,参数为输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + results=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + results_show = np.zeros(results.shape,dtype=np.int16) + results_show[0::2] = results[0::2] * self.crop_params[3] + self.crop_params[0] + results_show[1::2] = results[1::2] * self.crop_params[2] + self.crop_params[1] + # 根据输出计算手势 + gesture=self.hk_gesture(results_show) + return results_show,gesture + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // 
self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + + # 求两个vector之间的夹角 + def hk_vector_2d_angle(self,v1,v2): + with ScopedTiming("hk_vector_2d_angle",self.debug_mode > 0): + v1_x,v1_y,v2_x,v2_y = v1[0],v1[1],v2[0],v2[1] + v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) + v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) + dot_product = v1_x * v2_x + v1_y * v2_y + cos_angle = dot_product/(v1_norm*v2_norm) + angle = np.acos(cos_angle)*180/np.pi + return angle + + # 根据手掌关键点检测结果判断手势类别 + def hk_gesture(self,results): + with ScopedTiming("hk_gesture",self.debug_mode > 0): + angle_list = [] + for i in range(5): + angle = self.hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) + angle_list.append(angle) + thr_angle,thr_angle_thumb,thr_angle_s,gesture_str = 65.,53.,49.,None + if 65535. not in angle_list: + if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "fist" + elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "gun" + elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]5) and (angle_list[1]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "one" + elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]thr_angle_thumb) and (angle_list[1]thr_angle): + gesture_str = "three" + elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "thumbUp" + elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "yeah" + return gesture_str + +# 自定义动态手势识别任务类 +class DynamicGestureApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 注意:ai2d设置多个预处理时执行的顺序为:crop->shift->resize/affine->pad,如果不符合该顺序,需要配置多个ai2d对象; + # 如下模型预处理要先做resize再做crop,因此要配置两个Ai2d对象 + self.ai2d_resize=Ai2d(debug_mode) + self.ai2d_resize.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + self.ai2d_crop=Ai2d(debug_mode) + self.ai2d_crop.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 动态手势识别模型输入tensors列表 + self.input_tensors=[] + # 动态手势识别模型的输入tensor的shape + self.gesture_kmodel_input_shape = [[1, 3, 224, 224], # 动态手势识别kmodel输入分辨率 + [1,3,56,56], + [1,4,28,28], + [1,4,28,28], + [1,8,14,14], + [1,8,14,14], + [1,8,14,14], + [1,12,14,14], + [1,12,14,14], + [1,20,7,7], + [1,20,7,7]] + # 预处理参数 + self.resize_shape = 256 + self.mean_values = np.array([0.485, 0.456, 
0.406]).reshape((3,1,1)) # 动态手势识别预处理均值 + self.std_values = np.array([0.229, 0.224, 0.225]).reshape((3,1,1)) # 动态手势识别预处理方差 + self.first_data=None + self.max_hist_len=20 + self.crop_params=self.get_crop_param() + + # 配置预处理 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 配置resize和crop预处理 + self.ai2d_resize.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_resize.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.crop_params[1],self.crop_params[0]]) + self.ai2d_crop.crop(self.crop_params[2],self.crop_params[3],self.crop_params[4],self.crop_params[5]) + self.ai2d_crop.build([1,3,self.crop_params[1],self.crop_params[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + # 初始化动态手势识别模型输入列表 + inputs_num=self.get_kmodel_inputs_num() + self.first_data = np.ones(self.gesture_kmodel_input_shape[0], dtype=np.float) + for i in range(inputs_num): + data = np.zeros(self.gesture_kmodel_input_shape[i], dtype=np.float) + self.input_tensors.append(nn.from_numpy(data)) + + # 重写预处理,因为该部分不是单纯的走一个ai2d做预处理,所以该函数需要重写 + def preprocess(self,input_np): + # 先走resize,再走crop + resize_tensor=self.ai2d_resize.run(input_np) + crop_output_tensor=self.ai2d_crop.run(resize_tensor.to_numpy()) + ai2d_output = crop_output_tensor.to_numpy() + self.first_data[0] = ai2d_output[0].copy() + self.first_data[0] = (self.first_data[0]*1.0/255 -self.mean_values)/self.std_values + self.input_tensors[0]=nn.from_numpy(self.first_data) + return + + # run函数重写 + def run(self,input_np,his_logit,history): + # 预处理 + self.preprocess(input_np) + # 推理 + outputs=self.inference(self.input_tensors) + # 使用当前帧的输出更新下一帧的输入列表 + outputs_num=self.get_kmodel_outputs_num() + for i in range(1,outputs_num): + self.input_tensors[i]=nn.from_numpy(outputs[i]) + # 返回后处理结果 + return self.postprocess(outputs,his_logit,history) + + # 自定义后处理 + def postprocess(self,results,his_logit, history): + with ScopedTiming("postprocess",self.debug_mode > 0): + his_logit.append(results[0]) + avg_logit = sum(np.array(his_logit)) + idx_ = np.argmax(avg_logit) + idx = self.gesture_process_output(idx_, history) + if (idx_ != idx): + his_logit_last = his_logit[-1] + his_logit = [] + his_logit.append(his_logit_last) + return idx, avg_logit + + # 手势处理函数 + def gesture_process_output(self,pred,history): + if (pred == 7 or pred == 8 or pred == 21 or pred == 22 or pred == 3 ): + pred = history[-1] + if (pred == 0 or pred == 4 or pred == 6 or pred == 9 or pred == 14 or pred == 1 or pred == 19 or pred == 20 or pred == 23 or pred == 24) : + pred = history[-1] + if (pred == 0) : + pred = 2 + if (pred != history[-1]) : + if (len(history)>= 2) : + if (history[-1] != history[len(history)-2]) : + pred = history[-1] + history.append(pred) + if (len(history) > self.max_hist_len) : + history = history[-self.max_hist_len:] + return history[-1] + + # 计算crop参数 + def get_crop_param(self): + ori_w = self.rgb888p_size[0] + ori_h = self.rgb888p_size[1] + width = self.model_input_size[0] + height = self.model_input_size[1] + ratiow = float(self.resize_shape) / ori_w + ratioh = float(self.resize_shape) / ori_h + if ratiow < ratioh: + ratio = ratioh + else: + ratio = ratiow + new_w = int(ratio * ori_w) + new_h = int(ratio * ori_h) + top = int((new_h-height)/2) + left = int((new_w-width)/2) + return new_w,new_h,left,top,width,height + + # 重写逆初始化 + def deinit(self): + with ScopedTiming("deinit",self.debug_mode 
> 0): + del self.kpu + del self.ai2d_resize + del self.ai2d_crop + self.tensors.clear() + del self.tensors + gc.collect() + nn.shrink_memory_pool() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + +# 自定义动态手势识别任务 +class DynamicGesture: + def __init__(self,hand_det_kmodel,hand_kp_kmodel,gesture_kmodel,det_input_size,kp_input_size,gesture_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 手掌检测模型路径 + self.hand_det_kmodel=hand_det_kmodel + # 手掌关键点模型路径 + self.hand_kp_kmodel=hand_kp_kmodel + # 动态手势识别路径 + self.gesture_kmodel=gesture_kmodel + # 手掌检测模型输入分辨率 + self.det_input_size=det_input_size + # 手掌关键点模型输入分辨率 + self.kp_input_size=kp_input_size + # 动态手势识别模型输入分辨率 + self.gesture_input_size=gesture_input_size + self.labels=labels + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.nms_option=nms_option + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # 动态手势识别贴图 + self.bin_width = 150 # 动态手势识别屏幕坐上角标志状态文件的短边尺寸 + self.bin_height = 216 # 动态手势识别屏幕坐上角标志状态文件的长边尺寸 + shang_argb = np.fromfile("/sdcard/app/tests/utils/shang.bin", dtype=np.uint8) + self.shang_argb = shang_argb.reshape((self.bin_height, self.bin_width, 4)) + xia_argb = np.fromfile("/sdcard/app/tests/utils/xia.bin", dtype=np.uint8) + self.xia_argb = xia_argb.reshape((self.bin_height, self.bin_width, 4)) + zuo_argb = np.fromfile("/sdcard/app/tests/utils/zuo.bin", dtype=np.uint8) + self.zuo_argb = zuo_argb.reshape((self.bin_width, self.bin_height, 4)) + you_argb = np.fromfile("/sdcard/app/tests/utils/you.bin", dtype=np.uint8) + self.you_argb = you_argb.reshape((self.bin_width, self.bin_height, 4)) + #其他参数 + self.TRIGGER = 0 # 动态手势识别应用的结果状态 + self.MIDDLE = 1 + self.UP = 2 + self.DOWN = 3 + self.LEFT = 4 + self.RIGHT = 5 + self.max_hist_len = 20 # 最多存储多少帧的结果 + # debug_mode模式 + self.debug_mode=debug_mode + self.cur_state = self.TRIGGER + self.pre_state = self.TRIGGER + self.draw_state = self.TRIGGER + self.vec_flag = [] + self.his_logit = [] + self.history = [2] + self.s_start = time.time_ns() + self.m_start=None + self.hand_det=HandDetApp(self.hand_det_kmodel,self.labels,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.hand_kp=HandKPClassApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.dg=DynamicGestureApp(self.gesture_kmodel,model_input_size=self.gesture_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.hand_det.config_preprocess() + self.dg.config_preprocess() + + # run函数 + def run(self,input_np): + if self.cur_state == self.TRIGGER: + # 手掌检测 + det_boxes=self.hand_det.run(input_np) + boxes=[] + gesture_res=[] + for det_box in det_boxes: + # 筛选检测框 + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or (x2>(0.97*self.rgb888p_size[0])))): + continue + if 
(w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + # 手掌关键点预处理配置 + self.hand_kp.config_preprocess(det_box) + # 手掌关键点检测 + hk_results,gesture_str=self.hand_kp.run(input_np) + boxes.append(det_box) + gesture_res.append((hk_results,gesture_str)) + return boxes,gesture_res + else: + # 动态手势识别 + idx, avg_logit = self.dg.run(input_np, self.his_logit, self.history) + return idx,avg_logit + + # 根据输出结果绘制效果 + def draw_result(self,pl,output1,output2): + pl.osd_img.clear() + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img=image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=draw_img_np) + if self.cur_state == self.TRIGGER: + for i in range(len(output1)): + hk_results,gesture=output2[i][0],output2[i][1] + if ((gesture == "five") or (gesture == "yeah")): + v_x = hk_results[24]-hk_results[0] + v_y = hk_results[25]-hk_results[1] + angle = self.hand_kp.hk_vector_2d_angle([v_x,v_y],[1.0,0.0]) + if (v_y>0): + angle = 360-angle + if ((70.0<=angle) and (angle<110.0)): + if ((self.pre_state != self.UP) or (self.pre_state != self.MIDDLE)): + self.vec_flag.append(self.pre_state) + if ((len(self.vec_flag)>10)or(self.pre_state == self.UP) or (self.pre_state == self.MIDDLE) or(self.pre_state == self.TRIGGER)): + draw_img_np[:self.bin_height,:self.bin_width,:] = self.shang_argb + self.cur_state = self.UP + elif ((110.0<=angle) and (angle<225.0)): # 手指向右(实际方向) + if (self.pre_state != self.RIGHT): + self.vec_flag.append(self.pre_state) + if ((len(self.vec_flag)>10)or(self.pre_state == self.RIGHT)or(self.pre_state == self.TRIGGER)): + draw_img_np[:self.bin_width,:self.bin_height,:] = self.you_argb + self.cur_state = self.RIGHT + elif((225.0<=angle) and (angle<315.0)): # 手指向下 + if (self.pre_state != self.DOWN): + self.vec_flag.append(self.pre_state) + if ((len(self.vec_flag)>10)or(self.pre_state == self.DOWN)or(self.pre_state == self.TRIGGER)): + draw_img_np[:self.bin_height,:self.bin_width,:] = self.xia_argb + self.cur_state = self.DOWN + else: # 手指向左(实际方向) + if (self.pre_state != self.LEFT): + self.vec_flag.append(self.pre_state) + if ((len(self.vec_flag)>10)or(self.pre_state == self.LEFT)or(self.pre_state == self.TRIGGER)): + draw_img_np[:self.bin_width,:self.bin_height,:] = self.zuo_argb + self.cur_state = self.LEFT + self.m_start = time.time_ns() + self.his_logit = [] + else: + idx,avg_logit=output1,output2[0] + if (self.cur_state == self.UP): + draw_img_np[:self.bin_height,:self.bin_width,:] = self.shang_argb + if ((idx==15) or (idx==10)): + self.vec_flag.clear() + if (((avg_logit[idx] >= 0.7) and (len(self.his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(self.his_logit) >= 4))): + self.s_start = time.time_ns() + self.cur_state = self.TRIGGER + self.draw_state = self.DOWN + self.history = [2] + self.pre_state = self.UP + elif ((idx==25)or(idx==26)) : + self.vec_flag.clear() + if (((avg_logit[idx] >= 0.4) and (len(self.his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(self.his_logit) >= 3))): + self.s_start = time.time_ns() + self.cur_state = self.TRIGGER + self.draw_state = self.MIDDLE + self.history = [2] + self.pre_state = self.MIDDLE + else: + self.his_logit.clear() + elif (self.cur_state == self.RIGHT): + draw_img_np[:self.bin_width,:self.bin_height,:] = self.you_argb + if ((idx==16)or(idx==11)) : + self.vec_flag.clear() + if (((avg_logit[idx] >= 0.4) and (len(self.his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(self.his_logit) >= 3))): 
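+ # 补充注释(非原始代码,仅作说明):只有当平均置信度与累计帧数满足
+ # “avg_logit≥0.4 且累计≥2 帧”或“avg_logit≥0.3 且累计≥3 帧”之一时,
+ # 才确认本次向右挥动并回到 TRIGGER 状态,以降低单帧误识别;其余方向分支采用类似的双阈值判定。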
+ self.s_start = time.time_ns() + self.cur_state = self.TRIGGER + self.draw_state = self.RIGHT + self.history = [2] + self.pre_state = self.RIGHT + else: + self.his_logit.clear() + elif (self.cur_state == self.DOWN): + draw_img_np[:self.bin_height,:self.bin_width,:] = self.xia_argb + if ((idx==18)or(idx==13)): + self.vec_flag.clear() + if (((avg_logit[idx] >= 0.4) and (len(self.his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(self.his_logit) >= 3))): + self.s_start = time.time_ns() + self.cur_state = self.TRIGGER + self.draw_state = self.UP + self.history = [2] + self.pre_state = self.DOWN + else: + self.his_logit.clear() + elif (self.cur_state == self.LEFT): + draw_img_np[:self.bin_width,:self.bin_height,:] = self.zuo_argb + if ((idx==17)or(idx==12)): + self.vec_flag.clear() + if (((avg_logit[idx] >= 0.4) and (len(self.his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(self.his_logit) >= 3))): + self.s_start = time.time_ns() + self.cur_state = self.TRIGGER + self.draw_state = self.LEFT + self.history = [2] + self.pre_state = self.LEFT + else: + self.his_logit.clear() + + self.elapsed_time = round((time.time_ns() - self.m_start)/1000000) + + if ((self.cur_state != self.TRIGGER) and (self.elapsed_time>2000)): + self.cur_state = self.TRIGGER + self.pre_state = self.TRIGGER + + self.elapsed_ms_show = round((time.time_ns()-self.s_start)/1000000) + if (self.elapsed_ms_show<1000): + if (self.draw_state == self.UP): + draw_img.draw_arrow(1068,330,1068,130, (255,170,190,230), thickness=13) # 判断为向上挥动时,画一个向上的箭头 + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,32,"向上") + elif (self.draw_state == self.RIGHT): + draw_img.draw_arrow(1290,540,1536,540, (255,170,190,230), thickness=13) # 判断为向右挥动时,画一个向右的箭头 + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,32,"向右") + elif (self.draw_state == self.DOWN): + draw_img.draw_arrow(1068,750,1068,950, (255,170,190,230), thickness=13) # 判断为向下挥动时,画一个向下的箭头 + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,32,"向下") + elif (self.draw_state == self.LEFT): + draw_img.draw_arrow(846,540,600,540, (255,170,190,230), thickness=13) # 判断为向左挥动时,画一个向左的箭头 + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,32,"向左") + elif (self.draw_state == self.MIDDLE): + draw_img.draw_circle(1068,540,100, (255,170,190,230), thickness=2, fill=True) # 判断为五指捏合手势时,画一个实心圆 + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,32,"中间") + else: + self.draw_state = self.TRIGGER + pl.osd_img.copy_from(draw_img) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手部关键点模型路径 + hand_kp_kmodel_path="/sdcard/app/tests/kmodel/handkp_det.kmodel" + # 动态手势识别模型路径 + gesture_kmodel_path="/sdcard/app/tests/kmodel/gesture.kmodel" + # 其他参数 + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_kp_input_size=[256,256] + gesture_input_size=[224,224] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 自定义动态手势识别任务实例 + 
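+ # 补充注释(非原始代码,仅作说明):DynamicGesture 内部串联三个模型——
+ # hand_det 负责检出手掌框,hand_kp 计算手掌关键点并识别 five/yeah 手势以触发方向状态,
+ # dg(gesture.kmodel)在方向状态下连续推理,判定实际的挥动方向后绘制箭头并复位到 TRIGGER 状态。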
dg=DynamicGesture(hand_det_kmodel_path,hand_kp_kmodel_path,gesture_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_kp_input_size,gesture_input_size=gesture_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + output1,output2=dg.run(img) # 推理当前帧 + dg.draw_result(pl,output1,output2) # 绘制推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + dg.hand_det.deinit() + dg.hand_kp.deinit() + dg.dg.deinit() + pl.destroy() +``` + +### 2.2. 注视估计 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys +import math + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了padding和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 设置padding预处理 + self.ai2d.pad(self.get_pad_param(), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义任务后处理,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results) + if len(res)==0: + return res + else: + return res[0] + + # 计算padding参数 + def get_pad_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / self.rgb888p_size[0] + ratio_h = dst_h / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = 
(int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义注视估计任务类 +class EyeGazeApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 注视估计模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算crop预处理参数 + x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) + # 设置crop预处理 + self.ai2d.crop(x,y,w,h) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里调用了aidemo库的eye_gaze_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + post_ret = aidemo.eye_gaze_post_process(results) + return post_ret[0],post_ret[1] + +# 自定义注视估计类 +class EyeGaze: + def __init__(self,face_det_kmodel,eye_gaze_kmodel,det_input_size,eye_gaze_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸注视估计模型路径 + self.eye_gaze_kmodel=eye_gaze_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸注视估计模型输入分辨率 + self.eye_gaze_input_size=eye_gaze_input_size + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + # 人脸检测实例 + self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + # 注视估计实例 + self.eye_gaze=EyeGazeApp(self.eye_gaze_kmodel,model_input_size=self.eye_gaze_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + # 人脸检测配置预处理 + self.face_det.config_preprocess() + + #run方法 + def run(self,input_np): + # 先进行人脸检测 + det_boxes=self.face_det.run(input_np) + eye_gaze_res=[] + for det_box in det_boxes: + # 对每一个检测到的人脸做注视估计 + self.eye_gaze.config_preprocess(det_box) + pitch,yaw=self.eye_gaze.run(input_np) + eye_gaze_res.append((pitch,yaw)) + return det_boxes,eye_gaze_res + + # 绘制注视估计效果 + def draw_result(self,pl,dets,eye_gaze_res): + pl.osd_img.clear() + if dets: + for det,gaze_ret in zip(dets,eye_gaze_res): + pitch , yaw = gaze_ret + length = 
self.display_size[0]/ 2 + x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) + x = x * self.display_size[0] // self.rgb888p_size[0] + y = y * self.display_size[1] // self.rgb888p_size[1] + w = w * self.display_size[0] // self.rgb888p_size[0] + h = h * self.display_size[1] // self.rgb888p_size[1] + center_x = (x + w / 2.0) + center_y = (y + h / 2.0) + dx = -length * math.sin(pitch) * math.cos(yaw) + target_x = int(center_x + dx) + dy = -length * math.sin(yaw) + target_y = int(center_y + dy) + pl.osd_img.draw_arrow(int(center_x), int(center_y), target_x, target_y, color = (255,255,0,0), size = 30, thickness = 2) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸注视估计模型路径 + eye_gaze_kmodel_path="/sdcard/app/tests/kmodel/eye_gaze.kmodel" + # 其他参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + face_det_input_size=[320,320] + eye_gaze_input_size=[448,448] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + eg=EyeGaze(face_det_kmodel_path,eye_gaze_kmodel_path,det_input_size=face_det_input_size,eye_gaze_input_size=eye_gaze_input_size,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,eye_gaze_res=eg.run(img) # 推理当前帧 + eg.draw_result(pl,det_boxes,eye_gaze_res) # 绘制推理效果 + pl.show_image() # 展示推理效果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + eg.face_det.deinit() + eg.eye_gaze.deinit() + pl.destroy() +``` + +### 2.3. 
人脸检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aidemo + +# 自定义人脸检测类,继承自AIBase基类 +class FaceDetectionApp(AIBase): + def __init__(self, kmodel_path, model_input_size, anchors, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.model_input_size = model_input_size # 模型输入分辨率 + self.confidence_threshold = confidence_threshold # 置信度阈值 + self.nms_threshold = nms_threshold # NMS(非极大值抑制)阈值 + self.anchors = anchors # 锚点数据,用于目标检测 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] # 显示分辨率,并对宽度进行16的对齐 + self.debug_mode = debug_mode # 是否开启调试模式 + self.ai2d = Ai2d(debug_mode) # 实例化Ai2d,用于实现模型预处理 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) # 设置Ai2d的输入输出格式和类型 + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): # 计时器,如果debug_mode大于0则开启 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + top, bottom, left, right = self.get_padding_param() # 获取padding参数 + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [104, 117, 123]) # 填充边缘 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) # 缩放图像 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) # 构建预处理流程 + + # 自定义当前任务的后处理,results是模型输出array列表,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + post_ret = aidemo.face_det_post_process(self.confidence_threshold, self.nms_threshold, self.model_input_size[1], self.anchors, self.rgb888p_size, results) + if len(post_ret) == 0: + return post_ret + else: + return post_ret[0] + + # 绘制检测结果到画面上 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + if dets: + pl.osd_img.clear() # 清除OSD图像 + for det in dets: + # 将检测框的坐标转换为显示分辨率下的坐标 + x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) + x = x * self.display_size[0] // self.rgb888p_size[0] + y = y * self.display_size[1] // self.rgb888p_size[1] + w = w * self.display_size[0] // self.rgb888p_size[0] + h = h * self.display_size[1] // self.rgb888p_size[1] + pl.osd_img.draw_rectangle(x, y, w, h, color=(255, 255, 0, 255), thickness=2) # 绘制矩形框 + else: + pl.osd_img.clear() + + # 获取padding参数 + def get_padding_param(self): + dst_w = self.model_input_size[0] # 模型输入宽度 + dst_h = self.model_input_size[1] # 模型输入高度 + ratio_w = dst_w / self.rgb888p_size[0] # 宽度缩放比例 + ratio_h = dst_h / self.rgb888p_size[1] # 高度缩放比例 + ratio = min(ratio_w, ratio_h) # 取较小的缩放比例 + new_w = int(ratio * self.rgb888p_size[0]) # 新宽度 + new_h = int(ratio * self.rgb888p_size[1]) # 新高度 + dw = (dst_w - new_w) / 2 # 宽度差 + dh = (dst_h - new_h) / 2 # 高度差 + top = int(round(0)) + bottom = int(round(dh * 2 + 0.1)) + 
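+ # 补充注释(非原始代码,仅作数值示例):按本例参数 rgb888p_size=[1920,1080]、model_input_size=[320,320],
+ # ratio=min(320/1920, 320/1080)=1/6,new_w=320、new_h=180,dw=0、dh=70,
+ # 于是 top=0、bottom=140、left=0、right=0,即等比缩放后在图像下方补 140 行 [104,117,123] 凑成 320×320 输入。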
left = int(round(0)) + right = int(round(dw * 2 - 0.1)) + return top, bottom, left, right + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径和其他参数 + kmodel_path = "/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 其它参数 + confidence_threshold = 0.5 + nms_threshold = 0.2 + anchor_len = 4200 + det_dim = 4 + anchors_path = "/sdcard/app/tests/utils/prior_data_320.bin" + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len, det_dim)) + rgb888p_size = [1920, 1080] + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() # 创建PipeLine实例 + # 初始化自定义人脸检测实例 + face_det = FaceDetectionApp(kmodel_path, model_input_size=[320, 320], anchors=anchors, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + face_det.config_preprocess() # 配置预处理 + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = face_det.run(img) # 推理当前帧 + face_det.draw_result(pl, res) # 绘制结果 + pl.show_image() # 显示结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + face_det.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 +``` + +### 2.4. 人脸关键部位 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # 检测任务锚框 + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 设置padding预处理 + self.ai2d.pad(self.get_pad_param(), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里使用了aidemo的face_det_post_process列表 + def postprocess(self,results): + with 
ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results) + if len(res)==0: + return res + else: + return res[0] + + # 计算padding参数 + def get_pad_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / self.rgb888p_size[0] + ratio_h = dst_h / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义人脸关键点任务类 +class FaceLandMarkApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 关键点模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 目标矩阵 + self.matrix_dst=None + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了affine,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算目标矩阵,并获取仿射变换矩阵 + self.matrix_dst = self.get_affine_matrix(det) + affine_matrix = [self.matrix_dst[0][0],self.matrix_dst[0][1],self.matrix_dst[0][2], + self.matrix_dst[1][0],self.matrix_dst[1][1],self.matrix_dst[1][2]] + # 设置仿射变换预处理 + self.ai2d.affine(nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里使用了aidemo库的invert_affine_transform接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + pred=results[0] + # (1)将人脸关键点输出变换模型输入 + half_input_len = self.model_input_size[0] // 2 + pred = pred.flatten() + for i in range(len(pred)): + pred[i] += (pred[i] + 1) * half_input_len + # (2)获取仿射矩阵的逆矩阵 + matrix_dst_inv = aidemo.invert_affine_transform(self.matrix_dst) + matrix_dst_inv = matrix_dst_inv.flatten() + # (3)对每个关键点进行逆变换 + half_out_len = len(pred) // 2 + for kp_id in range(half_out_len): + old_x = pred[kp_id * 2] + old_y = pred[kp_id * 2 + 1] + # 逆变换公式 + new_x = old_x * matrix_dst_inv[0] + old_y * matrix_dst_inv[1] + matrix_dst_inv[2] + new_y = old_x * matrix_dst_inv[3] + old_y * matrix_dst_inv[4] + matrix_dst_inv[5] + pred[kp_id * 2] = new_x + pred[kp_id * 2 + 1] = new_y + return pred + + def get_affine_matrix(self,bbox): + # 获取仿射矩阵,用于将边界框映射到模型输入空间 + with ScopedTiming("get_affine_matrix", self.debug_mode > 1): + # 从边界框提取坐标和尺寸 + x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) + # 计算缩放比例,使得边界框映射到模型输入空间的一部分 + scale_ratio = 
(self.model_input_size[0]) / (max(w, h) * 1.5) + # 计算边界框中心点在模型输入空间的坐标 + cx = (x1 + w / 2) * scale_ratio + cy = (y1 + h / 2) * scale_ratio + # 计算模型输入空间的一半长度 + half_input_len = self.model_input_size[0] / 2 + # 创建仿射矩阵并进行设置 + matrix_dst = np.zeros((2, 3), dtype=np.float) + matrix_dst[0, 0] = scale_ratio + matrix_dst[0, 1] = 0 + matrix_dst[0, 2] = half_input_len - cx + matrix_dst[1, 0] = 0 + matrix_dst[1, 1] = scale_ratio + matrix_dst[1, 2] = half_input_len - cy + return matrix_dst + +# 人脸标志解析 +class FaceLandMark: + def __init__(self,face_det_kmodel,face_landmark_kmodel,det_input_size,landmark_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸标志解析模型路径 + self.face_landmark_kmodel=face_landmark_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸标志解析模型输入分辨率 + self.landmark_input_size=landmark_input_size + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + + # 人脸关键点不同部位关键点列表 + self.dict_kp_seq = [ + [43, 44, 45, 47, 46, 50, 51, 49, 48], # left_eyebrow + [97, 98, 99, 100, 101, 105, 104, 103, 102], # right_eyebrow + [35, 36, 33, 37, 39, 42, 40, 41], # left_eye + [89, 90, 87, 91, 93, 96, 94, 95], # right_eye + [34, 88], # pupil + [72, 73, 74, 86], # bridge_nose + [77, 78, 79, 80, 85, 84, 83], # wing_nose + [52, 55, 56, 53, 59, 58, 61, 68, 67, 71, 63, 64], # out_lip + [65, 54, 60, 57, 69, 70, 62, 66], # in_lip + [1, 9, 10, 11, 12, 13, 14, 15, 16, 2, 3, 4, 5, 6, 7, 8, 0, 24, 23, 22, 21, 20, 19, 18, 32, 31, 30, 29, 28, 27, 26, 25, 17] # basin + ] + + # 人脸关键点不同部位(顺序同dict_kp_seq)颜色配置,argb + self.color_list_for_osd_kp = [ + (255, 0, 255, 0), + (255, 0, 255, 0), + (255, 255, 0, 255), + (255, 255, 0, 255), + (255, 255, 0, 0), + (255, 255, 170, 0), + (255, 255, 255, 0), + (255, 0, 255, 255), + (255, 255, 220, 50), + (255, 30, 30, 255) + ] + # 人脸检测实例 + self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + # 人脸标志解析实例 + self.face_landmark=FaceLandMarkApp(self.face_landmark_kmodel,model_input_size=self.landmark_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + # 配置人脸检测的预处理 + self.face_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行人脸检测 + det_boxes=self.face_det.run(input_np) + landmark_res=[] + for det_box in det_boxes: + # 对每一个检测到的人脸解析关键部位 + self.face_landmark.config_preprocess(det_box) + res=self.face_landmark.run(input_np) + landmark_res.append(res) + return det_boxes,landmark_res + + + # 绘制人脸解析效果 + def draw_result(self,pl,dets,landmark_res): + pl.osd_img.clear() + if dets: + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_np) + for pred in landmark_res: + # (1)获取单个人脸框对应的人脸关键点 + for sub_part_index in range(len(self.dict_kp_seq)): + # (2)构建人脸某个区域关键点集 + sub_part = self.dict_kp_seq[sub_part_index] + face_sub_part_point_set = [] + for kp_index in 
range(len(sub_part)): + real_kp_index = sub_part[kp_index] + x, y = pred[real_kp_index * 2], pred[real_kp_index * 2 + 1] + x = int(x * self.display_size[0] // self.rgb888p_size[0]) + y = int(y * self.display_size[1] // self.rgb888p_size[1]) + face_sub_part_point_set.append((x, y)) + # (3)画人脸不同区域的轮廓 + if sub_part_index in (9, 6): + color = np.array(self.color_list_for_osd_kp[sub_part_index],dtype = np.uint8) + face_sub_part_point_set = np.array(face_sub_part_point_set) + aidemo.polylines(draw_img_np, face_sub_part_point_set,False,color,5,8,0) + elif sub_part_index == 4: + color = self.color_list_for_osd_kp[sub_part_index] + for kp in face_sub_part_point_set: + x,y = kp[0],kp[1] + draw_img.draw_circle(x,y ,2, color, 1) + else: + color = np.array(self.color_list_for_osd_kp[sub_part_index],dtype = np.uint8) + face_sub_part_point_set = np.array(face_sub_part_point_set) + aidemo.contours(draw_img_np, face_sub_part_point_set,-1,color,2,8) + pl.osd_img.copy_from(draw_img) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸关键标志模型路径 + face_landmark_kmodel_path="/sdcard/app/tests/kmodel/face_landmark.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + face_det_input_size=[320,320] + face_landmark_input_size=[192,192] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + flm=FaceLandMark(face_det_kmodel_path,face_landmark_kmodel_path,det_input_size=face_det_input_size,landmark_input_size=face_landmark_input_size,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,landmark_res=flm.run(img) # 推理当前帧 + flm.draw_result(pl,det_boxes,landmark_res) # 绘制推理结果 + pl.show_image() # 展示推理效果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + flm.face_det.deinit() + flm.face_landmark.deinit() + pl.destroy() +``` + +### 2.5. 
人脸3D网络 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # 检测任务锚框 + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 设置padding预处理 + self.ai2d.pad(self.get_pad_param(), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型推理输出的array列表,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results) + if len(res)==0: + return res + else: + return res[0] + + # padding参数计算 + def get_pad_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / self.rgb888p_size[0] + ratio_h = dst_h / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义人脸网格任务类 +class FaceMeshApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 人脸网格模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 人脸mesh参数均值 + self.param_mean = 
np.array([0.0003492636315058917,2.52790130161884e-07,-6.875197868794203e-07,60.1679573059082,-6.295513230725192e-07,0.0005757200415246189,-5.085391239845194e-05,74.2781982421875,5.400917189035681e-07,6.574138387804851e-05,0.0003442012530285865,-66.67157745361328,-346603.6875,-67468.234375,46822.265625,-15262.046875,4350.5888671875,-54261.453125,-18328.033203125,-1584.328857421875,-84566.34375,3835.960693359375,-20811.361328125,38094.9296875,-19967.85546875,-9241.3701171875,-19600.71484375,13168.08984375,-5259.14404296875,1848.6478271484375,-13030.662109375,-2435.55615234375,-2254.20654296875,-14396.5615234375,-6176.3291015625,-25621.919921875,226.39447021484375,-6326.12353515625,-10867.2509765625,868.465087890625,-5831.14794921875,2705.123779296875,-3629.417724609375,2043.9901123046875,-2446.6162109375,3658.697021484375,-7645.98974609375,-6674.45263671875,116.38838958740234,7185.59716796875,-1429.48681640625,2617.366455078125,-1.2070955038070679,0.6690792441368103,-0.17760828137397766,0.056725528091192245,0.03967815637588501,-0.13586315512657166,-0.09223993122577667,-0.1726071834564209,-0.015804484486579895,-0.1416848599910736],dtype=np.float) + # 人脸mesh参数方差 + self.param_std = np.array([0.00017632152594160289,6.737943476764485e-05,0.00044708489440381527,26.55023193359375,0.0001231376954820007,4.493021697271615e-05,7.923670636955649e-05,6.982563018798828,0.0004350444069132209,0.00012314890045672655,0.00017400001524947584,20.80303955078125,575421.125,277649.0625,258336.84375,255163.125,150994.375,160086.109375,111277.3046875,97311.78125,117198.453125,89317.3671875,88493.5546875,72229.9296875,71080.2109375,50013.953125,55968.58203125,47525.50390625,49515.06640625,38161.48046875,44872.05859375,46273.23828125,38116.76953125,28191.162109375,32191.4375,36006.171875,32559.892578125,25551.1171875,24267.509765625,27521.3984375,23166.53125,21101.576171875,19412.32421875,19452.203125,17454.984375,22537.623046875,16174.28125,14671.640625,15115.6884765625,13870.0732421875,13746.3125,12663.1337890625,1.5870834589004517,1.5077009201049805,0.5881357789039612,0.5889744758605957,0.21327851712703705,0.2630201280117035,0.2796429395675659,0.38030216097831726,0.16162841022014618,0.2559692859649658],dtype=np.float) + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算crop参数,并设置crop预处理 + roi = self.parse_roi_box_from_bbox(det) + self.ai2d.crop(int(roi[0]),int(roi[1]),int(roi[2]),int(roi[3])) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + return roi + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + param = results[0] * self.param_std + self.param_mean + return param + + def parse_roi_box_from_bbox(self,bbox): + # 获取人脸roi + x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) + old_size = (w + h) / 2 + center_x = x1 + w / 2 + center_y = y1 + h / 2 
+ old_size * 0.14 + size = int(old_size * 1.58) + x0 = center_x - float(size) / 2 + y0 = center_y - float(size) / 2 + x1 = x0 + size + y1 = y0 + size + x0 = max(0, min(x0, self.rgb888p_size[0])) + y0 = max(0, min(y0, self.rgb888p_size[1])) + x1 = max(0, min(x1, self.rgb888p_size[0])) + y1 = max(0, min(y1, self.rgb888p_size[1])) + roi = (x0, y0, x1 - x0, y1 - y0) + return roi + +# 自定义人脸网格后处理任务类 +class FaceMeshPostApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 人脸网格模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 重写预处理函数preprocess,因为该模型的预处理不是单纯调用一个ai2d能实现的,返回模型输入的tensor列表 + def preprocess(self,param): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # face mesh post模型预处理,param解析 + param = param[0] + trans_dim, shape_dim, exp_dim = 12, 40, 10 + R_ = param[:trans_dim].copy().reshape((3, -1)) + R = R_[:, :3].copy() + offset = R_[:, 3].copy() + offset = offset.reshape((3, 1)) + alpha_shp = param[trans_dim:trans_dim + shape_dim].copy().reshape((-1, 1)) + alpha_exp = param[trans_dim + shape_dim:].copy().reshape((-1, 1)) + R_tensor = nn.from_numpy(R) + offset_tensor = nn.from_numpy(offset) + alpha_shp_tensor = nn.from_numpy(alpha_shp) + alpha_exp_tensor = nn.from_numpy(alpha_exp) + return [R_tensor,offset_tensor,alpha_shp_tensor,alpha_exp_tensor] + + # 自定义模型后处理,这里调用了aidemo的face_mesh_post_process接口 + def postprocess(self,results,roi): + with ScopedTiming("postprocess",self.debug_mode > 0): + x, y, w, h = map(lambda x: int(round(x, 0)), roi[:4]) + x = x * self.display_size[0] // self.rgb888p_size[0] + y = y * self.display_size[1] // self.rgb888p_size[1] + w = w * self.display_size[0] // self.rgb888p_size[0] + h = h * self.display_size[1] // self.rgb888p_size[1] + roi_array = np.array([x,y,w,h],dtype=np.float) + aidemo.face_mesh_post_process(roi_array,results[0]) + return results[0] + +# 3D人脸网格 +class FaceMesh: + def __init__(self,face_det_kmodel,face_mesh_kmodel,mesh_post_kmodel,det_input_size,mesh_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸3D网格模型路径 + self.face_mesh_kmodel=face_mesh_kmodel + # 人脸3D网格后处理模型路径 + self.mesh_post_kmodel=mesh_post_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸3D网格模型输入分辨率 + self.mesh_input_size=mesh_input_size + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + # 人脸检测实例 + 
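+ # 补充注释(非原始代码,仅作说明):face_alignment.kmodel 输出 62 维人脸参数(12 维位姿 R/offset、
+ # 40 维形状 alpha_shp、10 维表情 alpha_exp),先按 param_mean/param_std 反归一化,
+ # 再经后处理模型与 aidemo.face_mesh_post_process 还原为显示坐标系下的人脸网格顶点,最后由 face_draw_mesh 绘制。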
self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + # 人脸网格实例 + self.face_mesh=FaceMeshApp(self.face_mesh_kmodel,model_input_size=self.mesh_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + # 人脸网格后处理实例 + self.face_mesh_post=FaceMeshPostApp(self.mesh_post_kmodel,model_input_size=self.mesh_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + # 人脸检测预处理配置 + self.face_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行人脸检测 + det_boxes=self.face_det.run(input_np) + mesh_res=[] + for det_box in det_boxes: + # 对检测到的每一个人脸配置预处理,执行人脸网格和人脸网格后处理 + roi=self.face_mesh.config_preprocess(det_box) + param=self.face_mesh.run(input_np) + tensors=self.face_mesh_post.preprocess(param) + results=self.face_mesh_post.inference(tensors) + res=self.face_mesh_post.postprocess(results,roi) + mesh_res.append(res) + return det_boxes,mesh_res + + + # 绘制人脸解析效果 + def draw_result(self,pl,dets,mesh_res): + pl.osd_img.clear() + if dets: + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_np) + for vertices in mesh_res: + aidemo.face_draw_mesh(draw_img_np, vertices) + pl.osd_img.copy_from(draw_img) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸网格模型路径 + face_mesh_kmodel_path="/sdcard/app/tests/kmodel/face_alignment.kmodel" + # 人脸网格后处理模型路径 + face_mesh_post_kmodel_path="/sdcard/app/tests/kmodel/face_alignment_post.kmodel" + # 其他参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + face_det_input_size=[320,320] + face_mesh_input_size=[120,120] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + fm=FaceMesh(face_det_kmodel_path,face_mesh_kmodel_path,face_mesh_post_kmodel_path,det_input_size=face_det_input_size,mesh_input_size=face_mesh_input_size,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,mesh_res=fm.run(img) # 推理当前帧 + fm.draw_result(pl,det_boxes,mesh_res) # 绘制推理结果 + pl.show_image() # 显示推理效果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + fm.face_det.deinit() + fm.face_mesh.deinit() + fm.face_mesh_post.deinit() + pl.destroy() +``` + +### 2.6. 
人脸解析 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数,并设置padding预处理 + self.ai2d.pad(self.get_pad_param(), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里调用了aidemo库的face_det_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results) + if len(res)==0: + return res + else: + return res[0] + + # 计算padding参数 + def get_pad_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / self.rgb888p_size[0] + ratio_h = dst_h / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义人脸解析任务类 +class FaceParseApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + 
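+ # 补充注释(非原始代码,仅作说明):人脸解析模型以仿射变换裁出的人脸区域作为输入,
+ # postprocess 直接返回模型的第一个输出,再由 aidemo.face_parse_post_process 将解析结果叠加到对应检测框区域显示。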
self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了affine,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算仿射变换矩阵并设置affine预处理 + matrix_dst = self.get_affine_matrix(det) + self.ai2d.affine(nn.interp_method.cv2_bilinear,0, 0, 127, 1,matrix_dst) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里将第一个输出返回 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + return results[0] + + def get_affine_matrix(self,bbox): + # 获取仿射矩阵,用于将边界框映射到模型输入空间 + with ScopedTiming("get_affine_matrix", self.debug_mode > 1): + # 设置缩放因子 + factor = 2.7 + # 从边界框提取坐标和尺寸 + x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) + # 模型输入大小 + edge_size = self.model_input_size[1] + # 平移距离,使得模型输入空间的中心对准原点 + trans_distance = edge_size / 2.0 + # 计算边界框中心点的坐标 + center_x = x1 + w / 2.0 + center_y = y1 + h / 2.0 + # 计算最大边长 + maximum_edge = factor * (h if h > w else w) + # 计算缩放比例 + scale = edge_size * 2.0 / maximum_edge + # 计算平移参数 + cx = trans_distance - scale * center_x + cy = trans_distance - scale * center_y + # 创建仿射矩阵 + affine_matrix = [scale, 0, cx, 0, scale, cy] + return affine_matrix + +# 人脸解析任务 +class FaceParse: + def __init__(self,face_det_kmodel,face_parse_kmodel,det_input_size,parse_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸解析模型路径 + self.face_pose_kmodel=face_parse_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸解析模型输入分辨率 + self.parse_input_size=parse_input_size + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + # 人脸检测任务类实例 + self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + # 人脸解析实例 + self.face_parse=FaceParseApp(self.face_pose_kmodel,model_input_size=self.parse_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + # 人脸检测预处理配置 + self.face_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行人脸检测 + det_boxes=self.face_det.run(input_np) + parse_res=[] + for det_box in det_boxes: + # 对检测到每一个人脸进行人脸解析 + self.face_parse.config_preprocess(det_box) + res=self.face_parse.run(input_np) + parse_res.append(res) + return det_boxes,parse_res + + + # 绘制人脸解析效果 + def draw_result(self,pl,dets,parse_res): + pl.osd_img.clear() + if dets: + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img=image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=draw_img_np) + for i,det in enumerate(dets): + # 
(1)将人脸检测框画到draw_img + x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) + x = x * self.display_size[0] // self.rgb888p_size[0] + y = y * self.display_size[1] // self.rgb888p_size[1] + w = w * self.display_size[0] // self.rgb888p_size[0] + h = h * self.display_size[1] // self.rgb888p_size[1] + aidemo.face_parse_post_process(draw_img_np,self.rgb888p_size,self.display_size,self.parse_input_size[0],det.tolist(),parse_res[i]) + pl.osd_img.copy_from(draw_img) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸解析模型路径 + face_parse_kmodel_path="/sdcard/app/tests/kmodel/face_parse.kmodel" + # 其他参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + face_det_input_size=[320,320] + face_parse_input_size=[320,320] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + fp=FaceParse(face_det_kmodel_path,face_parse_kmodel_path,det_input_size=face_det_input_size,parse_input_size=face_parse_input_size,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,parse_res=fp.run(img) # 推理当前帧 + fp.draw_result(pl,det_boxes,parse_res) # 绘制当前帧推理结果 + pl.show_image() # 展示推理效果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + fp.face_det.deinit() + fp.face_parse.deinit() + pl.destroy() +``` + +### 2.7. 
人脸姿态 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数,并设置padding预处理 + self.ai2d.pad(self.get_pad_param(), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results) + if len(res)==0: + return res + else: + return res[0] + + # 计算padding参数 + def get_pad_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / self.rgb888p_size[0] + ratio_h = dst_h / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义人脸姿态任务类 +class FacePoseApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 人脸姿态模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + 
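+        # 说明:预处理输入为RGB888P帧,输出为kmodel所需的uint8输入张量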
self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了affine,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算affine矩阵并设置affine预处理 + matrix_dst = self.get_affine_matrix(det) + self.ai2d.affine(nn.interp_method.cv2_bilinear,0, 0, 127, 1,matrix_dst) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,计算欧拉角 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + R,eular = self.get_euler(results[0][0]) + return R,eular + + def get_affine_matrix(self,bbox): + # 获取仿射矩阵,用于将边界框映射到模型输入空间 + with ScopedTiming("get_affine_matrix", self.debug_mode > 1): + # 设置缩放因子 + factor = 2.7 + # 从边界框提取坐标和尺寸 + x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) + # 模型输入大小 + edge_size = self.model_input_size[1] + # 平移距离,使得模型输入空间的中心对准原点 + trans_distance = edge_size / 2.0 + # 计算边界框中心点的坐标 + center_x = x1 + w / 2.0 + center_y = y1 + h / 2.0 + # 计算最大边长 + maximum_edge = factor * (h if h > w else w) + # 计算缩放比例 + scale = edge_size * 2.0 / maximum_edge + # 计算平移参数 + cx = trans_distance - scale * center_x + cy = trans_distance - scale * center_y + # 创建仿射矩阵 + affine_matrix = [scale, 0, cx, 0, scale, cy] + return affine_matrix + + def rotation_matrix_to_euler_angles(self,R): + # 将旋转矩阵(3x3 矩阵)转换为欧拉角(pitch、yaw、roll) + # 计算 sin(yaw) + sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2) + if sy < 1e-6: + # 若 sin(yaw) 过小,说明 pitch 接近 ±90 度 + pitch = np.arctan2(-R[1, 2], R[1, 1]) * 180 / np.pi + yaw = np.arctan2(-R[2, 0], sy) * 180 / np.pi + roll = 0 + else: + # 计算 pitch、yaw、roll 的角度 + pitch = np.arctan2(R[2, 1], R[2, 2]) * 180 / np.pi + yaw = np.arctan2(-R[2, 0], sy) * 180 / np.pi + roll = np.arctan2(R[1, 0], R[0, 0]) * 180 / np.pi + return [pitch,yaw,roll] + + def get_euler(self,data): + # 获取旋转矩阵和欧拉角 + R = data[:3, :3].copy() + eular = self.rotation_matrix_to_euler_angles(R) + return R,eular + +# 人脸姿态任务类 +class FacePose: + def __init__(self,face_det_kmodel,face_pose_kmodel,det_input_size,pose_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸姿态模型路径 + self.face_pose_kmodel=face_pose_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸姿态模型输入分辨率 + self.pose_input_size=pose_input_size + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.face_pose=FacePoseApp(self.face_pose_kmodel,model_input_size=self.pose_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + 
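+        # 检测模型的预处理只需配置一次;姿态模型的预处理依赖检测框,在run中对每个框单独配置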
self.face_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 人脸检测 + det_boxes=self.face_det.run(input_np) + pose_res=[] + for det_box in det_boxes: + # 对检测到的每一个人脸做人脸姿态估计 + self.face_pose.config_preprocess(det_box) + R,eular=self.face_pose.run(input_np) + pose_res.append((R,eular)) + return det_boxes,pose_res + + + # 绘制人脸姿态角效果 + def draw_result(self,pl,dets,pose_res): + pl.osd_img.clear() + if dets: + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img=image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=draw_img_np) + line_color = np.array([255, 0, 0 ,255],dtype=np.uint8) #bgra + for i,det in enumerate(dets): + # (1)获取人脸姿态矩阵和欧拉角 + projections,center_point = self.build_projection_matrix(det) + R,euler = pose_res[i] + # (2)遍历人脸投影矩阵的关键点,进行投影,并将结果画在图像上 + first_points = [] + second_points = [] + for pp in range(8): + sum_x, sum_y = 0.0, 0.0 + for cc in range(3): + sum_x += projections[pp][cc] * R[cc][0] + sum_y += projections[pp][cc] * (-R[cc][1]) + center_x,center_y = center_point[0],center_point[1] + x = (sum_x + center_x) / self.rgb888p_size[0] * self.display_size[0] + y = (sum_y + center_y) / self.rgb888p_size[1] * self.display_size[1] + x = max(0, min(x, self.display_size[0])) + y = max(0, min(y, self.display_size[1])) + if pp < 4: + first_points.append((x, y)) + else: + second_points.append((x, y)) + first_points = np.array(first_points,dtype=np.float) + aidemo.polylines(draw_img_np,first_points,True,line_color,2,8,0) + second_points = np.array(second_points,dtype=np.float) + aidemo.polylines(draw_img_np,second_points,True,line_color,2,8,0) + for ll in range(4): + x0, y0 = int(first_points[ll][0]),int(first_points[ll][1]) + x1, y1 = int(second_points[ll][0]),int(second_points[ll][1]) + draw_img.draw_line(x0, y0, x1, y1, color = (255, 0, 0 ,255), thickness = 2) + pl.osd_img.copy_from(draw_img) + + def build_projection_matrix(self,det): + x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4]) + # 计算边界框中心坐标 + center_x = x1 + w / 2.0 + center_y = y1 + h / 2.0 + # 定义后部(rear)和前部(front)的尺寸和深度 + rear_width = 0.5 * w + rear_height = 0.5 * h + rear_depth = 0 + factor = np.sqrt(2.0) + front_width = factor * rear_width + front_height = factor * rear_height + front_depth = factor * rear_width # 使用宽度来计算深度,也可以使用高度,取决于需求 + # 定义立方体的顶点坐标 + temp = [ + [-rear_width, -rear_height, rear_depth], + [-rear_width, rear_height, rear_depth], + [rear_width, rear_height, rear_depth], + [rear_width, -rear_height, rear_depth], + [-front_width, -front_height, front_depth], + [-front_width, front_height, front_depth], + [front_width, front_height, front_depth], + [front_width, -front_height, front_depth] + ] + projections = np.array(temp) + # 返回投影矩阵和中心坐标 + return projections, (center_x, center_y) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸姿态模型路径 + face_pose_kmodel_path="/sdcard/app/tests/kmodel/face_pose.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + face_det_input_size=[320,320] + face_pose_input_size=[120,120] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + 
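+    # PipeLine封装了sensor取帧(get_frame)、OSD绘制(osd_img)与显示输出(show_image)的完整流程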
pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + fp=FacePose(face_det_kmodel_path,face_pose_kmodel_path,det_input_size=face_det_input_size,pose_input_size=face_pose_input_size,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,pose_res=fp.run(img) # 推理当前帧 + fp.draw_result(pl,det_boxes,pose_res) # 绘制推理效果 + pl.show_image() # 展示推理效果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + fp.face_det.deinit() + fp.face_pose.deinit() + pl.destroy() +``` + +### 2.8. 人脸识别 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys +import math + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数,并设置padding预处理 + self.ai2d.pad(self.get_pad_param(), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results) + if len(res)==0: + return res,res + else: + return res[0],res[1] + + def get_pad_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / self.rgb888p_size[0] + ratio_h = dst_h / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + 
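+        # 注:top/left固定为0,等比缩放后产生的padding全部补在图像下方和右方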
left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义人脸注册任务类 +class FaceRegistrationApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 标准5官 + self.umeyama_args_112 = [ + 38.2946 , 51.6963 , + 73.5318 , 51.5014 , + 56.0252 , 71.7366 , + 41.5493 , 92.3655 , + 70.7299 , 92.2041 + ] + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了affine,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,landm,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算affine矩阵,并设置仿射变换预处理 + affine_matrix = self.get_affine_matrix(landm) + self.ai2d.affine(nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + return results[0][0] + + def svd22(self,a): + # svd + s = [0.0, 0.0] + u = [0.0, 0.0, 0.0, 0.0] + v = [0.0, 0.0, 0.0, 0.0] + s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2 + s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2)) + v[2] = math.sin((math.atan2(2 * (a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \ + s[0] > s[1] else 0 + v[0] = math.sqrt(1 - v[2] ** 2) + v[1] = -v[2] + v[3] = v[0] + u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1 + u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0 + u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2] + u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0] + v[0] = -v[0] + v[2] = -v[2] + return u, s, v + + def image_umeyama_112(self,src): + # 使用Umeyama算法计算仿射变换矩阵 + SRC_NUM = 5 + SRC_DIM = 2 + src_mean = [0.0, 0.0] + dst_mean = [0.0, 0.0] + for i in range(0,SRC_NUM * 2,2): + src_mean[0] += src[i] + src_mean[1] += src[i + 1] + dst_mean[0] += self.umeyama_args_112[i] + dst_mean[1] += self.umeyama_args_112[i + 1] + src_mean[0] /= SRC_NUM + src_mean[1] /= SRC_NUM + dst_mean[0] /= SRC_NUM + dst_mean[1] /= SRC_NUM + src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] + dst_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] + for i in range(SRC_NUM): + src_demean[i][0] = src[2 * i] - src_mean[0] + src_demean[i][1] = src[2 * i + 1] - src_mean[1] + dst_demean[i][0] = self.umeyama_args_112[2 * i] - dst_mean[0] + dst_demean[i][1] = self.umeyama_args_112[2 * i + 1] - dst_mean[1] + A = [[0.0, 0.0], [0.0, 0.0]] + for i in range(SRC_DIM): + for k in range(SRC_DIM): + for j in range(SRC_NUM): + A[i][k] += dst_demean[j][i] * src_demean[j][k] + A[i][k] /= SRC_NUM + T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + U, S, V = self.svd22([A[0][0], A[0][1], A[1][0], A[1][1]]) + 
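+        # 由2x2 SVD的结果组合出仿射矩阵的旋转部分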
T[0][0] = U[0] * V[0] + U[1] * V[2] + T[0][1] = U[0] * V[1] + U[1] * V[3] + T[1][0] = U[2] * V[0] + U[3] * V[2] + T[1][1] = U[2] * V[1] + U[3] * V[3] + scale = 1.0 + src_demean_mean = [0.0, 0.0] + src_demean_var = [0.0, 0.0] + for i in range(SRC_NUM): + src_demean_mean[0] += src_demean[i][0] + src_demean_mean[1] += src_demean[i][1] + src_demean_mean[0] /= SRC_NUM + src_demean_mean[1] /= SRC_NUM + for i in range(SRC_NUM): + src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0]) + src_demean_var[1] += (src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1]) + src_demean_var[0] /= SRC_NUM + src_demean_var[1] /= SRC_NUM + scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1]) + T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1]) + T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1]) + T[0][0] *= scale + T[0][1] *= scale + T[1][0] *= scale + T[1][1] *= scale + return T + + def get_affine_matrix(self,sparse_points): + # 获取affine变换矩阵 + with ScopedTiming("get_affine_matrix", self.debug_mode > 1): + # 使用Umeyama算法计算仿射变换矩阵 + matrix_dst = self.image_umeyama_112(sparse_points) + matrix_dst = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], + matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] + return matrix_dst + +# 人脸识别任务类 +class FaceRecognition: + def __init__(self,face_det_kmodel,face_reg_kmodel,det_input_size,reg_input_size,database_dir,anchors,confidence_threshold=0.25,nms_threshold=0.3,face_recognition_threshold=0.75,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸识别模型路径 + self.face_reg_kmodel=face_reg_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸识别模型输入分辨率 + self.reg_input_size=reg_input_size + self.database_dir=database_dir + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.face_recognition_threshold=face_recognition_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.max_register_face = 100 # 数据库最多人脸个数 + self.feature_num = 128 # 人脸识别特征维度 + self.valid_register_face = 0 # 已注册人脸数 + self.db_name= [] + self.db_data= [] + self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.face_reg=FaceRegistrationApp(self.face_reg_kmodel,model_input_size=self.reg_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.face_det.config_preprocess() + # 人脸数据库初始化 + self.database_init() + + # run函数 + def run(self,input_np): + # 执行人脸检测 + det_boxes,landms=self.face_det.run(input_np) + recg_res = [] + for landm in landms: + # 针对每个人脸五官点,推理得到人脸特征,并计算特征在数据库中相似度 + self.face_reg.config_preprocess(landm) + feature=self.face_reg.run(input_np) + res = self.database_search(feature) + recg_res.append(res) + return det_boxes,recg_res + + def database_init(self): + # 数据初始化,构建数据库人名列表和数据库特征列表 + with ScopedTiming("database_init", self.debug_mode > 1): + db_file_list = os.listdir(self.database_dir) + for db_file in db_file_list: + if not db_file.endswith('.bin'): + continue + if 
self.valid_register_face >= self.max_register_face: + break + valid_index = self.valid_register_face + full_db_file = self.database_dir + db_file + with open(full_db_file, 'rb') as f: + data = f.read() + feature = np.frombuffer(data, dtype=np.float) + self.db_data.append(feature) + name = db_file.split('.')[0] + self.db_name.append(name) + self.valid_register_face += 1 + + def database_reset(self): + # 数据库清空 + with ScopedTiming("database_reset", self.debug_mode > 1): + print("database clearing...") + self.db_name = [] + self.db_data = [] + self.valid_register_face = 0 + print("database clear Done!") + + def database_search(self,feature): + # 数据库查询 + with ScopedTiming("database_search", self.debug_mode > 1): + v_id = -1 + v_score_max = 0.0 + # 将当前人脸特征归一化 + feature /= np.linalg.norm(feature) + # 遍历当前人脸数据库,统计最高得分 + for i in range(self.valid_register_face): + db_feature = self.db_data[i] + db_feature /= np.linalg.norm(db_feature) + # 计算数据库特征与当前人脸特征相似度 + v_score = np.dot(feature, db_feature)/2 + 0.5 + if v_score > v_score_max: + v_score_max = v_score + v_id = i + if v_id == -1: + # 数据库中无人脸 + return 'unknown' + elif v_score_max < self.face_recognition_threshold: + # 小于人脸识别阈值,未识别 + return 'unknown' + else: + # 识别成功 + result = 'name: {}, score:{}'.format(self.db_name[v_id],v_score_max) + return result + + # 绘制识别结果 + def draw_result(self,pl,dets,recg_results): + pl.osd_img.clear() + if dets: + for i,det in enumerate(dets): + # (1)画人脸框 + x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4]) + x1 = x1 * self.display_size[0]//self.rgb888p_size[0] + y1 = y1 * self.display_size[1]//self.rgb888p_size[1] + w = w * self.display_size[0]//self.rgb888p_size[0] + h = h * self.display_size[1]//self.rgb888p_size[1] + pl.osd_img.draw_rectangle(x1,y1, w, h, color=(255,0, 0, 255), thickness = 4) + # (2)写人脸识别结果 + recg_text = recg_results[i] + pl.osd_img.draw_string_advanced(x1,y1,32,recg_text,color=(255, 255, 0, 0)) + + +if __name__=="__main__": + # 注意:执行人脸识别任务之前,需要先执行人脸注册任务进行人脸身份注册生成feature数据库 + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸识别模型路径 + face_reg_kmodel_path="/sdcard/app/tests/kmodel/face_recognition.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + database_dir ="/sdcard/app/tests/utils/db/" + rgb888p_size=[1920,1080] + face_det_input_size=[320,320] + face_reg_input_size=[112,112] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + face_recognition_threshold = 0.75 # 人脸识别阈值 + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + fr=FaceRecognition(face_det_kmodel_path,face_reg_kmodel_path,det_input_size=face_det_input_size,reg_input_size=face_reg_input_size,database_dir=database_dir,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,face_recognition_threshold=face_recognition_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total", 1): + img=pl.get_frame() # 获取当前帧 + det_boxes,recg_res=fr.run(img) # 推理当前帧 + fr.draw_result(pl,det_boxes,recg_res) # 绘制推理结果 + pl.show_image() # 展示推理效果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + 
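+        # 退出前释放检测与识别两个模型的资源,并销毁PipeLine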
fr.face_det.deinit() + fr.face_reg.deinit() + pl.destroy() +``` + +### 2.9. 人脸注册 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys +import math + +# 自定义人脸检测任务类 +class FaceDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + self.image_size=[] + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.image_size=[input_image_size[1],input_image_size[0]] + # 计算padding参数,并设置padding预处理 + self.ai2d.pad(self.get_pad_param(ai2d_input_size), 0, [104,117,123]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里使用了aidemo库的face_det_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.image_size,results) + if len(res)==0: + return res + else: + return res[0],res[1] + + def get_pad_param(self,image_input_size): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + # 计算最小的缩放比例,等比例缩放 + ratio_w = dst_w / image_input_size[0] + ratio_h = dst_h / image_input_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * image_input_size[0]) + new_h = (int)(ratio * image_input_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return [0,0,0,0,top, bottom, left, right] + +# 自定义人脸注册任务类 +class FaceRegistrationApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 人脸注册模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + 
self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 标准5官 + self.umeyama_args_112 = [ + 38.2946 , 51.6963 , + 73.5318 , 51.5014 , + 56.0252 , 71.7366 , + 41.5493 , 92.3655 , + 70.7299 , 92.2041 + ] + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了affine,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,landm,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算affine矩阵,并设置仿射变换预处理 + affine_matrix = self.get_affine_matrix(landm) + self.ai2d.affine(nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + return results[0][0] + + def svd22(self,a): + # svd + s = [0.0, 0.0] + u = [0.0, 0.0, 0.0, 0.0] + v = [0.0, 0.0, 0.0, 0.0] + s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2 + s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2)) + v[2] = math.sin((math.atan2(2 * (a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \ + s[0] > s[1] else 0 + v[0] = math.sqrt(1 - v[2] ** 2) + v[1] = -v[2] + v[3] = v[0] + u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1 + u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0 + u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2] + u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0] + v[0] = -v[0] + v[2] = -v[2] + return u, s, v + + def image_umeyama_112(self,src): + # 使用Umeyama算法计算仿射变换矩阵 + SRC_NUM = 5 + SRC_DIM = 2 + src_mean = [0.0, 0.0] + dst_mean = [0.0, 0.0] + for i in range(0,SRC_NUM * 2,2): + src_mean[0] += src[i] + src_mean[1] += src[i + 1] + dst_mean[0] += self.umeyama_args_112[i] + dst_mean[1] += self.umeyama_args_112[i + 1] + src_mean[0] /= SRC_NUM + src_mean[1] /= SRC_NUM + dst_mean[0] /= SRC_NUM + dst_mean[1] /= SRC_NUM + src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] + dst_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] + for i in range(SRC_NUM): + src_demean[i][0] = src[2 * i] - src_mean[0] + src_demean[i][1] = src[2 * i + 1] - src_mean[1] + dst_demean[i][0] = self.umeyama_args_112[2 * i] - dst_mean[0] + dst_demean[i][1] = self.umeyama_args_112[2 * i + 1] - dst_mean[1] + A = [[0.0, 0.0], [0.0, 0.0]] + for i in range(SRC_DIM): + for k in range(SRC_DIM): + for j in range(SRC_NUM): + A[i][k] += dst_demean[j][i] * src_demean[j][k] + A[i][k] /= SRC_NUM + T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + U, S, V = self.svd22([A[0][0], A[0][1], A[1][0], A[1][1]]) + T[0][0] = U[0] * V[0] + U[1] * V[2] + T[0][1] = U[0] * V[1] + U[1] * V[3] + T[1][0] = U[2] * V[0] + U[3] * V[2] + T[1][1] = U[2] * V[1] + U[3] * V[3] + scale = 1.0 + src_demean_mean = [0.0, 0.0] + src_demean_var = [0.0, 0.0] + for i in range(SRC_NUM): + src_demean_mean[0] += src_demean[i][0] + src_demean_mean[1] += src_demean[i][1] + src_demean_mean[0] /= SRC_NUM + src_demean_mean[1] /= SRC_NUM + for i in range(SRC_NUM): + src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0]) + src_demean_var[1] += 
(src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1]) + src_demean_var[0] /= SRC_NUM + src_demean_var[1] /= SRC_NUM + scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1]) + T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1]) + T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1]) + T[0][0] *= scale + T[0][1] *= scale + T[1][0] *= scale + T[1][1] *= scale + return T + + def get_affine_matrix(self,sparse_points): + # 获取affine变换矩阵 + with ScopedTiming("get_affine_matrix", self.debug_mode > 1): + # 使用Umeyama算法计算仿射变换矩阵 + matrix_dst = self.image_umeyama_112(sparse_points) + matrix_dst = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], + matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] + return matrix_dst + +# 人脸注册任务类 +class FaceRegistration: + def __init__(self,face_det_kmodel,face_reg_kmodel,det_input_size,reg_input_size,database_dir,anchors,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 人脸检测模型路径 + self.face_det_kmodel=face_det_kmodel + # 人脸注册模型路径 + self.face_reg_kmodel=face_reg_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸注册模型输入分辨率 + self.reg_input_size=reg_input_size + self.database_dir=database_dir + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.face_det=FaceDetApp(self.face_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,debug_mode=0) + self.face_reg=FaceRegistrationApp(self.face_reg_kmodel,model_input_size=self.reg_input_size,rgb888p_size=self.rgb888p_size) + + # run函数 + def run(self,input_np,img_file): + self.face_det.config_preprocess(input_image_size=[input_np.shape[3],input_np.shape[2]]) + det_boxes,landms=self.face_det.run(input_np) + if det_boxes: + if det_boxes.shape[0] == 1: + # 若是只检测到一张人脸,则将该人脸注册到数据库 + db_i_name = img_file.split('.')[0] + for landm in landms: + self.face_reg.config_preprocess(landm,input_image_size=[input_np.shape[3],input_np.shape[2]]) + reg_result = self.face_reg.run(input_np) + with open(self.database_dir+'{}.bin'.format(db_i_name), "wb") as file: + file.write(reg_result.tobytes()) + print('Success!') + else: + print('Only one person in a picture when you sign up') + else: + print('No person detected') + + def image2rgb888array(self,img): #4维 + # 将Image转换为rgb888格式 + with ScopedTiming("fr_kpu_deinit",self.debug_mode > 0): + img_data_rgb888=img.to_rgb888() + # hwc,rgb888 + img_hwc=img_data_rgb888.to_numpy_ref() + shape=img_hwc.shape + img_tmp = img_hwc.reshape((shape[0] * shape[1], shape[2])) + img_tmp_trans = img_tmp.transpose() + img_res=img_tmp_trans.copy() + # chw,rgb888 + img_return=img_res.reshape((1,shape[2],shape[0],shape[1])) + return img_return + + +if __name__=="__main__": + # 人脸检测模型路径 + face_det_kmodel_path="/sdcard/app/tests/kmodel/face_detection_320.kmodel" + # 人脸注册模型路径 + face_reg_kmodel_path="/sdcard/app/tests/kmodel/face_recognition.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + database_dir="/sdcard/app/tests/utils/db/" + database_img_dir="/sdcard/app/tests/utils/db_img/" + face_det_input_size=[320,320] + 
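+    # 人脸注册模型的输入为经五官对齐(Umeyama仿射变换)后的112x112人脸图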
face_reg_input_size=[112,112] + confidence_threshold=0.5 + nms_threshold=0.2 + anchor_len=4200 + det_dim=4 + anchors = np.fromfile(anchors_path, dtype=np.float) + anchors = anchors.reshape((anchor_len,det_dim)) + max_register_face = 100 #数据库最多人脸个数 + feature_num = 128 #人脸识别特征维度 + + fr=FaceRegistration(face_det_kmodel_path,face_reg_kmodel_path,det_input_size=face_det_input_size,reg_input_size=face_reg_input_size,database_dir=database_dir,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold) + try: + # 获取图像列表 + img_list = os.listdir(database_img_dir) + for img_file in img_list: + #本地读取一张图像 + full_img_file = database_img_dir + img_file + print(full_img_file) + img = image.Image(full_img_file) + img.compress_for_ide() + # 转rgb888的chw格式 + rgb888p_img_ndarry = fr.image2rgb888array(img) + # 人脸注册 + fr.run(rgb888p_img_ndarry,img_file) + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + fr.face_det.deinit() + fr.face_reg.deinit() +``` + +### 2.10. 跌倒检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube + +# 自定义跌倒检测类,继承自AIBase基类 +class FallDetectionApp(AIBase): + def __init__(self, kmodel_path, model_input_size, labels, anchors, confidence_threshold=0.2, nms_threshold=0.5, nms_option=False, strides=[8,16,32], rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.model_input_size = model_input_size # 模型输入分辨率 + self.labels = labels # 分类标签 + self.anchors = anchors # 锚点数据,用于跌倒检测 + self.strides = strides # 步长设置 + self.confidence_threshold = confidence_threshold # 置信度阈值 + self.nms_threshold = nms_threshold # NMS(非极大值抑制)阈值 + self.nms_option = nms_option # NMS选项 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] # sensor给到AI的图像分辨率,并对宽度进行16的对齐 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] # 显示分辨率,并对宽度进行16的对齐 + self.debug_mode = debug_mode # 是否开启调试模式 + self.color = [(255,0, 0, 255), (255,0, 255, 0), (255,255,0, 0), (255,255,0, 255)] # 用于绘制不同类别的颜色 + # Ai2d实例,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): # 计时器,如果debug_mode大于0则开启 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + top, bottom, left, right = self.get_padding_param() # 获取padding参数 + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [0,0,0]) # 填充边缘 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) # 缩放图像 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) # 构建预处理流程 + + # 自定义当前任务的后处理,results是模型输出array的列表,这里使用了aicube库的anchorbasedet_post_process接口 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], 
results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + return dets + + # 绘制检测结果到画面上 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + if dets: + pl.osd_img.clear() # 清除OSD图像 + for det_box in dets: + # 计算显示分辨率下的坐标 + x1, y1, x2, y2 = det_box[2], det_box[3], det_box[4], det_box[5] + w = (x2 - x1) * self.display_size[0] // self.rgb888p_size[0] + h = (y2 - y1) * self.display_size[1] // self.rgb888p_size[1] + x1 = int(x1 * self.display_size[0] // self.rgb888p_size[0]) + y1 = int(y1 * self.display_size[1] // self.rgb888p_size[1]) + x2 = int(x2 * self.display_size[0] // self.rgb888p_size[0]) + y2 = int(y2 * self.display_size[1] // self.rgb888p_size[1]) + # 绘制矩形框和类别标签 + pl.osd_img.draw_rectangle(x1, y1, int(w), int(h), color=self.color[det_box[0]], thickness=2) + pl.osd_img.draw_string_advanced(x1, y1-50, 32," " + self.labels[det_box[0]] + " " + str(round(det_box[1],2)), color=self.color[det_box[0]]) + else: + pl.osd_img.clear() + + # 获取padding参数 + def get_padding_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw - 0.1)) + return top, bottom, left, right + +if __name__ == "__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 设置模型路径和其他参数 + kmodel_path = "/sdcard/app/tests/kmodel/yolov5n-falldown.kmodel" + confidence_threshold = 0.3 + nms_threshold = 0.45 + rgb888p_size = [1920, 1080] + labels = ["Fall","NoFall"] # 模型输出类别名称 + anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326] # anchor设置 + + # 初始化PipeLine,用于图像处理流程 + pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode) + pl.create() + # 初始化自定义跌倒检测实例 + fall_det = FallDetectionApp(kmodel_path, model_input_size=[640, 640], labels=labels, anchors=anchors, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, nms_option=False, strides=[8,16,32], rgb888p_size=rgb888p_size, display_size=display_size, debug_mode=0) + fall_det.config_preprocess() + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img = pl.get_frame() # 获取当前帧数据 + res = fall_det.run(img) # 推理当前帧 + fall_det.draw_result(pl, res) # 绘制结果到PipeLine的osd图像 + pl.show_image() # 显示当前的绘制结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + fall_det.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 +``` + +### 2.11. 
猜拳游戏 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +from random import randint +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + self.labels=labels + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors # 锚框,检测任务使用 + self.strides = strides # 特征下采样倍数 + self.nms_option = nms_option # NMS选项,如果为True做类间NMS,如果为False做类内NMS + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 实例化Ai2d,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数是ai2d预处理的输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,results是模型输出array的列表,这里使用了aicube库的anchorbasedet_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势关键点分类任务类 +class HandKPClassApp(AIBase): + def 
__init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # crop参数列表 + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算crop参数并设置crop预处理 + self.crop_params = self.get_crop_param(det) + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数是ai2d预处理的输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出array的列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + results=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + results_show = np.zeros(results.shape,dtype=np.int16) + results_show[0::2] = results[0::2] * self.crop_params[3] + self.crop_params[0] + results_show[1::2] = results[1::2] * self.crop_params[2] + self.crop_params[1] + gesture=self.hk_gesture(results_show) + results_show[0::2] = results_show[0::2] * (self.display_size[0] / self.rgb888p_size[0]) + results_show[1::2] = results_show[1::2] * (self.display_size[1] / self.rgb888p_size[1]) + return results_show,gesture + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + + # 求两个vector之间的夹角 + def hk_vector_2d_angle(self,v1,v2): + with ScopedTiming("hk_vector_2d_angle",self.debug_mode > 0): + v1_x,v1_y,v2_x,v2_y = v1[0],v1[1],v2[0],v2[1] + v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) + v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) + dot_product = v1_x * v2_x + v1_y * v2_y + cos_angle = dot_product/(v1_norm*v2_norm) + angle = np.acos(cos_angle)*180/np.pi + return angle + + # 根据手掌关键点检测结果判断手势类别 + def hk_gesture(self,results): + with ScopedTiming("hk_gesture",self.debug_mode > 0): + angle_list = [] + for i in range(5): + angle = self.hk_vector_2d_angle([(results[0]-results[i*8+4]), 
(results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])])
+                angle_list.append(angle)
+            thr_angle,thr_angle_thumb,thr_angle_s,gesture_str = 65.,53.,49.,None
+            if 65535. not in angle_list:
+                if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
+                    gesture_str = "fist"
+                elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]<thr_angle_s):
+                    gesture_str = "five"
+                elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
+                    gesture_str = "gun"
+                elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s):
+                    gesture_str = "love"
+                elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
+                    gesture_str = "one"
+                elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s):
+                    gesture_str = "six"
+                elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle):
+                    gesture_str = "three"
+                elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
+                    gesture_str = "thumbUp"
+                elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
+                    gesture_str = "yeah"
+            return gesture_str
+
+# 猜拳游戏任务类
+class FingerGuess:
+    def __init__(self,hand_det_kmodel,hand_kp_kmodel,det_input_size,kp_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],guess_mode=3,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0):
+        # 手掌检测模型路径
+        self.hand_det_kmodel=hand_det_kmodel
+        # 手掌关键点模型路径
+        self.hand_kp_kmodel=hand_kp_kmodel
+        # 手掌检测模型输入分辨率
+        self.det_input_size=det_input_size
+        # 手掌关键点模型输入分辨率
+        self.kp_input_size=kp_input_size
+        self.labels=labels
+        # anchors
+        self.anchors=anchors
+        # 置信度阈值
+        self.confidence_threshold=confidence_threshold
+        # nms阈值
+        self.nms_threshold=nms_threshold
+        # nms选项
+        self.nms_option=nms_option
+        # 特征图针对输入的下采样倍数
+        self.strides=strides
+        # sensor给到AI的图像分辨率,宽16字节对齐
+        self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]]
+        # 视频输出VO分辨率,宽16字节对齐
+        self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]]
+        # debug_mode模式
+        self.debug_mode=debug_mode
+        self.guess_mode=guess_mode
+        # 石头剪刀布的贴图array
+        self.five_image = self.read_file("/sdcard/app/tests/utils/five.bin")
+        self.fist_image = self.read_file("/sdcard/app/tests/utils/fist.bin")
+        self.shear_image = self.read_file("/sdcard/app/tests/utils/shear.bin")
+        self.counts_guess = -1                 # 猜拳次数计数
+        self.player_win = 0                    # 玩家赢次计数
+        self.k230_win = 0                      # k230赢次计数
+        self.sleep_end = False                 # 是否停顿
+        self.set_stop_id = True                # 是否暂停猜拳
+        self.LIBRARY = ["fist","yeah","five"]  # 猜拳(石头、剪刀、布)三种出拳的列表
+        self.hand_det=HandDetApp(self.hand_det_kmodel,self.labels,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0)
+        self.hand_kp=HandKPClassApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size)
+        self.hand_det.config_preprocess()
+
+    # run函数
+    def run(self,input_np):
+        # 先进行手掌检测
+        det_boxes=self.hand_det.run(input_np)
+        boxes=[]
+        gesture_res=[]
+        for det_box in det_boxes:
+            # 对检测到的手做手势识别
+            x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5]
+            w,h= int(x2 - x1),int(y2 - y1)
+            if (h<(0.1*self.rgb888p_size[1])):
+                continue
+            if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or 
(x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + self.hand_kp.config_preprocess(det_box) + results_show,gesture=self.hand_kp.run(input_np) + boxes.append(det_box) + gesture_res.append(gesture) + return boxes,gesture_res + + # 绘制效果 + def draw_result(self,pl,dets,gesture_res): + pl.osd_img.clear() + # 手掌的手势分类得到用户的出拳,根据不同模式给出开发板的出拳,并将对应的贴图放到屏幕上显示 + if (len(dets) >= 2): + pl.osd_img.draw_string_advanced( self.display_size[0]//2-50,self.display_size[1]//2-50,60, "请保证只有一只手入镜!", color=(255,255,0,0)) + elif (self.guess_mode == 0): + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_np) + if (gesture_res[0] == "fist"): + draw_img_np[:400,:400,:] = self.shear_image + elif (gesture_res[0] == "five"): + draw_img_np[:400,:400,:] = self.fist_image + elif (gesture_res[0] == "yeah"): + draw_img_np[:400,:400,:] = self.five_image + pl.osd_img.copy_from(draw_img) + elif (self.guess_mode == 1): + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_np) + if (gesture_res[0] == "fist"): + draw_img_np[:400,:400,:] = self.five_image + elif (gesture_res[0] == "five"): + draw_img_np[:400,:400,:] = self.shear_image + elif (gesture_res[0] == "yeah"): + draw_img_np[:400,:400,:] = self.fist_image + pl.osd_img.copy_from(draw_img) + else: + draw_img_np = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + draw_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_np) + if (self.sleep_end): + time.sleep_ms(2000) + self.sleep_end = False + if (len(dets) == 0): + self.set_stop_id = True + return + if (self.counts_guess == -1 and gesture_res[0] != "fist" and gesture_res[0] != "yeah" and gesture_res[0] != "five"): + draw_img.draw_string_advanced( self.display_size[0]//2-50,self.display_size[1]//2-50,60, "游戏开始", color=(255,255,0,0)) + draw_img.draw_string_advanced( self.display_size[0]//2-50,self.display_size[1]//2-50,60, "第一回合", color=(255,255,0,0)) + elif (self.counts_guess == self.guess_mode): + draw_img.clear() + if (self.k230_win > self.player_win): + draw_img.draw_string_advanced( self.display_size[0]//2-50,self.display_size[1]//2-50,60, "你输了!", color=(255,255,0,0)) + elif (self.k230_win < self.player_win): + draw_img.draw_string_advanced( self.display_size[0]//2-50,self.display_size[1]//2-50,60, "你赢了!", color=(255,255,0,0)) + else: + draw_img.draw_string_advanced( self.display_size[0]//2-50,self.display_size[1]//2-50,60, "平局", color=(255,255,0,0)) + self.counts_guess = -1 + self.player_win = 0 + self.k230_win = 0 + self.sleep_end = True + else: + if (self.set_stop_id): + if (self.counts_guess == -1 and (gesture_res[0] == "fist" or gesture_res[0] == "yeah" or gesture_res[0] == "five")): + self.counts_guess = 0 + if (self.counts_guess != -1 and (gesture_res[0] == "fist" or gesture_res[0] == "yeah" or gesture_res[0] == "five")): + k230_guess = randint(1,10000) % 3 + if (gesture_res[0] == "fist" and self.LIBRARY[k230_guess] == "yeah"): + self.player_win += 1 + elif (gesture_res[0] == "fist" and self.LIBRARY[k230_guess] == "five"): + self.k230_win += 1 + if (gesture_res[0] == "yeah" and self.LIBRARY[k230_guess] == 
"fist"): + self.k230_win += 1 + elif (gesture_res[0] == "yeah" and self.LIBRARY[k230_guess] == "five"): + self.player_win += 1 + if (gesture_res[0] == "five" and self.LIBRARY[k230_guess] == "fist"): + self.player_win += 1 + elif (gesture_res[0] == "five" and self.LIBRARY[k230_guess] == "yeah"): + self.k230_win += 1 + if (self.LIBRARY[k230_guess] == "fist"): + draw_img_np[:400,:400,:] = self.fist_image + elif (self.LIBRARY[k230_guess] == "five"): + draw_img_np[:400,:400,:] = self.five_image + elif (self.LIBRARY[k230_guess] == "yeah"): + draw_img_np[:400,:400,:] = self.shear_image + self.counts_guess += 1 + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,60,"第" + str(self.counts_guess) + "回合", color=(255,255,0,0)) + self.set_stop_id = False + self.sleep_end = True + else: + draw_img.draw_string_advanced(self.display_size[0]//2-50,self.display_size[1]//2-50,60,"第" + str(self.counts_guess+1) + "回合", color=(255,255,0,0)) + pl.osd_img.copy_from(draw_img) + + # 读取石头剪刀布的bin文件方法 + def read_file(self,file_name): + image_arr = np.fromfile(file_name,dtype=np.uint8) + image_arr = image_arr.reshape((400,400,4)) + return image_arr + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手掌关键点模型路径 + hand_kp_kmodel_path="/sdcard/app/tests/kmodel/handkp_det.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_kp_input_size=[256,256] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + # 猜拳模式 0 玩家稳赢 , 1 玩家必输 , n > 2 多局多胜 + guess_mode = 3 + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + hkc=FingerGuess(hand_det_kmodel_path,hand_kp_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_kp_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],guess_mode=guess_mode,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,gesture_res=hkc.run(img) # 推理当前帧 + hkc.draw_result(pl,det_boxes,gesture_res) # 绘制推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + hkc.hand_det.deinit() + hkc.hand_kp.deinit() + pl.destroy() +``` + +### 2.12. 
手掌检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube + +# 自定义手掌检测类,继承自AIBase基类 +class HandDetectionApp(AIBase): + def __init__(self, kmodel_path, model_input_size, labels, anchors, confidence_threshold=0.2, nms_threshold=0.5, nms_option=False, strides=[8,16,32], rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的构造函数,初始化模型文件路径、模型输入分辨率、RGB图像分辨率和调试模式 + self.kmodel_path = kmodel_path # 模型文件路径 + self.model_input_size = model_input_size # 模型输入分辨率 + self.labels = labels # 模型输出的类别标签列表 + self.anchors = anchors # 用于目标检测的锚点尺寸列表 + self.strides = strides # 特征下采样倍数 + self.confidence_threshold = confidence_threshold # 置信度阈值,用于过滤低置信度的检测结果 + self.nms_threshold = nms_threshold # NMS(非极大值抑制)阈值,用于去除重叠的检测框 + self.nms_option = nms_option # NMS选项,可能影响NMS的具体实现 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] # sensor给到AI的图像分辨率,对齐到最近的16的倍数 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] # 显示分辨率,对齐到最近的16的倍数 + self.debug_mode = debug_mode # 调试模式,用于输出调试信息 + self.ai2d = Ai2d(debug_mode) # 实例化Ai2d类,用于实现模型预处理 + # 设置Ai2d的输入输出格式和类型,这里使用NCHW格式,数据类型为uint8 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): # 使用ScopedTiming装饰器来测量预处理配置的时间 + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [0, 0, 0]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,用于处理模型输出结果 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): # 使用ScopedTiming装饰器来测量后处理的时间 + # 使用aicube库的函数进行后处理,得到最终的检测结果 + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + return dets + + # 绘制检测结果到屏幕上 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): # 使用ScopedTiming装饰器来测量绘制结果的时间 + if dets: # 如果存在检测结果 + pl.osd_img.clear() # 清除屏幕上的旧内容 + for det_box in dets: # 遍历每个检测框 + # 根据模型输出计算检测框的像素坐标,并调整大小以适应显示分辨率 + x1, y1, x2, y2 = det_box[2], det_box[3], det_box[4], det_box[5] + w = float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0] + h = float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1] + x1 = int(x1 * self.display_size[0] // self.rgb888p_size[0]) + y1 = int(y1 * self.display_size[1] // self.rgb888p_size[1]) + x2 = int(x2 * self.display_size[0] // self.rgb888p_size[0]) + y2 = int(y2 * self.display_size[1] // self.rgb888p_size[1]) + # 过滤掉太小或者位置不合理的检测框 + if (h < (0.1 * self.display_size[0])): + continue + if (w < (0.25 * 
self.display_size[0]) and ((x1 < (0.03 * self.display_size[0])) or (x2 > (0.97 * self.display_size[0])))): + continue + if (w < (0.15 * self.display_size[0]) and ((x1 < (0.01 * self.display_size[0])) or (x2 > (0.99 * self.display_size[0])))): + continue + # 绘制矩形框和类别标签 + pl.osd_img.draw_rectangle(x1, y1, int(w), int(h), color=(255, 0, 255, 0), thickness=2) + pl.osd_img.draw_string_advanced(x1, y1-50,32, " " + self.labels[det_box[0]] + " " + str(round(det_box[1], 2)), color=(255, 0, 255, 0)) + else: + pl.osd_img.clear() # 如果没有检测结果,清空屏幕 + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 其它参数设置 + confidence_threshold = 0.2 + nms_threshold = 0.5 + rgb888p_size=[1920,1080] + labels = ["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义手掌检测实例 + hand_det=HandDetectionApp(kmodel_path,model_input_size=[512,512],labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + hand_det.config_preprocess() + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧数据 + res=hand_det.run(img) # 推理当前帧 + hand_det.draw_result(pl,res) # 绘制结果到PipeLine的osd图像 + pl.show_image() # 显示当前的绘制结果 + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) + finally: + hand_det.deinit() # 反初始化 + pl.destroy() # 销毁PipeLine实例 +``` + +### 2.13. 
手掌关键点分类 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + self.labels=labels + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # 锚框,目标检测任务使用 + self.anchors=anchors + # 特征下采样倍数 + self.strides = strides + # NMS选项,如果为True做类间NMS,如果为False做类内NMS + self.nms_option = nms_option + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例用于实现预处理 + self.ai2d=Ai2d(debug_mode) + # 设置ai2d的输入输出的格式和数据类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,用于处理模型输出结果,这里使用了aicube库的anchorbasedet_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势关键点分类任务类 +class HandKPClassApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + 
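# HandKPClassApp is the second stage of this two-model demo: HandDetApp above proposes palm boxes,
+        # and this class runs the keypoint kmodel on a square crop around each box (1.26x the larger box
+        # side, see get_crop_param), maps the 21 regressed keypoints back to frame coordinates, and then
+        # derives the gesture label from the finger-joint angles computed in hk_gesture below. +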
super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例用于实现预处理 + self.ai2d=Ai2d(debug_mode) + # 设置ai2d的输入输出的格式和数据类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.crop_params = self.get_crop_param(det) + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,得到手掌手势结果和手掌关键点数据 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + results=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + results_show = np.zeros(results.shape,dtype=np.int16) + results_show[0::2] = results[0::2] * self.crop_params[3] + self.crop_params[0] + results_show[1::2] = results[1::2] * self.crop_params[2] + self.crop_params[1] + gesture=self.hk_gesture(results_show) + results_show[0::2] = results_show[0::2] * (self.display_size[0] / self.rgb888p_size[0]) + results_show[1::2] = results_show[1::2] * (self.display_size[1] / self.rgb888p_size[1]) + return results_show,gesture + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + + # 求两个vector之间的夹角 + def hk_vector_2d_angle(self,v1,v2): + with ScopedTiming("hk_vector_2d_angle",self.debug_mode > 0): + v1_x,v1_y,v2_x,v2_y = v1[0],v1[1],v2[0],v2[1] + v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) + v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) + dot_product = v1_x * v2_x + v1_y * v2_y + cos_angle = dot_product/(v1_norm*v2_norm) + angle = np.acos(cos_angle)*180/np.pi + return angle + + # 根据手掌关键点检测结果判断手势类别 + def hk_gesture(self,results): + with ScopedTiming("hk_gesture",self.debug_mode > 0): + angle_list = [] + for i in range(5): + angle = self.hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) + angle_list.append(angle) + thr_angle,thr_angle_thumb,thr_angle_s,gesture_str = 65.,53.,49.,None + if 65535. 
not in angle_list: + if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "fist" + elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]<thr_angle_s): + gesture_str = "five" + elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "gun" + elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): + gesture_str = "love" + elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "one" + elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): + gesture_str = "six" + elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle): + gesture_str = "three" + elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "thumbUp" + elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): + gesture_str = "yeah" + return gesture_str + +# 手掌关键点分类任务 +class HandKeyPointClass: + def __init__(self,hand_det_kmodel,hand_kp_kmodel,det_input_size,kp_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 手掌检测模型路径 + self.hand_det_kmodel=hand_det_kmodel + # 手掌关键点模型路径 + self.hand_kp_kmodel=hand_kp_kmodel + # 手掌检测模型输入分辨率 + self.det_input_size=det_input_size + # 手掌关键点模型输入分辨率 + self.kp_input_size=kp_input_size + self.labels=labels + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.nms_option=nms_option + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.hand_det=HandDetApp(self.hand_det_kmodel,self.labels,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.hand_kp=HandKPClassApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.hand_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行手掌检测 + det_boxes=self.hand_det.run(input_np) + boxes=[] + gesture_res=[] + for det_box in det_boxes: + # 对于检测到的每一个手掌执行关键点识别 + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or (x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + self.hand_kp.config_preprocess(det_box) + results_show,gesture=self.hand_kp.run(input_np) + gesture_res.append((results_show,gesture)) + boxes.append(det_box) + return boxes,gesture_res + + # 绘制效果,绘制关键点、手掌检测框和识别结果 + def draw_result(self,pl,dets,gesture_res): + pl.osd_img.clear() + if len(dets)>0: + for k in range(len(dets)): + det_box=dets[k] + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or 
(x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + pl.osd_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) + + results_show=gesture_res[k][0] + for i in range(len(results_show)/2): + pl.osd_img.draw_circle(results_show[i*2], results_show[i*2+1], 1, color=(255, 0, 255, 0),fill=False) + for i in range(5): + j = i*8 + if i==0: + R = 255; G = 0; B = 0 + if i==1: + R = 255; G = 0; B = 255 + if i==2: + R = 255; G = 255; B = 0 + if i==3: + R = 0; G = 255; B = 0 + if i==4: + R = 0; G = 0; B = 255 + pl.osd_img.draw_line(results_show[0], results_show[1], results_show[j+2], results_show[j+3], color=(255,R,G,B), thickness = 3) + pl.osd_img.draw_line(results_show[j+2], results_show[j+3], results_show[j+4], results_show[j+5], color=(255,R,G,B), thickness = 3) + pl.osd_img.draw_line(results_show[j+4], results_show[j+5], results_show[j+6], results_show[j+7], color=(255,R,G,B), thickness = 3) + pl.osd_img.draw_line(results_show[j+6], results_show[j+7], results_show[j+8], results_show[j+9], color=(255,R,G,B), thickness = 3) + + gesture_str=gesture_res[k][1] + pl.osd_img.draw_string_advanced( x_det , y_det-50,32, " " + str(gesture_str), color=(255,0, 255, 0)) + + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手掌关键点模型路径 + hand_kp_kmodel_path="/sdcard/app/tests/kmodel/handkp_det.kmodel" + # 其他参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_kp_input_size=[256,256] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + hkc=HandKeyPointClass(hand_det_kmodel_path,hand_kp_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_kp_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,gesture_res=hkc.run(img) # 推理当前帧 + hkc.draw_result(pl,det_boxes,gesture_res) # 绘制当前帧推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + hkc.hand_det.deinit() + hkc.hand_kp.deinit() + pl.destroy() +``` + +### 2.14. 
手掌关键点检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + self.labels=labels + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # 锚框,目标检测任务使用 + self.anchors=anchors + # 特征下采样倍数 + self.strides = strides + # NMS选项,如果为True做类间NMS,如果为False做类内NMS + self.nms_option = nms_option + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例用于实现预处理 + self.ai2d=Ai2d(debug_mode) + # 设置ai2d的输入输出的格式和数据类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,用于处理模型输出结果,这里使用了aicube库的anchorbasedet_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势关键点检测任务类 +class HandKPDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + 
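# HandKPDetApp mirrors HandKPClassApp from the previous demo but stops at the keypoints: its
+        # postprocess only rescales the 21 regressed points from the crop back to display coordinates,
+        # without the angle-based gesture classification step. +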
super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例用于实现预处理 + self.ai2d=Ai2d(debug_mode) + # 设置ai2d的输入输出的格式和数据类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.crop_params = self.get_crop_param(det) + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + results=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + results_show = np.zeros(results.shape,dtype=np.int16) + results_show[0::2] = results[0::2] * self.crop_params[3] + self.crop_params[0] + results_show[1::2] = results[1::2] * self.crop_params[2] + self.crop_params[1] + results_show[0::2] = results_show[0::2] * (self.display_size[0] / self.rgb888p_size[0]) + results_show[1::2] = results_show[1::2] * (self.display_size[1] / self.rgb888p_size[1]) + return results_show + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + +# 手掌关键点检测任务 +class HandKeyPointDet: + def __init__(self,hand_det_kmodel,hand_kp_kmodel,det_input_size,kp_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 手掌检测模型路径 + self.hand_det_kmodel=hand_det_kmodel + # 手掌关键点模型路径 + self.hand_kp_kmodel=hand_kp_kmodel + # 手掌检测模型输入分辨率 + self.det_input_size=det_input_size + # 手掌关键点模型输入分辨率 + self.kp_input_size=kp_input_size + self.labels=labels + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # nms选项 + self.nms_option=nms_option + # 特征图对于输入的下采样倍数 + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # 
debug_mode模式 + self.debug_mode=debug_mode + self.hand_det=HandDetApp(self.hand_det_kmodel,self.labels,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.hand_kp=HandKPDetApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.hand_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 手掌检测 + det_boxes=self.hand_det.run(input_np) + hand_res=[] + boxes=[] + for det_box in det_boxes: + # 对检测到的每个手掌执行手势关键点识别 + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + # 丢弃不合理的框 + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or (x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + self.hand_kp.config_preprocess(det_box) + results_show=self.hand_kp.run(input_np) + boxes.append(det_box) + hand_res.append(results_show) + return boxes,hand_res + + # 绘制效果,绘制手掌关键点、检测框 + def draw_result(self,pl,dets,hand_res): + pl.osd_img.clear() + if dets: + for k in range(len(dets)): + det_box=dets[k] + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + pl.osd_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) + + results_show=hand_res[k] + for i in range(len(results_show)/2): + pl.osd_img.draw_circle(results_show[i*2], results_show[i*2+1], 1, color=(255, 0, 255, 0),fill=False) + for i in range(5): + j = i*8 + if i==0: + R = 255; G = 0; B = 0 + if i==1: + R = 255; G = 0; B = 255 + if i==2: + R = 255; G = 255; B = 0 + if i==3: + R = 0; G = 255; B = 0 + if i==4: + R = 0; G = 0; B = 255 + pl.osd_img.draw_line(results_show[0], results_show[1], results_show[j+2], results_show[j+3], color=(255,R,G,B), thickness = 3) + pl.osd_img.draw_line(results_show[j+2], results_show[j+3], results_show[j+4], results_show[j+5], color=(255,R,G,B), thickness = 3) + pl.osd_img.draw_line(results_show[j+4], results_show[j+5], results_show[j+6], results_show[j+7], color=(255,R,G,B), thickness = 3) + pl.osd_img.draw_line(results_show[j+6], results_show[j+7], results_show[j+8], results_show[j+9], color=(255,R,G,B), thickness = 3) + + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手部关键点模型路径 + hand_kp_kmodel_path="/sdcard/app/tests/kmodel/handkp_det.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_kp_input_size=[256,256] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + 
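# get_frame()把一帧rgb888p_size分辨率的图像交给AI代码,画在pl.osd_img上的内容则按display_size叠加输出;
+    # set display_mode to "lcd" above if the board drives the 800x480 panel instead of HDMI. +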
pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + hkd=HandKeyPointDet(hand_det_kmodel_path,hand_kp_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_kp_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_boxes,hand_res=hkd.run(img) # 推理当前帧 + hkd.draw_result(pl,det_boxes,hand_res) # 绘制推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + hkd.hand_det.deinit() + hkd.hand_kp.deinit() + pl.destroy() +``` + +### 2.15. 手势识别 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # 锚框,目标检测任务使用 + self.anchors=anchors + # 特征下采样倍数 + self.strides = strides + # NMS选项,如果为True做类间NMS,如果为False做类内NMS + self.nms_option = nms_option + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例用于实现预处理 + self.ai2d=Ai2d(debug_mode) + # 设置ai2d的输入输出的格式和数据类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程,参数为预处理输入tensor的shape和预处理输出的tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,用于处理模型输出结果,这里使用了aicube库的anchorbasedet_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides,1, self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = 
self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势识别任务类 +class HandRecognitionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,labels,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + self.labels=labels + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例用于实现预处理 + self.ai2d=Ai2d(debug_mode) + # 设置ai2d的输入输出的格式和数据类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.crop_params = self.get_crop_param(det) + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + result=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + x_softmax = self.softmax(result) + idx = np.argmax(x_softmax) + text = " " + self.labels[idx] + ": " + str(round(x_softmax[idx],2)) + return text + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + + # softmax实现 + def softmax(self,x): + x -= np.max(x) + x = np.exp(x) / np.sum(np.exp(x)) + return x + +class HandRecognition: + def __init__(self,hand_det_kmodel,hand_kp_kmodel,det_input_size,kp_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 手掌检测模型路径 + self.hand_det_kmodel=hand_det_kmodel + # 
手掌关键点模型路径 + self.hand_kp_kmodel=hand_kp_kmodel + # 手掌检测模型输入分辨率 + self.det_input_size=det_input_size + # 手掌关键点模型输入分辨率 + self.kp_input_size=kp_input_size + self.labels=labels + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # nms选项 + self.nms_option=nms_option + # 特征图针对输出的下采样倍数 + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.hand_det=HandDetApp(self.hand_det_kmodel,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.hand_rec=HandRecognitionApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,labels=self.labels,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.hand_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行手掌检测 + det_boxes=self.hand_det.run(input_np) + hand_rec_res=[] + hand_det_res=[] + for det_box in det_boxes: + # 对检测到的每一个手掌执行手势识别 + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or (x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + self.hand_rec.config_preprocess(det_box) + text=self.hand_rec.run(input_np) + hand_det_res.append(det_box) + hand_rec_res.append(text) + return hand_det_res,hand_rec_res + + # 绘制效果,绘制识别结果和检测框 + def draw_result(self,pl,hand_det_res,hand_rec_res): + pl.osd_img.clear() + if hand_det_res: + for k in range(len(hand_det_res)): + det_box=hand_det_res[k] + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + pl.osd_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) + pl.osd_img.draw_string_advanced( x_det, y_det-50, 32,hand_rec_res[k], color=(255,0, 255, 0)) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手势识别模型路径 + hand_rec_kmodel_path="/sdcard/app/tests/kmodel/hand_reco.kmodel" + # 其它参数 + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_rec_input_size=[224,224] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["gun","other","yeah","five"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + 
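# HandRecognition chains the two kmodels loaded above: hand_det.kmodel proposes palm boxes and
+    # hand_reco.kmodel classifies each expanded palm crop into one of `labels` via softmax + argmax;
+    # the label order ["gun","other","yeah","five"] is assumed to match the classifier's training. +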
hr=HandRecognition(hand_det_kmodel_path,hand_rec_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_rec_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + hand_det_res,hand_rec_res=hr.run(img) # 推理当前帧 + hr.draw_result(pl,hand_det_res,hand_rec_res) # 绘制推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + hr.hand_det.deinit() + hr.hand_rec.deinit() + pl.destroy() +``` + +### 2.16. 关键词唤醒 + +```python +from libs.PipeLine import ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +from media.pyaudio import * # 音频模块 +from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 +import media.wave as wave # wav音频处理模块 +import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 +import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 +import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 +import time # 时间统计 +import struct # 字节字符转换模块 +import gc # 垃圾回收模块 +import os,sys # 操作系统接口模块 + +# 自定义关键词唤醒类,继承自AIBase基类 +class KWSApp(AIBase): + def __init__(self, kmodel_path, threshold, debug_mode=0): + super().__init__(kmodel_path) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.threshold=threshold + self.debug_mode = debug_mode # 是否开启调试模式 + self.cache_np = np.zeros((1, 256, 105), dtype=np.float) + + # 自定义预处理,返回模型输入tensor列表 + def preprocess(self,pcm_data): + pcm_data_list=[] + # 获取音频流数据 + for i in range(0, len(pcm_data), 2): + # 每两个字节组织成一个有符号整数,然后将其转换为浮点数,即为一次采样的数据,加入到当前一帧(0.3s)的数据列表中 + int_pcm_data = struct.unpack("<h", pcm_data[i:i+2])[0] + float_pcm_data = float(int_pcm_data) + pcm_data_list.append(float_pcm_data) + # 将pcm数据处理为模型输入的特征向量 + mp_feats = aidemo.kws_preprocess(fp, pcm_data_list)[0] + mp_feats_np = np.array(mp_feats).reshape((1, 30, 40)) + audio_input_tensor = nn.from_numpy(mp_feats_np) + cache_input_tensor = nn.from_numpy(self.cache_np) + return [audio_input_tensor,cache_input_tensor] + + # 自定义当前任务的后处理,results是模型输出array的列表 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + logits_np = results[0] + self.cache_np= results[1] + max_logits = np.max(logits_np, axis=1)[0] + max_p = np.max(max_logits) + idx = np.argmax(max_logits) + # 如果分数大于阈值,且idx==1(即包含唤醒词),播放回复音频 + if max_p > self.threshold and idx == 1: + return 1 + else: + return 0 + + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + nn.shrink_memory_pool() + # 设置模型路径和其他参数 + kmodel_path = "/sdcard/app/tests/kmodel/kws.kmodel" + # 其它参数 + THRESH = 0.5 # 检测阈值 + SAMPLE_RATE = 16000 # 采样率16000Hz,即每秒采样16000次 + CHANNELS = 1 # 通道数 1为单声道,2为立体声 + FORMAT = paInt16 # 音频输入输出格式 paInt16 + CHUNK = int(0.3 * 16000) # 每次读取音频数据的帧数,设置为0.3s的帧数16000*0.3=4800 + reply_wav_file = "/sdcard/app/tests/utils/wozai.wav" # kws唤醒词回复音频路径 + + # 初始化音频预处理接口 + fp = aidemo.kws_fp_create() + # 初始化音频流 + p = PyAudio() + p.initialize(CHUNK) + MediaManager.init() #vb buffer初始化 + # 用于采集实时音频数据 + input_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,input=True,frames_per_buffer=CHUNK) + # 用于播放回复音频 + output_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,output=True,frames_per_buffer=CHUNK) + # 初始化自定义关键词唤醒实例 + kws = KWSApp(kmodel_path,threshold=THRESH,debug_mode=0) + + try: + while True: + os.exitpoint() # 检查是否有退出信号 + with ScopedTiming("total",1): + pcm_data=input_stream.read() + res=kws.run(pcm_data) + if res: + print("====Detected XiaonanXiaonan!====") + wf = wave.open(reply_wav_file, "rb") + wav_data = wf.read_frames(CHUNK) + while wav_data: + output_stream.write(wav_data) + wav_data = wf.read_frames(CHUNK) + time.sleep(1) # 时间缓冲,用于播放回复声音 + wf.close() + else: + print("Deactivated!") + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + input_stream.stop_stream() + output_stream.stop_stream() + input_stream.close() + 
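# Tear-down order in this finally block: stop and close both PyAudio streams first, then
+        # p.terminate(), release the VB buffers with MediaManager.deinit(), free the feature-extractor
+        # handle with aidemo.kws_fp_destroy(fp), and finally kws.deinit() to release the kmodel. +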
output_stream.close() + p.terminate() + MediaManager.deinit() #释放vb buffer + aidemo.kws_fp_destroy(fp) + kws.deinit() # 反初始化 +``` + +### 2.17. 车牌检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aidemo + +# 自定义车牌检测类 +class LicenceDetectionApp(AIBase): + # 初始化函数,设置车牌检测应用的参数 + def __init__(self, kmodel_path, model_input_size, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的初始化函数 + self.kmodel_path = kmodel_path # 模型路径 + # 模型输入分辨率 + self.model_input_size = model_input_size + # 分类阈值 + self.confidence_threshold = confidence_threshold + self.nms_threshold = nms_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + self.debug_mode = debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + # 对检测结果进行后处理 + det_res = aidemo.licence_det_postprocess(results, [self.rgb888p_size[1], self.rgb888p_size[0]], self.model_input_size, self.confidence_threshold, self.nms_threshold) + return det_res + + # 绘制检测结果到屏幕上 + def draw_result(self, pl, dets): + with ScopedTiming("display_draw", self.debug_mode > 0): + if dets: + pl.osd_img.clear() # 清除屏幕 + point_8 = np.zeros((8), dtype=np.int16) + for det in dets: + # 将检测框坐标从sensor图像分辨率转换为显示分辨率 + for i in range(4): + x = det[i * 2 + 0] / self.rgb888p_size[0] * self.display_size[0] + y = det[i * 2 + 1] / self.rgb888p_size[1] * self.display_size[1] + point_8[i * 2 + 0] = int(x) + point_8[i * 2 + 1] = int(y) + # 在屏幕上绘制检测框 + for i in range(4): + pl.osd_img.draw_line(point_8[i * 2 + 0], point_8[i * 2 + 1], point_8[(i + 1) % 4 * 2 + 0], point_8[(i + 1) % 4 * 2 + 1], color=(255, 0, 255, 0), thickness=4) + else: + pl.osd_img.clear() # 如果没有检测结果,则清空屏幕 + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/LPD_640.kmodel" + # 其它参数设置 + confidence_threshold = 0.2 + nms_threshold = 0.2 + rgb888p_size=[1920,1080] + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义车牌检测实例 + 
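# Note: config_preprocess here only does a plain resize to the 640x640 model input (no letterbox
+    # padding as in the hand demos), and licence_det_postprocess returns the four corner points of
+    # each plate in rgb888p_size coordinates, which draw_result then rescales for display. +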
licence_det=LicenceDetectionApp(kmodel_path,model_input_size=[640,640],confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + licence_det.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res=licence_det.run(img) + # 绘制结果到PipeLine的osd图像 + licence_det.draw_result(pl,res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + licence_det.deinit() + pl.destroy() +``` + +### 2.18. 车牌识别 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义车牌检测类 +class LicenceDetectionApp(AIBase): + # 初始化函数,设置车牌检测应用的参数 + def __init__(self, kmodel_path, model_input_size, confidence_threshold=0.5, nms_threshold=0.2, rgb888p_size=[224,224], display_size=[1920,1080], debug_mode=0): + super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode) # 调用基类的初始化函数 + self.kmodel_path = kmodel_path # 模型路径 + # 模型输入分辨率 + self.model_input_size = model_input_size + # 分类阈值 + self.confidence_threshold = confidence_threshold + self.nms_threshold = nms_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]] + # 显示分辨率 + self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]] + self.debug_mode = debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d = Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self, input_image_size=None): + with ScopedTiming("set preprocess config", self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self, results): + with ScopedTiming("postprocess", self.debug_mode > 0): + # 对检测结果进行后处理 + det_res = aidemo.licence_det_postprocess(results, [self.rgb888p_size[1], self.rgb888p_size[0]], self.model_input_size, self.confidence_threshold, self.nms_threshold) + return det_res + +# 自定义车牌识别任务类 +class LicenceRecognitionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 车牌字符字典 + self.dict_rec = ["挂", "使", "领", "澳", "港", "皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", 
"M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"] + self.dict_size = len(self.dict_rec) + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + output_data=results[0].reshape((-1,self.dict_size)) + max_indices = np.argmax(output_data, axis=1) + result_str = "" + for i in range(max_indices.shape[0]): + index = max_indices[i] + if index > 0 and (i == 0 or index != max_indices[i - 1]): + result_str += self.dict_rec[index - 1] + return result_str + +# 车牌识别任务类 +class LicenceRec: + def __init__(self,licence_det_kmodel,licence_rec_kmodel,det_input_size,rec_input_size,confidence_threshold=0.25,nms_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # 车牌检测模型路径 + self.licence_det_kmodel=licence_det_kmodel + # 车牌识别模型路径 + self.licence_rec_kmodel=licence_rec_kmodel + # 人脸检测模型输入分辨率 + self.det_input_size=det_input_size + # 人脸姿态模型输入分辨率 + self.rec_input_size=rec_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.licence_det=LicenceDetectionApp(self.licence_det_kmodel,model_input_size=self.det_input_size,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.licence_rec=LicenceRecognitionApp(self.licence_rec_kmodel,model_input_size=self.rec_input_size,rgb888p_size=self.rgb888p_size) + self.licence_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 执行车牌检测 + det_boxes=self.licence_det.run(input_np) + # 将车牌部分抠出来 + imgs_array_boxes = aidemo.ocr_rec_preprocess(input_np,[self.rgb888p_size[1],self.rgb888p_size[0]],det_boxes) + imgs_array = imgs_array_boxes[0] + boxes = imgs_array_boxes[1] + rec_res = [] + for img_array in imgs_array: + # 对每一个检测到的车牌进行识别 + self.licence_rec.config_preprocess(input_image_size=[img_array.shape[3],img_array.shape[2]]) + licence_str=self.licence_rec.run(img_array) + rec_res.append(licence_str) + gc.collect() + return det_boxes,rec_res + + # 绘制车牌检测识别效果 + def draw_result(self,pl,det_res,rec_res): + pl.osd_img.clear() + if det_res: + point_8 = np.zeros((8),dtype=np.int16) + for det_index in range(len(det_res)): + for i in range(4): + x = det_res[det_index][i * 2 + 0]/self.rgb888p_size[0]*self.display_size[0] + y = det_res[det_index][i * 2 + 1]/self.rgb888p_size[1]*self.display_size[1] + point_8[i * 2 + 0] = int(x) + point_8[i * 2 + 1] = int(y) + for i in range(4): + pl.osd_img.draw_line(point_8[i * 2 + 0],point_8[i * 2 + 1],point_8[(i+1) % 4 * 2 + 0],point_8[(i+1) % 4 * 2 + 1],color=(255, 0, 255, 0),thickness=4) + pl.osd_img.draw_string_advanced( point_8[6], point_8[7] + 20, 
40,rec_res[det_index] , color=(255,255,153,18)) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 车牌检测模型路径 + licence_det_kmodel_path="/sdcard/app/tests/kmodel/LPD_640.kmodel" + # 车牌识别模型路径 + licence_rec_kmodel_path="/sdcard/app/tests/kmodel/licence_reco.kmodel" + # 其它参数 + rgb888p_size=[640,360] + licence_det_input_size=[640,640] + licence_rec_input_size=[220,32] + confidence_threshold=0.2 + nms_threshold=0.2 + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + lr=LicenceRec(licence_det_kmodel_path,licence_rec_kmodel_path,det_input_size=licence_det_input_size,rec_input_size=licence_rec_input_size,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_res,rec_res=lr.run(img) # 推理当前帧 + lr.draw_result(pl,det_res,rec_res) # 绘制当前帧推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + lr.licence_det.deinit() + lr.licence_rec.deinit() + pl.destroy() +``` + +### 2.19. 单目标跟踪 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +from random import randint +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aidemo +import random +import gc +import sys + +# 自定义跟踪模版任务类 +class TrackCropApp(AIBase): + def __init__(self,kmodel_path,model_input_size,ratio_src_crop,center_xy_wh,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 跟踪模板输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 跟踪框宽、高调整系数 + self.CONTEXT_AMOUNT = 0.5 + #src模型和crop模型输入比值 + self.ratio_src_crop = ratio_src_crop + self.center_xy_wh=center_xy_wh + # padding和crop参数 + self.pad_crop_params=[] + # 注意:ai2d设置多个预处理时执行的顺序为:crop->shift->resize/affine->pad,如果不符合该顺序,需要配置多个ai2d对象; + # 如下模型预处理要先做resize+padding再做resize+crop,因此要配置两个Ai2d对象 + self.ai2d_pad=Ai2d(debug_mode) + self.ai2d_pad.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + self.ai2d_crop=Ai2d(debug_mode) + self.ai2d_crop.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + self.need_pad=False + + # 配置预处理操作,这里使用了crop、pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + self.pad_crop_params= self.get_padding_crop_param() + # 如果需要padding,配置padding部分,否则只走crop + if (self.pad_crop_params[0] != 0 or self.pad_crop_params[1] != 0 or self.pad_crop_params[2] != 0 or self.pad_crop_params[3] != 0): + self.need_pad=True + 
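# 先用ai2d_pad对整帧做边缘填充保证模板上下文区域完整,再用ai2d_crop从填充结果中裁剪出该区域并缩放到模型输入尺寸 + 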
self.ai2d_pad.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_pad.pad([0, 0, 0, 0, self.pad_crop_params[0], self.pad_crop_params[1], self.pad_crop_params[2], self.pad_crop_params[3]], 0, [114, 114, 114]) + output_size=[self.rgb888p_size[0]+self.pad_crop_params[2]+self.pad_crop_params[3],self.rgb888p_size[1]+self.pad_crop_params[0]+self.pad_crop_params[1]] + self.ai2d_pad.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,output_size[1],output_size[0]]) + + self.ai2d_crop.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_crop.crop(int(self.pad_crop_params[4]),int(self.pad_crop_params[6]),int(self.pad_crop_params[5]-self.pad_crop_params[4]+1),int(self.pad_crop_params[7]-self.pad_crop_params[6]+1)) + self.ai2d_crop.build([1,3,output_size[1],output_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + else: + self.need_pad=False + self.ai2d_crop.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_crop.crop(int(self.center_xy_wh[0]-self.pad_crop_params[8]/2.0),int(self.center_xy_wh[1]-self.pad_crop_params[8]/2.0),int(self.pad_crop_params[8]),int(self.pad_crop_params[8])) + self.ai2d_crop.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 重写预处理函数preprocess,因为该部分不是单纯的走一个ai2d做预处理,所以该函数需要重写 + def preprocess(self,input_np): + if self.need_pad: + pad_output=self.ai2d_pad.run(input_np).to_numpy() + return [self.ai2d_crop.run(pad_output)] + else: + return [self.ai2d_crop.run(input_np)] + + # 自定义后处理,results是模型输出array的列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + return results[0] + + # 计算padding和crop参数 + def get_padding_crop_param(self): + s_z = round(np.sqrt((self.center_xy_wh[2] + self.CONTEXT_AMOUNT * (self.center_xy_wh[2] + self.center_xy_wh[3])) * (self.center_xy_wh[3] + self.CONTEXT_AMOUNT * (self.center_xy_wh[2] + self.center_xy_wh[3])))) + c = (s_z + 1) / 2 + context_xmin = np.floor(self.center_xy_wh[0] - c + 0.5) + context_xmax = int(context_xmin + s_z - 1) + context_ymin = np.floor(self.center_xy_wh[1] - c + 0.5) + context_ymax = int(context_ymin + s_z - 1) + left_pad = int(max(0, -context_xmin)) + top_pad = int(max(0, -context_ymin)) + right_pad = int(max(0, int(context_xmax - self.rgb888p_size[0] + 1))) + bottom_pad = int(max(0, int(context_ymax - self.rgb888p_size[1] + 1))) + context_xmin = context_xmin + left_pad + context_xmax = context_xmax + left_pad + context_ymin = context_ymin + top_pad + context_ymax = context_ymax + top_pad + return [top_pad,bottom_pad,left_pad,right_pad,context_xmin,context_xmax,context_ymin,context_ymax,s_z] + + #重写deinit + def deinit(self): + with ScopedTiming("deinit",self.debug_mode > 0): + del self.ai2d_pad + del self.ai2d_crop + super().deinit() + +# 自定义跟踪实时任务类 +class TrackSrcApp(AIBase): + def __init__(self,kmodel_path,model_input_size,ratio_src_crop,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # padding和crop参数列表 + self.pad_crop_params=[] + # 跟踪框宽、高调整系数 + self.CONTEXT_AMOUNT = 0.5 + # src和crop模型的输入尺寸比例 + self.ratio_src_crop = ratio_src_crop + # debug模式 + self.debug_mode=debug_mode + # 
注意:ai2d设置多个预处理时执行的顺序为:crop->shift->resize/affine->pad,如果不符合该顺序,需要配置多个ai2d对象; + # 如下模型预处理要先做resize+padding再做resize+crop,因此要配置两个Ai2d对象 + self.ai2d_pad=Ai2d(debug_mode) + self.ai2d_pad.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + self.ai2d_crop=Ai2d(debug_mode) + self.ai2d_crop.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + self.need_pad=False + + # 配置预处理操作,这里使用了crop、pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,center_xy_wh,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + self.pad_crop_params= self.get_padding_crop_param(center_xy_wh) + # 如果需要padding,配置padding部分,否则只走crop + if (self.pad_crop_params[0] != 0 or self.pad_crop_params[1] != 0 or self.pad_crop_params[2] != 0 or self.pad_crop_params[3] != 0): + self.need_pad=True + self.ai2d_pad.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_pad.pad([0, 0, 0, 0, self.pad_crop_params[0], self.pad_crop_params[1], self.pad_crop_params[2], self.pad_crop_params[3]], 0, [114, 114, 114]) + output_size=[self.rgb888p_size[0]+self.pad_crop_params[2]+self.pad_crop_params[3],self.rgb888p_size[1]+self.pad_crop_params[0]+self.pad_crop_params[1]] + + self.ai2d_pad.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,output_size[1],output_size[0]]) + self.ai2d_crop.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_crop.crop(int(self.pad_crop_params[4]),int(self.pad_crop_params[6]),int(self.pad_crop_params[5]-self.pad_crop_params[4]+1),int(self.pad_crop_params[7]-self.pad_crop_params[6]+1)) + self.ai2d_crop.build([1,3,output_size[1],output_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + else: + self.need_pad=False + self.ai2d_crop.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d_crop.crop(int(center_xy_wh[0]-self.pad_crop_params[8]/2.0),int(center_xy_wh[1]-self.pad_crop_params[8]/2.0),int(self.pad_crop_params[8]),int(self.pad_crop_params[8])) + self.ai2d_crop.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 重写预处理函数preprocess,因为该部分不是单纯的走一个ai2d做预处理,所以该函数需要重写 + def preprocess(self,input_np): + with ScopedTiming("preprocess",self.debug_mode>0): + if self.need_pad: + pad_output=self.ai2d_pad.run(input_np).to_numpy() + return [self.ai2d_crop.run(pad_output)] + else: + return [self.ai2d_crop.run(input_np)] + + # 自定义后处理,results是模型输出array的列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + return results[0] + + # 计算padding和crop参数 + def get_padding_crop_param(self,center_xy_wh): + s_z = round(np.sqrt((center_xy_wh[2] + self.CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])) * (center_xy_wh[3] + self.CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])))) * self.ratio_src_crop + c = (s_z + 1) / 2 + context_xmin = np.floor(center_xy_wh[0] - c + 0.5) + context_xmax = int(context_xmin + s_z - 1) + context_ymin = np.floor(center_xy_wh[1] - c + 0.5) + context_ymax = int(context_ymin + s_z - 1) + left_pad = int(max(0, -context_xmin)) + top_pad = int(max(0, -context_ymin)) + right_pad = int(max(0, int(context_xmax - self.rgb888p_size[0] + 1))) + bottom_pad = int(max(0, int(context_ymax - self.rgb888p_size[1] + 
1))) + context_xmin = context_xmin + left_pad + context_xmax = context_xmax + left_pad + context_ymin = context_ymin + top_pad + context_ymax = context_ymax + top_pad + return [top_pad,bottom_pad,left_pad,right_pad,context_xmin,context_xmax,context_ymin,context_ymax,s_z] + + # 重写deinit + def deinit(self): + with ScopedTiming("deinit",self.debug_mode > 0): + del self.ai2d_pad + del self.ai2d_crop + super().deinit() + + +class TrackerApp(AIBase): + def __init__(self,kmodel_path,crop_input_size,thresh,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # crop模型的输入尺寸 + self.crop_input_size=crop_input_size + # 跟踪框阈值 + self.thresh=thresh + # 跟踪框宽、高调整系数 + self.CONTEXT_AMOUNT = 0.5 + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + pass + + # 重写run函数,因为没有预处理过程,所以原来run操作中包含的preprocess->inference->postprocess不合适,这里只包含inference->postprocess + def run(self,input_np_1,input_np_2,center_xy_wh): + input_tensors=[] + input_tensors.append(nn.from_numpy(input_np_1)) + input_tensors.append(nn.from_numpy(input_np_2)) + results=self.inference(input_tensors) + return self.postprocess(results,center_xy_wh) + + + # 自定义后处理,results是模型输出array的列表,这里使用了aidemo的nanotracker_postprocess列表 + def postprocess(self,results,center_xy_wh): + with ScopedTiming("postprocess",self.debug_mode > 0): + det = aidemo.nanotracker_postprocess(results[0],results[1],[self.rgb888p_size[1],self.rgb888p_size[0]],self.thresh,center_xy_wh,self.crop_input_size[0],self.CONTEXT_AMOUNT) + return det + +class NanoTracker: + def __init__(self,track_crop_kmodel,track_src_kmodel,tracker_kmodel,crop_input_size,src_input_size,threshold=0.25,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 跟踪模版模型路径 + self.track_crop_kmodel=track_crop_kmodel + # 跟踪实时模型路径 + self.track_src_kmodel=track_src_kmodel + # 跟踪模型路径 + self.tracker_kmodel=tracker_kmodel + # 跟踪模版模型输入分辨率 + self.crop_input_size=crop_input_size + # 跟踪实时模型输入分辨率 + self.src_input_size=src_input_size + self.threshold=threshold + + self.CONTEXT_AMOUNT=0.5 # 跟踪框宽、高调整系数 + self.ratio_src_crop = 0.0 # src模型和crop模型输入比值 + self.track_x1 = float(600) # 起始跟踪目标框左上角点x + self.track_y1 = float(300) # 起始跟踪目标框左上角点y + self.track_w = float(100) # 起始跟踪目标框w + self.track_h = float(100) # 起始跟踪目标框h + self.draw_mean=[] # 初始目标框位置列表 + self.center_xy_wh = [] + self.track_boxes = [] + self.center_xy_wh_tmp = [] + self.track_boxes_tmp=[] + self.crop_output=None + self.src_output=None + # 跟踪框初始化时间 + self.seconds = 8 + self.endtime = time.time() + self.seconds + self.enter_init = True + + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.init_param() + + self.track_crop=TrackCropApp(self.track_crop_kmodel,model_input_size=self.crop_input_size,ratio_src_crop=self.ratio_src_crop,center_xy_wh=self.center_xy_wh,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + 
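# track_crop在初始化阶段提取目标模板特征,track_src提取每一帧的搜索区域特征,tracker将两组特征匹配回归出新的跟踪框 + 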
self.track_src=TrackSrcApp(self.track_src_kmodel,model_input_size=self.src_input_size,ratio_src_crop=self.ratio_src_crop,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.tracker=TrackerApp(self.tracker_kmodel,crop_input_size=self.crop_input_size,thresh=self.threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.track_crop.config_preprocess() + + # run函数 + def run(self,input_np): + # 在初始化时间内,crop模版部分的到跟踪模版特征,否则,对当前帧进行src推理得到特征并使用tracker对两个特征推理,得到跟踪框的坐标 + nowtime = time.time() + if (self.enter_init and nowtime <= self.endtime): + print("倒计时: " + str(self.endtime - nowtime) + " 秒") + self.crop_output=self.track_crop.run(input_np) + time.sleep(1) + return self.draw_mean + else: + self.track_src.config_preprocess(self.center_xy_wh) + self.src_output=self.track_src.run(input_np) + det=self.tracker.run(self.crop_output,self.src_output,self.center_xy_wh) + return det + + # 绘制效果,绘制跟踪框位置 + def draw_result(self,pl,box): + pl.osd_img.clear() + if self.enter_init: + pl.osd_img.draw_rectangle(box[0],box[1],box[2],box[3],color=(255, 0, 255, 0),thickness = 4) + if (time.time() > self.endtime): + self.enter_init = False + else: + self.track_boxes = box[0] + self.center_xy_wh = box[1] + track_bool = True + if (len(self.track_boxes) != 0): + track_bool = self.track_boxes[0] > 10 and self.track_boxes[1] > 10 and self.track_boxes[0] + self.track_boxes[2] < self.rgb888p_size[0] - 10 and self.track_boxes[1] + self.track_boxes[3] < self.rgb888p_size[1] - 10 + else: + track_bool = False + + if (len(self.center_xy_wh) != 0): + track_bool = track_bool and self.center_xy_wh[2] * self.center_xy_wh[3] < 40000 + else: + track_bool = False + if (track_bool): + self.center_xy_wh_tmp = self.center_xy_wh + self.track_boxes_tmp = self.track_boxes + x1 = int(float(self.track_boxes[0]) * self.display_size[0] / self.rgb888p_size[0]) + y1 = int(float(self.track_boxes[1]) * self.display_size[1] / self.rgb888p_size[1]) + w = int(float(self.track_boxes[2]) * self.display_size[0] / self.rgb888p_size[0]) + h = int(float(self.track_boxes[3]) * self.display_size[1] / self.rgb888p_size[1]) + pl.osd_img.draw_rectangle(x1, y1, w, h, color=(255, 255, 0, 0),thickness = 4) + else: + self.center_xy_wh = self.center_xy_wh_tmp + self.track_boxes = self.track_boxes_tmp + x1 = int(float(self.track_boxes[0]) * self.display_size[0] / self.rgb888p_size[0]) + y1 = int(float(self.track_boxes[1]) * self.display_size[1] / self.rgb888p_size[1]) + w = int(float(self.track_boxes[2]) * self.display_size[0] / self.rgb888p_size[0]) + h = int(float(self.track_boxes[3]) * self.display_size[1] / self.rgb888p_size[1]) + pl.osd_img.draw_rectangle(x1, y1, w, h, color=(255, 255, 0, 0),thickness = 4) + pl.osd_img.draw_string_advanced( x1 , y1-50,32, "请远离摄像头,保持跟踪物体大小基本一致!" , color=(255, 255 ,0 , 0)) + pl.osd_img.draw_string_advanced( x1 , y1-100,32, "请靠近中心!" 
, color=(255, 255 ,0 , 0)) + + # crop参数初始化 + def init_param(self): + self.ratio_src_crop = float(self.src_input_size[0])/float(self.crop_input_size[0]) + print(self.ratio_src_crop) + if (self.track_x1 < 50 or self.track_y1 < 50 or self.track_x1+self.track_w >= self.rgb888p_size[0]-50 or self.track_y1+self.track_h >= self.rgb888p_size[1]-50): + print("**剪切范围超出图像范围**") + else: + track_mean_x = self.track_x1 + self.track_w / 2.0 + track_mean_y = self.track_y1 + self.track_h / 2.0 + draw_mean_w = int(self.track_w / self.rgb888p_size[0] * self.display_size[0]) + draw_mean_h = int(self.track_h / self.rgb888p_size[1] * self.display_size[1]) + draw_mean_x = int(track_mean_x / self.rgb888p_size[0] * self.display_size[0] - draw_mean_w / 2.0) + draw_mean_y = int(track_mean_y / self.rgb888p_size[1] * self.display_size[1] - draw_mean_h / 2.0) + self.draw_mean=[draw_mean_x,draw_mean_y,draw_mean_w,draw_mean_h] + self.center_xy_wh = [track_mean_x,track_mean_y,self.track_w,self.track_h] + self.center_xy_wh_tmp=[track_mean_x,track_mean_y,self.track_w,self.track_h] + + self.track_boxes = [self.track_x1,self.track_y1,self.track_w,self.track_h,1] + self.track_boxes_tmp=np.array([self.track_x1,self.track_y1,self.track_w,self.track_h,1]) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 跟踪模板模型路径 + track_crop_kmodel_path="/sdcard/app/tests/kmodel/cropped_test127.kmodel" + # 跟踪实时模型路径 + track_src_kmodel_path="/sdcard/app/tests/kmodel/nanotrack_backbone_sim.kmodel" + # 跟踪模型路径 + tracker_kmodel_path="/sdcard/app/tests/kmodel/nanotracker_head_calib_k230.kmodel" + # 其他参数 + rgb888p_size=[1280,720] + track_crop_input_size=[127,127] + track_src_input_size=[255,255] + threshold=0.1 + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + track=NanoTracker(track_crop_kmodel_path,track_src_kmodel_path,tracker_kmodel_path,crop_input_size=track_crop_input_size,src_input_size=track_src_input_size,threshold=threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + output=track.run(img) # 推理当前帧 + track.draw_result(pl,output) # 绘制当前帧推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + track.track_crop.deinit() + track.track_src.deinit() + track.tracker.deinit() + pl.destroy() +``` + +### 2.20. 
yolov8n目标检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aidemo + +# 自定义YOLOv8检测类 +class ObjectDetectionApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,max_boxes_num,confidence_threshold=0.5,nms_threshold=0.2,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + self.labels=labels + # 模型输入分辨率 + self.model_input_size=model_input_size + # 阈值设置 + self.confidence_threshold=confidence_threshold + self.nms_threshold=nms_threshold + self.max_boxes_num=max_boxes_num + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # 检测框预置颜色值 + self.color_four=[(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), + (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), + (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), + (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), + (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157)] + # 宽高缩放比例 + self.x_factor = float(self.rgb888p_size[0])/self.model_input_size[0] + self.y_factor = float(self.rgb888p_size[1])/self.model_input_size[1] + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + result=results[0] + result = result.reshape((result.shape[0] * result.shape[1], result.shape[2])) + output_data = result.transpose() + boxes_ori = output_data[:,0:4] + scores_ori = output_data[:,4:] + confs_ori = np.max(scores_ori,axis=-1) + inds_ori = np.argmax(scores_ori,axis=-1) + boxes,scores,inds = [],[],[] + for i in range(len(boxes_ori)): + if confs_ori[i] > confidence_threshold: + scores.append(confs_ori[i]) + inds.append(inds_ori[i]) + x = boxes_ori[i,0] + y = boxes_ori[i,1] + w = boxes_ori[i,2] + h = boxes_ori[i,3] + left = int((x - 0.5 * w) * self.x_factor) + top = int((y - 0.5 * h) * self.y_factor) + right = int((x + 0.5 * w) * self.x_factor) + bottom = int((y + 0.5 * h) * self.y_factor) + boxes.append([left,top,right,bottom]) + if len(boxes)==0: + return [] + boxes = np.array(boxes) + scores = np.array(scores) + inds = np.array(inds) + # NMS过程 + keep = self.nms(boxes,scores,nms_threshold) + dets = np.concatenate((boxes, scores.reshape((len(boxes),1)), inds.reshape((len(boxes),1))), axis=1) + dets_out = [] + for keep_i in keep: 
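+ # keep为NMS后保留的框索引,dets每行为[left,top,right,bottom,score,class_index]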
+ dets_out.append(dets[keep_i]) + dets_out = np.array(dets_out) + dets_out = dets_out[:self.max_boxes_num, :] + return dets_out + + # 绘制结果 + def draw_result(self,pl,dets): + with ScopedTiming("display_draw",self.debug_mode >0): + if dets: + pl.osd_img.clear() + for det in dets: + x1, y1, x2, y2 = map(lambda x: int(round(x, 0)), det[:4]) + x= x1*self.display_size[0] // self.rgb888p_size[0] + y= y1*self.display_size[1] // self.rgb888p_size[1] + w = (x2 - x1) * self.display_size[0] // self.rgb888p_size[0] + h = (y2 - y1) * self.display_size[1] // self.rgb888p_size[1] + pl.osd_img.draw_rectangle(x,y, w, h, color=self.get_color(int(det[5])),thickness=4) + pl.osd_img.draw_string_advanced( x , y-50,32," " + self.labels[int(det[5])] + " " + str(round(det[4],2)) , color=self.get_color(int(det[5]))) + else: + pl.osd_img.clear() + + + # 多目标检测 非最大值抑制方法实现 + def nms(self,boxes,scores,thresh): + """Pure Python NMS baseline.""" + x1,y1,x2,y2 = boxes[:, 0],boxes[:, 1],boxes[:, 2],boxes[:, 3] + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = np.argsort(scores,axis = 0)[::-1] + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + new_x1,new_y1,new_x2,new_y2,new_areas = [],[],[],[],[] + for order_i in order: + new_x1.append(x1[order_i]) + new_x2.append(x2[order_i]) + new_y1.append(y1[order_i]) + new_y2.append(y2[order_i]) + new_areas.append(areas[order_i]) + new_x1 = np.array(new_x1) + new_x2 = np.array(new_x2) + new_y1 = np.array(new_y1) + new_y2 = np.array(new_y2) + xx1 = np.maximum(x1[i], new_x1) + yy1 = np.maximum(y1[i], new_y1) + xx2 = np.minimum(x2[i], new_x2) + yy2 = np.minimum(y2[i], new_y2) + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + new_areas = np.array(new_areas) + ovr = inter / (areas[i] + new_areas - inter) + new_order = [] + for ovr_i,ind in enumerate(ovr): + if ind < thresh: + new_order.append(order[ovr_i]) + order = np.array(new_order,dtype=np.uint8) + return keep + + # 根据当前类别索引获取框的颜色 + def get_color(self, x): + idx=x%len(self.color_four) + return self.color_four[idx] + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/yolov8n_320.kmodel" + labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] + # 其它参数设置 + confidence_threshold = 0.2 + nms_threshold = 0.2 + max_boxes_num = 50 + rgb888p_size=[320,320] + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义目标检测实例 + 
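# 本例rgb888p_size与模型输入均为320x320,x_factor/y_factor为1,后处理得到的坐标直接对应AI输入图 + 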
ob_det=ObjectDetectionApp(kmodel_path,labels=labels,model_input_size=[320,320],max_boxes_num=max_boxes_num,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + ob_det.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res=ob_det.run(img) + # 绘制结果到PipeLine的osd图像 + ob_det.draw_result(pl,res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + ob_det.deinit() + pl.destroy() +``` + +### 2.21. OCR检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube + +# 自定义OCR检测类 +class OCRDetectionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,mask_threshold=0.5,box_threshold=0.2,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 模型输入分辨率 + self.model_input_size=model_input_size + # 分类阈值 + self.mask_threshold=mask_threshold + self.box_threshold=box_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + top,bottom,left,right=self.get_padding_param() + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [0,0,0]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # chw2hwc + hwc_array=self.chw2hwc(self.cur_img) + # 这里使用了aicube封装的接口seg_post_process做后处理,返回一个和display_size相同分辨率的mask图 + # det_boxes结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...] 
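+ # det_box[0]为裁剪出的文本区域图像(NHWC),det_box[1]为其在原图上的4个顶点坐标,检测示例仅保留顶点坐标用于绘制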
+ det_boxes = aicube.ocr_post_process(results[0][:,:,:,0].reshape(-1), hwc_array.reshape(-1),self.model_input_size,self.rgb888p_size, self.mask_threshold, self.box_threshold) + all_boxes_pos=[] + for det_box in det_boxes: + all_boxes_pos.append(det_box[1]) + return all_boxes_pos + + # 绘制结果 + def draw_result(self,pl,all_boxes_pos): + with ScopedTiming("display_draw",self.debug_mode >0): + pl.osd_img.clear() + # 一次绘制四条边,得到文本检测的四边形 + for i in range(len(all_boxes_pos)): + for j in range(4): + x1=all_boxes_pos[i][2*j]*self.display_size[0]//self.rgb888p_size[0] + y1=all_boxes_pos[i][2*j+1]*self.display_size[1]//self.rgb888p_size[1] + x2=all_boxes_pos[i][(2*j+2)%8]*self.display_size[0]//self.rgb888p_size[0] + y2=all_boxes_pos[i][(2*j+3)%8]*self.display_size[1]//self.rgb888p_size[1] + pl.osd_img.draw_line(int(x1),int(y1),int(x2),int(y2),color=(255,255,0,0),thickness=4) + + # 计算padding参数 + def get_padding_param(self): + # 右padding或下padding + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * input_width) + new_h = (int)(ratio * input_high) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return top, bottom, left, right + + # chw2hwc + def chw2hwc(self,features): + ori_shape = (features.shape[0], features.shape[1], features.shape[2]) + c_hw_ = features.reshape((ori_shape[0], ori_shape[1] * ori_shape[2])) + hw_c_ = c_hw_.transpose() + new_array = hw_c_.copy() + hwc_array = new_array.reshape((ori_shape[1], ori_shape[2], ori_shape[0])) + del c_hw_ + del hw_c_ + del new_array + return hwc_array + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/ocr_det_int16.kmodel" + # kmodel其它参数设置 + mask_threshold = 0.25 + box_threshold = 0.3 + rgb888p_size=[640,360] + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义OCR检测实例 + ocr_det=OCRDetectionApp(kmodel_path,model_input_size=[640,640],mask_threshold=mask_threshold,box_threshold=box_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + ocr_det.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res=ocr_det.run(img) + # 绘制结果到PipeLine的osd图像 + ocr_det.draw_result(pl,res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + ocr_det.deinit() + pl.destroy() +``` + +### 2.22. 
OCR识别 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义OCR检测类 +class OCRDetectionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,mask_threshold=0.5,box_threshold=0.2,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 模型输入分辨率 + self.model_input_size=model_input_size + # 分类阈值 + self.mask_threshold=mask_threshold + self.box_threshold=box_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + top,bottom,left,right=self.get_padding_param() + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [0,0,0]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # chw2hwc + hwc_array=self.chw2hwc(self.cur_img) + # 这里使用了aicube封装的接口ocr_post_process做后处理,返回的det_boxes结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...] 
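+ # 与检测示例不同,这里返回完整的det_boxes,其中裁剪出的文本区域随后作为识别模型的输入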
+ det_boxes = aicube.ocr_post_process(results[0][:,:,:,0].reshape(-1), hwc_array.reshape(-1),self.model_input_size,self.rgb888p_size, self.mask_threshold, self.box_threshold) + return det_boxes + + # 计算padding参数 + def get_padding_param(self): + # 右padding或下padding + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * input_width) + new_h = (int)(ratio * input_high) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return top, bottom, left, right + + # chw2hwc + def chw2hwc(self,features): + ori_shape = (features.shape[0], features.shape[1], features.shape[2]) + c_hw_ = features.reshape((ori_shape[0], ori_shape[1] * ori_shape[2])) + hw_c_ = c_hw_.transpose() + new_array = hw_c_.copy() + hwc_array = new_array.reshape((ori_shape[1], ori_shape[2], ori_shape[0])) + del c_hw_ + del hw_c_ + del new_array + return hwc_array + +# 自定义OCR识别任务类 +class OCRRecognitionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,dict_path,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 识别模型输入分辨率 + self.model_input_size=model_input_size + self.dict_path=dict_path + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + self.dict_word=None + # 读取OCR的字典 + self.read_dict() + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.RGB_packed,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None,input_np=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + top,bottom,left,right=self.get_padding_param(ai2d_input_size,self.model_input_size) + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [0,0,0]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 如果传入input_np,输入shape为input_np的shape,如果不传入,输入shape为[1,3,ai2d_input_size[1],ai2d_input_size[0]] + self.ai2d.build([input_np.shape[0],input_np.shape[1],input_np.shape[2],input_np.shape[3]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + preds = np.argmax(results[0], axis=2).reshape((-1)) + output_txt = "" + for i in range(len(preds)): + # 当前识别字符不是字典的最后一个字符并且和前一个字符不重复(去重),加入识别结果字符串 + if preds[i] != (len(self.dict_word) - 1) and (not (i > 0 and preds[i - 1] == preds[i])): + output_txt = output_txt + self.dict_word[preds[i]] + return output_txt + + # 计算padding参数 + def get_padding_param(self,src_size,dst_size): + # 右padding或下padding + dst_w = dst_size[0] + dst_h = dst_size[1] + input_width = src_size[0] + input_high = src_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * input_width) 
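+ # 例如文本块为200x50、模型输入为512x32时,ratio=min(512/200,32/50)=0.64,new_w=128、new_h=32,只需在右侧padding 384列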
+ new_h = (int)(ratio * input_high) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(0)) + bottom = (int)(round(dh * 2 + 0.1)) + left = (int)(round(0)) + right = (int)(round(dw * 2 - 0.1)) + return top, bottom, left, right + + def read_dict(self): + if self.dict_path!="": + with open(dict_path, 'r') as file: + line_one = file.read(100000) + line_list = line_one.split("\r\n") + self.dict_word = {num: char.replace("\r", "").replace("\n", "") for num, char in enumerate(line_list)} + + +class OCRDetRec: + def __init__(self,ocr_det_kmodel,ocr_rec_kmodel,det_input_size,rec_input_size,dict_path,mask_threshold=0.25,box_threshold=0.3,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + # OCR检测模型路径 + self.ocr_det_kmodel=ocr_det_kmodel + # OCR识别模型路径 + self.ocr_rec_kmodel=ocr_rec_kmodel + # OCR检测模型输入分辨率 + self.det_input_size=det_input_size + # OCR识别模型输入分辨率 + self.rec_input_size=rec_input_size + # 字典路径 + self.dict_path=dict_path + # 置信度阈值 + self.mask_threshold=mask_threshold + # nms阈值 + self.box_threshold=box_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + self.ocr_det=OCRDetectionApp(self.ocr_det_kmodel,model_input_size=self.det_input_size,mask_threshold=self.mask_threshold,box_threshold=self.box_threshold,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.ocr_rec=OCRRecognitionApp(self.ocr_rec_kmodel,model_input_size=self.rec_input_size,dict_path=self.dict_path,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.ocr_det.config_preprocess() + + # run函数 + def run(self,input_np): + # 先进行OCR检测 + det_res=self.ocr_det.run(input_np) + boxes=[] + ocr_res=[] + for det in det_res: + # 对得到的每个检测框执行OCR识别 + self.ocr_rec.config_preprocess(input_image_size=[det[0].shape[2],det[0].shape[1]],input_np=det[0]) + ocr_str=self.ocr_rec.run(det[0]) + ocr_res.append(ocr_str) + boxes.append(det[1]) + gc.collect() + return boxes,ocr_res + + # 绘制OCR检测识别效果 + def draw_result(self,pl,det_res,rec_res): + pl.osd_img.clear() + if det_res: + # 循环绘制所有检测到的框 + for j in range(len(det_res)): + # 将原图的坐标点转换成显示的坐标点,循环绘制四条直线,得到一个矩形框 + for i in range(4): + x1 = det_res[j][(i * 2)] / self.rgb888p_size[0] * self.display_size[0] + y1 = det_res[j][(i * 2 + 1)] / self.rgb888p_size[1] * self.display_size[1] + x2 = det_res[j][((i + 1) * 2) % 8] / self.rgb888p_size[0] * self.display_size[0] + y2 = det_res[j][((i + 1) * 2 + 1) % 8] / self.rgb888p_size[1] * self.display_size[1] + pl.osd_img.draw_line((int(x1), int(y1), int(x2), int(y2)), color=(255, 0, 0, 255),thickness=5) + pl.osd_img.draw_string_advanced(int(x1),int(y1),32,rec_res[j],color=(0,0,255)) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # OCR检测模型路径 + ocr_det_kmodel_path="/sdcard/app/tests/kmodel/ocr_det_int16.kmodel" + # OCR识别模型路径 + ocr_rec_kmodel_path="/sdcard/app/tests/kmodel/ocr_rec_int16.kmodel" + # 其他参数 + dict_path="/sdcard/app/tests/utils/dict.txt" + rgb888p_size=[640,360] + ocr_det_input_size=[640,640] + ocr_rec_input_size=[512,32] + mask_threshold=0.25 + box_threshold=0.3 + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + 
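# OCRDetRec串联检测与识别两个模型,dict.txt为识别模型输出索引到字符的映射表 + 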
ocr=OCRDetRec(ocr_det_kmodel_path,ocr_rec_kmodel_path,det_input_size=ocr_det_input_size,rec_input_size=ocr_rec_input_size,dict_path=dict_path,mask_threshold=mask_threshold,box_threshold=box_threshold,rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_res,rec_res=ocr.run(img) # 推理当前帧 + ocr.draw_result(pl,det_res,rec_res) # 绘制当前帧推理结果 + pl.show_image() # 展示当前帧推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + ocr.ocr_det.deinit() + ocr.ocr_rec.deinit() + pl.destroy() +``` + +### 2.23. 人体检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube + +# 自定义人体检测类 +class PersonDetectionApp(AIBase): + def __init__(self,kmodel_path,model_input_size,labels,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False,strides=[8,16,32],rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 模型输入分辨率 + self.model_input_size=model_input_size + # 标签 + self.labels=labels + # 检测anchors设置 + self.anchors=anchors + # 特征图降采样倍数 + self.strides=strides + # 置信度阈值设置 + self.confidence_threshold=confidence_threshold + # nms阈值设置 + self.nms_threshold=nms_threshold + self.nms_option=nms_option + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + top,bottom,left,right=self.get_padding_param() + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [0,0,0]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # 这里使用了aicube模型的后处理接口anchorbasedet_post_preocess + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + return dets + + # 绘制结果 + def draw_result(self,pl,dets): + with ScopedTiming("display_draw",self.debug_mode >0): + if dets: + pl.osd_img.clear() + for det_box in dets: + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w = float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0] + h = float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1] + x1 = int(x1 * self.display_size[0] // self.rgb888p_size[0]) + y1 = int(y1 * self.display_size[1] // self.rgb888p_size[1]) + x2 = int(x2 * 
self.display_size[0] // self.rgb888p_size[0]) + y2 = int(y2 * self.display_size[1] // self.rgb888p_size[1]) + if (h<(0.1*self.display_size[0])): + continue + if (w<(0.25*self.display_size[0]) and ((x1<(0.03*self.display_size[0])) or (x2>(0.97*self.display_size[0])))): + continue + if (w<(0.15*self.display_size[0]) and ((x1<(0.01*self.display_size[0])) or (x2>(0.99*self.display_size[0])))): + continue + pl.osd_img.draw_rectangle(x1 , y1 , int(w) , int(h), color=(255, 0, 255, 0), thickness = 2) + pl.osd_img.draw_string_advanced( x1 , y1-50,32, " " + self.labels[det_box[0]] + " " + str(round(det_box[1],2)), color=(255,0, 255, 0)) + else: + pl.osd_img.clear() + + # 计算padding参数 + def get_padding_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * input_width) + new_h = (int)(ratio * input_high) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw - 0.1)) + return top, bottom, left, right + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/person_detect_yolov5n.kmodel" + # 其它参数设置 + confidence_threshold = 0.2 + nms_threshold = 0.6 + rgb888p_size=[1920,1080] + labels = ["person"] + anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326] + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义人体检测实例 + person_det=PersonDetectionApp(kmodel_path,model_input_size=[640,640],labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + person_det.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res=person_det.run(img) + # 绘制结果到PipeLine的osd图像 + person_det.draw_result(pl,res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + person_det.deinit() + pl.destroy() +``` + +### 2.24. 
人体关键点检测 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aidemo + +# 自定义人体关键点检测类 +class PersonKeyPointApp(AIBase): + def __init__(self,kmodel_path,model_input_size,confidence_threshold=0.2,nms_threshold=0.5,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值设置 + self.confidence_threshold=confidence_threshold + # nms阈值设置 + self.nms_threshold=nms_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + #骨骼信息 + self.SKELETON = [(16, 14),(14, 12),(17, 15),(15, 13),(12, 13),(6, 12),(7, 13),(6, 7),(6, 8),(7, 9),(8, 10),(9, 11),(2, 3),(1, 2),(1, 3),(2, 4),(3, 5),(4, 6),(5, 7)] + #肢体颜色 + self.LIMB_COLORS = [(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 255, 51, 255),(255, 255, 51, 255),(255, 255, 51, 255),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0)] + #关键点颜色,共17个 + self.KPS_COLORS = [(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255)] + + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + top,bottom,left,right=self.get_padding_param() + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [0,0,0]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # 这里使用了aidemo库的person_kp_postprocess接口 + results = aidemo.person_kp_postprocess(results[0],[self.rgb888p_size[1],self.rgb888p_size[0]],self.model_input_size,self.confidence_threshold,self.nms_threshold) + return results + + #绘制结果,绘制人体关键点 + def draw_result(self,pl,res): + with ScopedTiming("display_draw",self.debug_mode >0): + if res[0]: + pl.osd_img.clear() + kpses = res[1] + for i in range(len(res[0])): + for k in range(17+2): + if (k < 17): + kps_x,kps_y,kps_s = round(kpses[i][k][0]),round(kpses[i][k][1]),kpses[i][k][2] + kps_x1 = int(float(kps_x) * self.display_size[0] // self.rgb888p_size[0]) + kps_y1 = int(float(kps_y) * self.display_size[1] // 
self.rgb888p_size[1]) + if (kps_s > 0): + pl.osd_img.draw_circle(kps_x1,kps_y1,5,self.KPS_COLORS[k],4) + ske = self.SKELETON[k] + pos1_x,pos1_y= round(kpses[i][ske[0]-1][0]),round(kpses[i][ske[0]-1][1]) + pos1_x_ = int(float(pos1_x) * self.display_size[0] // self.rgb888p_size[0]) + pos1_y_ = int(float(pos1_y) * self.display_size[1] // self.rgb888p_size[1]) + + pos2_x,pos2_y = round(kpses[i][(ske[1] -1)][0]),round(kpses[i][(ske[1] -1)][1]) + pos2_x_ = int(float(pos2_x) * self.display_size[0] // self.rgb888p_size[0]) + pos2_y_ = int(float(pos2_y) * self.display_size[1] // self.rgb888p_size[1]) + + pos1_s,pos2_s = kpses[i][(ske[0] -1)][2],kpses[i][(ske[1] -1)][2] + if (pos1_s > 0.0 and pos2_s >0.0): + pl.osd_img.draw_line(pos1_x_,pos1_y_,pos2_x_,pos2_y_,self.LIMB_COLORS[k],4) + gc.collect() + else: + pl.osd_img.clear() + + # 计算padding参数 + def get_padding_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * input_width) + new_h = (int)(ratio * input_high) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw - 0.1)) + return top, bottom, left, right + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/yolov8n-pose.kmodel" + # 其它参数设置 + confidence_threshold = 0.2 + nms_threshold = 0.5 + rgb888p_size=[1920,1080] + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义人体关键点检测实例 + person_kp=PersonKeyPointApp(kmodel_path,model_input_size=[320,320],confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + person_kp.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res=person_kp.run(img) + # 绘制结果到PipeLine的osd图像 + person_kp.draw_result(pl,res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + person_kp.deinit() + pl.destroy() +``` + +### 2.25. 
拼图游戏 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + self.labels=labels + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + self.strides = strides # 特征下采样倍数 + self.nms_option = nms_option # NMS选项,如果为True做类间NMS,如果为False做类内NMS + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型的输出array列表,这里使用了aicube库的anchorbasedet_post_process + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势关键点分类任务类 +class HandKPClassApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + 
self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.crop_params = self.get_crop_param(det) + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型的输出array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + results=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + results_show = np.zeros(results.shape,dtype=np.int16) + results_show[0::2] = (results[0::2] * self.crop_params[3] + self.crop_params[0]) + results_show[1::2] = (results[1::2] * self.crop_params[2] + self.crop_params[1]) + return results_show + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + +# 拼图游戏任务类 +class PuzzleGame: + def __init__(self,hand_det_kmodel,hand_kp_kmodel,det_input_size,kp_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 手掌检测模型路径 + self.hand_det_kmodel=hand_det_kmodel + # 手掌关键点模型路径 + self.hand_kp_kmodel=hand_kp_kmodel + # 手掌检测模型输入分辨率 + self.det_input_size=det_input_size + # 手掌关键点模型输入分辨率 + self.kp_input_size=kp_input_size + self.labels=labels + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.nms_option=nms_option + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + + self.level = 3 # 游戏级别 目前只支持设置为 3 + self.puzzle_width = self.display_size[1] # 设定 拼图宽 + self.puzzle_height = self.display_size[1] # 设定 拼图高 + self.puzzle_ori_width = self.display_size[0] - self.puzzle_width - 50 # 设定 原始拼图宽 + self.puzzle_ori_height = self.display_size[0] - self.puzzle_height - 50 # 设定 原始拼图高 + + self.every_block_width = int(self.puzzle_width/self.level) # 设定 拼图块宽 + self.every_block_height = int(self.puzzle_height/self.level) # 设定 拼图块高 
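+        # 补充示例(文档说明,按默认 hdmi 显示分辨率 1920x1080 估算,lcd 模式下数值相应变化):
+        # puzzle_width = puzzle_height = 1080,每个拼图块为 int(1080/3) = 360 像素;
+        # puzzle_ori_width = puzzle_ori_height = 1920 - 1080 - 50 = 790,右侧参考图每块约 int(790/3) = 263 像素。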
+ self.ori_every_block_width = int(self.puzzle_ori_width/self.level) # 设定 原始拼图宽 + self.ori_every_block_height = int(self.puzzle_ori_height/self.level) # 设定 原始拼图高 + self.ratio_num = self.every_block_width/360.0 # 字体比例 + self.blank_x = 0 # 空白块 角点x + self.blank_y = 0 # 空白块 角点y + self.direction_vec = [-1,1,-1,1] # 空白块四种移动方向 + self.exact_division_x = 0 # 交换块 角点x + self.exact_division_y = 0 # 交换块 角点y + self.distance_tow_points = self.display_size[0] # 两手指距离 + self.distance_thred = self.every_block_width*0.4 # 两手指距离阈值 + self.osd_frame_tmp = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + self.osd_frame_tmp_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=self.osd_frame_tmp) + self.move_mat = np.zeros((self.every_block_height,self.every_block_width,4),dtype=np.uint8) + self.init_osd_frame() + self.hand_det=HandDetApp(self.hand_det_kmodel,self.labels,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.hand_kp=HandKPClassApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.hand_det.config_preprocess() + + # 初始化拼图界面,绘制两个3*3的拼图 + def init_osd_frame(self): + self.osd_frame_tmp[0:self.puzzle_height,0:self.puzzle_width,3] = 100 + self.osd_frame_tmp[0:self.puzzle_height,0:self.puzzle_width,2] = 150 + self.osd_frame_tmp[0:self.puzzle_height,0:self.puzzle_width,1] = 130 + self.osd_frame_tmp[0:self.puzzle_height,0:self.puzzle_width,0] = 127 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.puzzle_ori_width,self.puzzle_width+25:self.puzzle_width+25+self.puzzle_ori_height,3] = 100 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.puzzle_ori_width,self.puzzle_width+25:self.puzzle_width+25+self.puzzle_ori_height,2] = 150 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.puzzle_ori_width,self.puzzle_width+25:self.puzzle_width+25+self.puzzle_ori_height,1] = 130 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.puzzle_ori_width,self.puzzle_width+25:self.puzzle_width+25+self.puzzle_ori_height,0] = 127 + for i in range(self.level*self.level): + self.osd_frame_tmp_img.draw_rectangle((i%self.level)*self.every_block_width,(i//self.level)*self.every_block_height,self.every_block_width,self.every_block_height,(255,0,0,0),5) + self.osd_frame_tmp_img.draw_string_advanced((i%self.level)*self.every_block_width + 55,(i//self.level)*self.every_block_height + 45,int(60*self.ratio_num),str(i),color=(255,0,0,255)) + self.osd_frame_tmp_img.draw_rectangle(self.puzzle_width+25 + (i%self.level)*self.ori_every_block_width,(self.display_size[1]-self.puzzle_ori_height)//2 + (i//self.level)*self.ori_every_block_height,self.ori_every_block_width,self.ori_every_block_height,(255,0,0,0),5) + self.osd_frame_tmp_img.draw_string_advanced(self.puzzle_width+25 + (i%self.level)*self.ori_every_block_width + 50,(self.display_size[1]-self.puzzle_ori_height)//2 + (i//self.level)*self.ori_every_block_height + 25,int(50*self.ratio_num),str(i),color=(255,0,0,255)) + 
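+        # 补充注释:下面先将左侧拼图与右侧参考图各自的第 0 块(左上角)重新填色,标记空白块位置;
+        # 随后让空白块按 direction_vec 随机移动(共尝试 self.level*10 次),在每步均为合法移动的前提下打乱左侧拼图。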
self.osd_frame_tmp[0:self.every_block_height,0:self.every_block_width,3] = 114 + self.osd_frame_tmp[0:self.every_block_height,0:self.every_block_width,2] = 114 + self.osd_frame_tmp[0:self.every_block_height,0:self.every_block_width,1] = 114 + self.osd_frame_tmp[0:self.every_block_height,0:self.every_block_width,0] = 220 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.ori_every_block_width,self.puzzle_width+25:self.puzzle_width+25+self.ori_every_block_height,3] = 114 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.ori_every_block_width,self.puzzle_width+25:self.puzzle_width+25+self.ori_every_block_height,2] = 114 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.ori_every_block_width,self.puzzle_width+25:self.puzzle_width+25+self.ori_every_block_height,1] = 114 + self.osd_frame_tmp[(self.display_size[1]-self.puzzle_ori_height)//2:(self.display_size[1]-self.puzzle_ori_height)//2+self.ori_every_block_width,self.puzzle_width+25:self.puzzle_width+25+self.ori_every_block_height,0] = 220 + + for i in range(self.level*10): + k230_random = int(random.random() * 100) % 4 + blank_x_tmp = self.blank_x + blank_y_tmp = self.blank_y + if (k230_random < 2): + blank_x_tmp = self.blank_x + self.direction_vec[k230_random] + else: + blank_y_tmp = self.blank_y + self.direction_vec[k230_random] + + if ((blank_x_tmp >= 0 and blank_x_tmp < self.level) and (blank_y_tmp >= 0 and blank_y_tmp < self.level) and (abs(self.blank_x - blank_x_tmp) <= 1 and abs(self.blank_y - blank_y_tmp) <= 1)): + move_rect = [blank_x_tmp*self.every_block_width,blank_y_tmp*self.every_block_height,self.every_block_width,self.every_block_height] + blank_rect = [self.blank_x*self.every_block_width,self.blank_y*self.every_block_height,self.every_block_width,self.every_block_height] + self.move_mat[:] = self.osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] + self.osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = self.osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] + self.osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = self.move_mat[:] + self.blank_x = blank_x_tmp + self.blank_y = blank_y_tmp + + # run函数 + def run(self,input_np): + # 先进行手掌检测 + det_boxes=self.hand_det.run(input_np) + det_res=[] + two_point = np.zeros((4),dtype=np.int16) + # 对于每一个检测到的手掌做筛选 + for det_box in det_boxes: + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or (x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + det_res.append(det_box) + if len(det_res)!=0: + # 对第一个手掌做手掌关键点检测 + det_box=det_res[0] + self.hand_kp.config_preprocess(det_box) + results_show=self.hand_kp.run(input_np) + two_point[0],two_point[1],two_point[2],two_point[3] = results_show[8],results_show[9],results_show[16+8],results_show[16+9] + return det_res,two_point + + # 绘制效果,手指拇指和中指位置判断拼图移动位置,并与周边空白位置做交换 + def draw_result(self,pl,det_res,two_point): + pl.osd_img.clear() + if len(det_res)==1: + if 
(two_point[1] <= self.rgb888p_size[0]): + self.distance_tow_points = np.sqrt(pow((two_point[0]-two_point[2]),2) + pow((two_point[1] - two_point[3]),2))* 1.0 / self.rgb888p_size[0] * self.display_size[0] + self.exact_division_x = int((two_point[0] * 1.0 / self.rgb888p_size[0] * self.display_size[0])//self.every_block_width) + self.exact_division_y = int((two_point[1] * 1.0 / self.rgb888p_size[1] * self.display_size[1])//self.every_block_height) + + + if (self.distance_tow_points < self.distance_thred and self.exact_division_x >= 0 and self.exact_division_x < self.level and self.exact_division_y >= 0 and self.exact_division_y < self.level): + if (abs(self.blank_x - self.exact_division_x) == 1 and abs(self.blank_y - self.exact_division_y) == 0): + move_rect = [self.exact_division_x*self.every_block_width,self.exact_division_y*self.every_block_height,self.every_block_width,self.every_block_height] + blank_rect = [self.blank_x*self.every_block_width,self.blank_y*self.every_block_height,self.every_block_width,self.every_block_height] + + self.move_mat[:] = self.osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] + self.osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = self.osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] + self.osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = self.move_mat[:] + + self.blank_x = self.exact_division_x + elif (abs(self.blank_y - self.exact_division_y) == 1 and abs(self.blank_x - self.exact_division_x) == 0): + move_rect = [self.exact_division_x*self.every_block_width,self.exact_division_y*self.every_block_height,self.every_block_width,self.every_block_height] + blank_rect = [self.blank_x*self.every_block_width,self.blank_y*self.every_block_height,self.every_block_width,self.every_block_height] + + self.move_mat[:] = self.osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] + self.osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = self.osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] + self.osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = self.move_mat[:] + + self.blank_y = self.exact_division_y + + pl.osd_img.copy_from(self.osd_frame_tmp) + x1 = int(two_point[0] * 1.0 * self.display_size[0] // self.rgb888p_size[0]) + y1 = int(two_point[1] * 1.0 * self.display_size[1] // self.rgb888p_size[1]) + pl.osd_img.draw_circle(x1, y1, 1, color=(255, 0, 255, 255),thickness=4,fill=False) + else: + pl.osd_img.copy_from(self.osd_frame_tmp) + x1 = int(two_point[0] * 1.0 * self.display_size[0] // self.rgb888p_size[0]) + y1 = int(two_point[1] * 1.0 * self.display_size[1] // self.rgb888p_size[1]) + pl.osd_img.draw_circle(x1, y1, 1, color=(255, 255, 255, 0),thickness=4,fill=False) + else: + pl.osd_img.copy_from(self.osd_frame_tmp) + pl.osd_img.draw_string_advanced((self.display_size[0]//2),(self.display_size[1]//2),32,"请保证一只手入镜!",color=(255,0,0)) + + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手掌关键点模型路径 + hand_kp_kmodel_path="/sdcard/app/tests/kmodel/handkp_det.kmodel" + # 其他参数 + 
anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_kp_input_size=[256,256] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + pg=PuzzleGame(hand_det_kmodel_path,hand_kp_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_kp_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_res,two_point=pg.run(img) # 推理当前帧 + pg.draw_result(pl,det_res,two_point) # 绘制当前帧推理结果 + pl.show_image() # 展示推理结果 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + pg.hand_det.deinit() + pg.hand_kp.deinit() + pl.destroy() +``` + +### 2.26. yolov8分割 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aidemo + +# 自定义YOLOv8分割类 +class SegmentationApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,confidence_threshold=0.2,nms_threshold=0.5,mask_threshold=0.5,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # 模型路径 + self.kmodel_path=kmodel_path + # 分割类别标签 + self.labels=labels + # 模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + # mask阈值 + self.mask_threshold=mask_threshold + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # 检测框预置颜色值 + self.color_four=[(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), + (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), + (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), + (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), + (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157)] + # 分割结果的numpy.array,用于给到aidemo后处理接口 + self.masks=np.zeros((1,self.display_size[1],self.display_size[0],4)) + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + top,bottom,left,right=self.get_padding_param() + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [114,114,114]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + 
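+            # 补充说明:pad 与 resize 共同完成 letterbox 式预处理,图像按比例缩放、空余部分用 114 填充,最终得到模型输入尺寸;
+            # 在本示例 __main__ 的默认配置下(rgb888p_size 与模型输入均为 320x320),padding 全为 0,resize 也不会改变尺寸。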
self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # 这里使用了aidemo的segment_postprocess接口 + seg_res = aidemo.segment_postprocess(results,[self.rgb888p_size[1],self.rgb888p_size[0]],self.model_input_size,[self.display_size[1],self.display_size[0]],self.confidence_threshold,self.nms_threshold,self.mask_threshold,self.masks) + return seg_res + + # 绘制结果 + def draw_result(self,pl,seg_res): + with ScopedTiming("display_draw",self.debug_mode >0): + if seg_res[0]: + pl.osd_img.clear() + mask_img=image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=self.masks) + pl.osd_img.copy_from(mask_img) + dets,ids,scores = seg_res[0],seg_res[1],seg_res[2] + for i, det in enumerate(dets): + x1, y1, w, h = map(lambda x: int(round(x, 0)), det) + pl.osd_img.draw_string_advanced(x1,y1-50,32, " " + self.labels[int(ids[i])] + " " + str(round(scores[i],2)) , color=self.get_color(int(ids[i]))) + else: + pl.osd_img.clear() + + # 计算padding参数 + def get_padding_param(self): + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + ratio_w = float(dst_w) / self.rgb888p_size[0] + ratio_h = float(dst_h) / self.rgb888p_size[1] + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + new_w = (int)(ratio * self.rgb888p_size[0]) + new_h = (int)(ratio * self.rgb888p_size[1]) + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = (int)(round(dh - 0.1)) + bottom = (int)(round(dh + 0.1)) + left = (int)(round(dw - 0.1)) + right = (int)(round(dw + 0.1)) + return top, bottom, left, right + + # 根据当前类别索引获取框的颜色 + def get_color(self, x): + idx=x%len(self.color_four) + return self.color_four[idx] + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/yolov8n_seg_320.kmodel" + labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] + #其它参数设置 + confidence_threshold = 0.2 + nms_threshold = 0.5 + mask_threshold=0.5 + rgb888p_size=[320,320] + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义YOLOV8分割示例 + seg=SegmentationApp(kmodel_path,labels=labels,model_input_size=[320,320],confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,mask_threshold=mask_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + seg.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + 
img=pl.get_frame() + # 推理当前帧 + seg_res=seg.run(img) + # 绘制结果到PipeLine的osd图像 + seg.draw_result(pl,seg_res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + seg.deinit() + pl.destroy() +``` + +### 2.27. 自学习 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube + +# 自定义自学习类 +class SelfLearningApp(AIBase): + def __init__(self,kmodel_path,model_input_size,labels,top_k,threshold,database_path,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 模型输入分辨率 + self.model_input_size=model_input_size + self.labels=labels + self.database_path=database_path + # sensor给到AI的图像分辨率 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # 识别阈值 + self.threshold = threshold + # 选择top_k个相似度大于阈值的结果类别 + self.top_k = top_k + #对应类别注册特征数量 + self.features=[2,2] + #注册单个特征中途间隔帧数 + self.time_one=60 + self.time_all = 0 + self.time_now = 0 + # 类别索引 + self.category_index = 0 + # 特征化部分剪切宽高 + self.crop_w = 400 + self.crop_h = 400 + # crop的位置 + self.crop_x = self.rgb888p_size[0] / 2.0 - self.crop_w / 2.0 + self.crop_y = self.rgb888p_size[1] / 2.0 - self.crop_h / 2.0 + self.crop_x_osd=0 + self.crop_y_osd=0 + self.crop_w_osd=0 + self.crop_h_osd=0 + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + self.data_init() + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.ai2d.crop(int(self.crop_x),int(self.crop_y),int(self.crop_w),int(self.crop_h)) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + return results[0][0] + + # 绘制结果,绘制特征采集框和特征分类框 + def draw_result(self,pl,feature): + pl.osd_img.clear() + with ScopedTiming("display_draw",self.debug_mode >0): + pl.osd_img.draw_rectangle(self.crop_x_osd,self.crop_y_osd, self.crop_w_osd, self.crop_h_osd, color=(255, 255, 0, 255), thickness = 4) + if (self.category_index < len(self.labels)): + self.time_now += 1 + pl.osd_img.draw_string_advanced(50, self.crop_y_osd-50, 30,"请将待添加类别放入框内进行特征采集:"+self.labels[self.category_index] + "_" + str(int(self.time_now-1) // self.time_one) + ".bin", color=(255,255,0,0)) + with open(self.database_path + self.labels[self.category_index] + "_" + str(int(self.time_now-1) // self.time_one) + ".bin", 'wb') as f: + f.write(feature.tobytes()) + if (self.time_now // self.time_one == self.features[self.category_index]): + self.category_index += 1 + self.time_all -= self.time_now + self.time_now = 0 + else: + 
results_learn = [] + list_features = os.listdir(self.database_path) + for feature_name in list_features: + with open(self.database_path + feature_name, 'rb') as f: + data = f.read() + save_vec = np.frombuffer(data, dtype=np.float) + score = self.getSimilarity(feature, save_vec) + if (score > self.threshold): + res = feature_name.split("_") + is_same = False + for r in results_learn: + if (r["category"] == res[0]): + if (r["score"] < score): + r["bin_file"] = feature_name + r["score"] = score + is_same = True + if (not is_same): + if(len(results_learn) < self.top_k): + evec = {} + evec["category"] = res[0] + evec["score"] = score + evec["bin_file"] = feature_name + results_learn.append( evec ) + results_learn = sorted(results_learn, key=lambda x: -x["score"]) + else: + if( score <= results_learn[self.top_k-1]["score"] ): + continue + else: + evec = {} + evec["category"] = res[0] + evec["score"] = score + evec["bin_file"] = feature_name + results_learn.append( evec ) + results_learn = sorted(results_learn, key=lambda x: -x["score"]) + results_learn.pop() + draw_y = 200 + for r in results_learn: + pl.osd_img.draw_string_advanced( 50 , draw_y,50,r["category"] + " : " + str(r["score"]), color=(255,255,0,0)) + draw_y += 50 + + #数据初始化 + def data_init(self): + os.mkdir(self.database_path) + self.crop_x_osd = int(self.crop_x / self.rgb888p_size[0] * self.display_size[0]) + self.crop_y_osd = int(self.crop_y / self.rgb888p_size[1] * self.display_size[1]) + self.crop_w_osd = int(self.crop_w / self.rgb888p_size[0] * self.display_size[0]) + self.crop_h_osd = int(self.crop_h / self.rgb888p_size[1] * self.display_size[1]) + for i in range(len(self.labels)): + for j in range(self.features[i]): + self.time_all += self.time_one + + # 获取两个特征向量的相似度 + def getSimilarity(self,output_vec,save_vec): + tmp = sum(output_vec * save_vec) + mold_out = np.sqrt(sum(output_vec * output_vec)) + mold_save = np.sqrt(sum(save_vec * save_vec)) + return tmp / (mold_out * mold_save) + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径 + kmodel_path="/sdcard/app/tests/kmodel/recognition.kmodel" + database_path="/sdcard/app/tests/utils/features/" + # 其它参数设置 + rgb888p_size=[1920,1080] + model_input_size=[224,224] + labels=["苹果","香蕉"] + top_k=3 + threshold=0.5 + + # 初始化PipeLine + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自学习实例 + sl=SelfLearningApp(kmodel_path,model_input_size=model_input_size,labels=labels,top_k=top_k,threshold=threshold,database_path=database_path,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0) + sl.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res=sl.run(img) + # 绘制结果到PipeLine的osd图像 + sl.draw_result(pl,res) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + # 删除features文件夹 + stat_info = os.stat(database_path) + if (stat_info[0] & 0x4000): + list_files = os.listdir(database_path) + for l in list_files: + os.remove(database_path + l) + os.rmdir(database_path) + sl.deinit() + pl.destroy() +``` + +### 2.28. 
局部放大器 + +```python +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义手掌检测任务类 +class HandDetApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size,anchors,confidence_threshold=0.2,nms_threshold=0.5,nms_option=False, strides=[8,16,32],rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + self.labels=labels + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.anchors=anchors + self.strides = strides # 特征下采样倍数 + self.nms_option = nms_option # NMS选项,如果为True做类间NMS,如果为False做类内NMS + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数并应用pad操作,以确保输入图像尺寸与模型输入尺寸匹配 + top, bottom, left, right = self.get_padding_param() + self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [114, 114, 114]) + # 使用双线性插值进行resize操作,调整图像尺寸以符合模型输入要求 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # 构建预处理流程 + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,这里使用了aicube库的anchorbasedet_post_process接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + dets = aicube.anchorbasedet_post_process(results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # 返回手掌检测结果 + return dets + + # 计算padding参数,确保输入图像尺寸与模型输入尺寸匹配 + def get_padding_param(self): + # 根据目标宽度和高度计算比例因子 + dst_w = self.model_input_size[0] + dst_h = self.model_input_size[1] + input_width = self.rgb888p_size[0] + input_high = self.rgb888p_size[1] + ratio_w = dst_w / input_width + ratio_h = dst_h / input_high + # 选择较小的比例因子,以确保图像内容完整 + if ratio_w < ratio_h: + ratio = ratio_w + else: + ratio = ratio_h + # 计算新的宽度和高度 + new_w = int(ratio * input_width) + new_h = int(ratio * input_high) + # 计算宽度和高度的差值,并确定padding的位置 + dw = (dst_w - new_w) / 2 + dh = (dst_h - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw + 0.1)) + return top, bottom, left, right + +# 自定义手势关键点分类任务类 +class HandKPClassApp(AIBase): + def __init__(self,kmodel_path,model_input_size,rgb888p_size=[1920,1080],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 检测模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + 
self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.crop_params=[] + # debug模式 + self.debug_mode=debug_mode + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了crop和resize,Ai2d支持crop/shift/pad/resize/affine + def config_preprocess(self,det,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.crop_params = self.get_crop_param(det) + self.ai2d.crop(self.crop_params[0],self.crop_params[1],self.crop_params[2],self.crop_params[3]) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义后处理,results是模型输出的array列表,返回手部关键点 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + results=results[0].reshape(results[0].shape[0]*results[0].shape[1]) + results_show = np.zeros(results.shape,dtype=np.int16) + results_show[0::2] = (results[0::2] * self.crop_params[3] + self.crop_params[0]) + results_show[1::2] = (results[1::2] * self.crop_params[2] + self.crop_params[1]) + return results_show + + # 计算crop参数 + def get_crop_param(self,det_box): + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + w_det = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h_det = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + x_det = int(x1*self.display_size[0] // self.rgb888p_size[0]) + y_det = int(y1*self.display_size[1] // self.rgb888p_size[1]) + length = max(w, h)/2 + cx = (x1+x2)/2 + cy = (y1+y2)/2 + ratio_num = 1.26*length + x1_kp = int(max(0,cx-ratio_num)) + y1_kp = int(max(0,cy-ratio_num)) + x2_kp = int(min(self.rgb888p_size[0]-1, cx+ratio_num)) + y2_kp = int(min(self.rgb888p_size[1]-1, cy+ratio_num)) + w_kp = int(x2_kp - x1_kp + 1) + h_kp = int(y2_kp - y1_kp + 1) + return [x1_kp, y1_kp, w_kp, h_kp] + +class SpaceResize: + def __init__(self,hand_det_kmodel,hand_kp_kmodel,det_input_size,kp_input_size,labels,anchors,confidence_threshold=0.25,nms_threshold=0.3,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + # 手掌检测模型路径 + self.hand_det_kmodel=hand_det_kmodel + # 手掌关键点模型路径 + self.hand_kp_kmodel=hand_kp_kmodel + # 手掌检测模型输入分辨率 + self.det_input_size=det_input_size + # 手掌关键点模型输入分辨率 + self.kp_input_size=kp_input_size + self.labels=labels + # anchors + self.anchors=anchors + # 置信度阈值 + self.confidence_threshold=confidence_threshold + # nms阈值 + self.nms_threshold=nms_threshold + self.nms_option=nms_option + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + + self.first_start = True # 首次手掌入镜参数 + self.two_point_left_x = 0 # 中指食指包括范围 x + self.two_point_top_y = 0 # 中指食指包括范围 y + self.two_point_mean_w = 0 # 中指食指首次入镜包括范围 w + self.two_point_mean_h = 0 # 中指食指首次入镜包括范围 h + self.two_point_crop_w = 0 # 中指食指包括范围 w + self.two_point_crop_h = 0 # 中指食指包括范围 h + self.osd_plot_x = 0 # osd 画缩放图起始点 x + self.osd_plot_y = 0 # osd 画缩放图起始点 y + self.ori_new_ratio = 0 
# 缩放比例 + self.new_resize_w = 0 # 缩放后 w + self.new_resize_h = 0 # 缩放后 h + self.crop_area = 0 # 剪切区域 + self.rect_frame_x = 0 # osd绘画起始点 x + self.rect_frame_y = 0 # osd绘画起始点 y + self.masks = np.zeros((self.display_size[1],self.display_size[0],4),dtype=np.uint8) + self.mask_img=image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=self.masks) + self.hand_det=HandDetApp(self.hand_det_kmodel,self.labels,model_input_size=self.det_input_size,anchors=self.anchors,confidence_threshold=self.confidence_threshold,nms_threshold=self.nms_threshold,nms_option=self.nms_option,strides=self.strides,rgb888p_size=self.rgb888p_size,display_size=self.display_size,debug_mode=0) + self.hand_kp=HandKPClassApp(self.hand_kp_kmodel,model_input_size=self.kp_input_size,rgb888p_size=self.rgb888p_size,display_size=self.display_size) + self.ai2d=Ai2d(debug_mode) + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.RGB_packed,np.uint8, np.uint8) + self.hand_det.config_preprocess() + + # 对输入数据做预处理,对拇指和中指部分做裁剪并做resize + def imgprocess(self,input_np,x,y,w,h,out_w,out_h): + self.ai2d.crop(x, y, w, h) + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) + self.ai2d.build([1,3,self.rgb888p_size[1],self.rgb888p_size[0]],[1,out_h, out_w,3]) + return self.ai2d.run(input_np).to_numpy() + + # run函数 + def run(self,input_np): + # 先进行手掌检测 + det_boxes=self.hand_det.run(input_np) + det_res=[] + two_point = np.zeros((4),dtype=np.int16) + for det_box in det_boxes: + # 筛选符合要求的手掌 + x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] + w,h= int(x2 - x1),int(y2 - y1) + if (h<(0.1*self.rgb888p_size[1])): + continue + if (w<(0.25*self.rgb888p_size[0]) and ((x1<(0.03*self.rgb888p_size[0])) or (x2>(0.97*self.rgb888p_size[0])))): + continue + if (w<(0.15*self.rgb888p_size[0]) and ((x1<(0.01*self.rgb888p_size[0])) or (x2>(0.99*self.rgb888p_size[0])))): + continue + det_res.append(det_box) + if len(det_res)!=0: + # 选择第一个手掌做手掌关键点识别,然后裁剪拇指和中指区域做resize并替换原图中的部分 + det_box=det_res[0] + self.hand_kp.config_preprocess(det_box) + results_show=self.hand_kp.run(input_np) + two_point[0],two_point[1],two_point[2],two_point[3] = results_show[8],results_show[9],results_show[16+8],results_show[16+9] + if (self.first_start): + if (two_point[0] > 0 and two_point[0] < self.rgb888p_size[0] and two_point[2] > 0 and two_point[2] < self.rgb888p_size[0] and two_point[1] > 0 and two_point[1] < self.rgb888p_size[1] and two_point[3] > 0 and two_point[3] < self.rgb888p_size[1]): + self.two_point_mean_w = np.sqrt(pow(two_point[0] - two_point[2],2) + pow(two_point[1] - two_point[3],2))*0.8 + self.two_point_mean_h = np.sqrt(pow(two_point[0] - two_point[2],2) + pow(two_point[1] - two_point[3],2))*0.8 + self.first_start = False + else: + self.mask_img.clear() + self.two_point_left_x = int(max((two_point[0] + two_point[2]) / 2 - self.two_point_mean_w / 2, 0)) + self.two_point_top_y = int(max((two_point[1] + two_point[3]) / 2 - self.two_point_mean_h / 2, 0)) + self.two_point_crop_w = int(min(min((two_point[0] + two_point[2]) / 2 - self.two_point_mean_w / 2 + self.two_point_mean_w , self.two_point_mean_w), self.rgb888p_size[0] - ((two_point[0] + two_point[2]) / 2 - self.two_point_mean_w / 2))) + self.two_point_crop_h = int(min(min((two_point[1] + two_point[3]) / 2 - self.two_point_mean_h / 2 + self.two_point_mean_h , self.two_point_mean_h), self.rgb888p_size[1] - ((two_point[1] + two_point[3]) / 2 - self.two_point_mean_h / 2))) + self.ori_new_ratio = np.sqrt(pow((two_point[0] - two_point[2]),2) + 
pow((two_point[1] - two_point[3]),2))*0.8 / self.two_point_mean_w + self.new_resize_w = min(int(self.two_point_crop_w * self.ori_new_ratio / self.rgb888p_size[0] * self.display_size[0]),600) + self.new_resize_h = min(int(self.two_point_crop_h * self.ori_new_ratio / self.rgb888p_size[1] * self.display_size[1]),600) + self.rect_frame_x = int(self.two_point_left_x * 1.0 / self.rgb888p_size[0] * self.display_size[0]) + self.rect_frame_y = int(self.two_point_top_y * 1.0 / self.rgb888p_size[1] * self.display_size[1]) + self.draw_w = min(self.new_resize_w,self.display_size[0]-self.rect_frame_x-1) + self.draw_h = min(self.new_resize_h,self.display_size[1]-self.rect_frame_y-1) + space_np_out = self.imgprocess(input_np, self.two_point_left_x, self.two_point_top_y, self.two_point_crop_w, self.two_point_crop_h, self.new_resize_w, self.new_resize_h) # 运行 隔空缩放检测 ai2d + self.masks[self.rect_frame_y:self.rect_frame_y + self.draw_h,self.rect_frame_x:self.rect_frame_x + self.draw_w,0] = 255 + self.masks[self.rect_frame_y:self.rect_frame_y + self.draw_h,self.rect_frame_x:self.rect_frame_x + self.draw_w,1:4] = space_np_out[0][0:self.draw_h,0:self.draw_w,:] + return det_res + + # 绘制效果 + def draw_result(self,pl,det_res): + pl.osd_img.clear() + if len(det_res)==1: + pl.osd_img.copy_from(self.mask_img) + else: + pl.osd_img.draw_string_advanced((self.display_size[0]//2),(self.display_size[1]//2),32,"请保证一只手入镜!",color=(255,0,0)) + + + +if __name__=="__main__": + # 显示模式,默认"hdmi",可以选择"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 手掌检测模型路径 + hand_det_kmodel_path="/sdcard/app/tests/kmodel/hand_det.kmodel" + # 手掌关键点模型路径 + hand_kp_kmodel_path="/sdcard/app/tests/kmodel/handkp_det.kmodel" + anchors_path="/sdcard/app/tests/utils/prior_data_320.bin" + rgb888p_size=[1920,1080] + hand_det_input_size=[512,512] + hand_kp_input_size=[256,256] + confidence_threshold=0.2 + nms_threshold=0.5 + labels=["hand"] + anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] + + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode) + pl.create() + sr=SpaceResize(hand_det_kmodel_path,hand_kp_kmodel_path,det_input_size=hand_det_input_size,kp_input_size=hand_kp_input_size,labels=labels,anchors=anchors,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,nms_option=False,strides=[8,16,32],rgb888p_size=rgb888p_size,display_size=display_size) + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + img=pl.get_frame() # 获取当前帧 + det_res=sr.run(img) # 推理当前帧 + sr.draw_result(pl,det_res) # 绘制当前帧推理结果 + pl.show_image() # 展示当前帧 + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + sr.hand_det.deinit() + sr.hand_kp.deinit() + pl.destroy() +``` + +### 2.29. 
文本转语音(中文) + +```python +from libs.PipeLine import ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +from media.pyaudio import * # 音频模块 +from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 +import media.wave as wave # wav音频处理模块 +import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 +import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 +import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 +import time # 时间统计 +import struct # 字节字符转换模块 +import gc # 垃圾回收模块 +import os,sys # 操作系统接口模块 + +# 自定义TTS中文编码器类,继承自AIBase基类 +class EncoderApp(AIBase): + def __init__(self, kmodel_path,dict_path,phase_path,mapfile,debug_mode=0): + super().__init__(kmodel_path) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.debug_mode = debug_mode # 是否开启调试模式 + self.ttszh=aidemo.tts_zh_create(dict_path,phase_path,mapfile) + self.data=None + self.data_len=0 + self.durition_sum=0 + + # 自定义编码器预处理,返回模型输入tensor列表 + def preprocess(self,text): + with ScopedTiming("encoder preprocess", self.debug_mode > 0): + preprocess_data=aidemo.tts_zh_preprocess(self.ttszh,text) + self.data=preprocess_data[0] + self.data_len=preprocess_data[1] + # 创建编码器模型输入并和模型绑定,编码器包含两个输入,一个是文字预处理的序列数据,一个是speaker数据 + # 编码器序列数据 + enc_seq_input_tensor = nn.from_numpy(np.array(self.data)) + # 编码器speaker数据 + enc_speaker_input_tensor=nn.from_numpy(np.array([0.0])) + return [enc_speaker_input_tensor,enc_seq_input_tensor] + + # 自定义编码器的后处理,results是模型输出ndarray列表,编码器后处理也可以视为解码器的前处理 + def postprocess(self, results): + with ScopedTiming("encoder postprocess", self.debug_mode > 0): + enc_output_0_np=results[0] + enc_output_1_np=results[1] + # 给编码结果添加持续时间属性,每个音素编码向量按照持续时间重复 + duritions=enc_output_1_np[0][:int(self.data_len[0])] + self.durition_sum=int(np.sum(duritions)) + # 解码器输入维度为(1,600,256),不足部分需要padding + max_value=13 + while self.durition_sum>600: + for i in range(len(duritions)): + if duritions[i]>max_value: + duritions[i]=max_value + max_value=max_value-1 + self.durition_sum=np.sum(duritions) + dec_input=np.zeros((1,600,256),dtype=np.float) + m_pad=600-self.durition_sum + k=0 + for i in range(len(duritions)): + for j in range(int(duritions[i])): + dec_input[0][k]=enc_output_0_np[0][i] + k+=1 + return dec_input,self.durition_sum + +# 自定义TTS中文解码器类,继承自AIBase基类 +class DecoderApp(AIBase): + def __init__(self, kmodel_path, debug_mode=0): + super().__init__(kmodel_path) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.debug_mode = debug_mode # 是否开启调试模式 + + # 自定义解码器预处理,返回模型输入tensor列表 + def preprocess(self,dec_input): + with ScopedTiming("decoder preprocess", self.debug_mode > 0): + dec_input_tensor=nn.from_numpy(dec_input) + return [dec_input_tensor] + + # 自定义解码器后处理,results是模型输出ndarray列表 + def postprocess(self, results): + with ScopedTiming("decoder postprocess", self.debug_mode > 0): + return results[0] + +# 自定义HifiGan声码器类,继承自AIBase基类 +class HifiGanApp(AIBase): + def __init__(self, kmodel_path, debug_mode=0): + super().__init__(kmodel_path) # 调用基类的构造函数 + self.kmodel_path = kmodel_path # 模型文件路径 + self.debug_mode = debug_mode # 是否开启调试模式 + self.mel_data=[] + self.subvector_num=0 + self.hifi_input=None + + # 自定义声码器预处理,返回模型输入tensor列表 + def preprocess(self,dec_output_np,durition_sum): + with ScopedTiming("hifigan preprocess", self.debug_mode > 0): + self.subvector_num=durition_sum//100; + remaining=durition_sum%100; + if remaining>0: + self.subvector_num+=1 + self.hifi_input=np.zeros((1,80,self.subvector_num*100),dtype=np.float) + for i in range(durition_sum): + self.hifi_input[:,:,i]=dec_output_np[:,:,i] + + def 
run(self,dec_output_np,durition_sum): + self.preprocess(dec_output_np,durition_sum) + # 依次对每一个子向量进行声码器推理 + for i in range(self.subvector_num): + hifi_input_tmp=np.zeros((1,80,100),dtype=np.float) + for j in range(80): + for k in range(i*100,(i+1)*100): + hifi_input_tmp[0][j][k-i*100]=self.hifi_input[0][j][k] + # 设置模型输入 + hifigan_input_tensor=nn.from_numpy(hifi_input_tmp) + # 推理 + results=self.inference([hifigan_input_tensor]) + self.postprocess(results) + return self.mel_data + + # 自定义当前任务的后处理,results是模型输出ndarray列表 + def postprocess(self, results): + with ScopedTiming("hifigan postprocess", self.debug_mode > 0): + # 汇总输出数据 + for j in range(25600): + self.mel_data.append(results[0][0][0][j]) + +#自定义中文TTS任务类 +class TTSZH: + def __init__(self,encoder_kmodel_path,decoder_kmodel_path,hifigan_kmodel_path,dict_path,phase_path,mapfile,save_wav_file,debug_mode): + self.save_wav_file=save_wav_file + self.debug_mode=debug_mode + self.encoder=EncoderApp(encoder_kmodel_path,dict_path,phase_path,mapfile,debug_mode) + self.decoder=DecoderApp(decoder_kmodel_path,debug_mode) + self.hifigan=HifiGanApp(hifigan_kmodel_path,debug_mode) + + def run(self,text): + encoder_output_0,encoder_output_1=self.encoder.run(text) + decoder_output_0=self.decoder.run(encoder_output_0) + hifigan_output=self.hifigan.run(decoder_output_0,encoder_output_1) + # 将生成的音频数据保存为wav文件 + save_data=hifigan_output[:encoder_output_1*256] + save_len=len(save_data) + aidemo.save_wav(save_data,save_len,self.save_wav_file,24000) + self.play_audio() + + def play_audio(self): + with ScopedTiming("play audio", self.debug_mode > 0): + # 有关音频流的宏变量 + SAMPLE_RATE = 24000 # 采样率24000Hz,即每秒采样24000次 + CHANNELS = 1 # 通道数 1为单声道,2为立体声 + FORMAT = paInt16 # 音频输入输出格式 paInt16 + CHUNK = int(0.3 * 24000) # 每次读取音频数据的帧数,设置为0.3s的帧数24000*0.3=7200 + # 初始化音频流 + p = PyAudio() + p.initialize(CHUNK) + ret = MediaManager.init() + if ret: + print("record_audio, buffer_init failed") + # 用于播放音频 + output_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,output=True,frames_per_buffer=CHUNK) + wf = wave.open(self.save_wav_file, "rb") + wav_data = wf.read_frames(CHUNK) + while wav_data: + output_stream.write(wav_data) + wav_data = wf.read_frames(CHUNK) + time.sleep(2) # 时间缓冲,用于播放声音 + wf.close() + output_stream.stop_stream() + output_stream.close() + p.terminate() + MediaManager.deinit() + + def deinit(self): + aidemo.tts_zh_destroy(self.encoder.ttszh) + tts_zh.encoder.deinit() + tts_zh.decoder.deinit() + tts_zh.hifigan.deinit() + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + nn.shrink_memory_pool() + # 设置模型路径和其他参数 + # 中文tts encoder模型 + encoder_kmodel_path = "/sdcard/app/tests/kmodel/zh_fastspeech_1_f32.kmodel" + # 中文tts decoder模型 + decoder_kmodel_path = "/sdcard/app/tests/kmodel/zh_fastspeech_2.kmodel" + # 中文tts 声码器模型 + hifigan_kmodel_path="/sdcard/app/tests/kmodel/hifigan.kmodel" + # 拼音字典 + dict_path="/sdcard/app/tests/utils/pinyin.txt" + # 汉字转拼音字典文件 + phase_path="/sdcard/app/tests/utils/small_pinyin.txt" + # 拼音转音素映射文件 + mapfile="/sdcard/app/tests/utils/phone_map.txt" + # 输入中文语句 + text="嘉楠科技研发了最新款的芯片" + # 生成音频存储路径 + save_wav_file = "/sdcard/app/tests/test.wav" + + # 初始化自定义中文tts实例 + tts_zh = TTSZH(encoder_kmodel_path,decoder_kmodel_path,hifigan_kmodel_path,dict_path,phase_path,mapfile,save_wav_file,debug_mode=0) + try: + with ScopedTiming("total",1): + tts_zh.run(text) + gc.collect() # 垃圾回收 + except Exception as e: + sys.print_exception(e) # 打印异常信息 + finally: + tts_zh.deinit() +``` diff --git 
"a/zh/example/K230_CanMV_AI_Demo\347\244\272\344\276\213\350\257\264\346\230\216.md" "b/zh/example/K230_CanMV_AI_Demo\347\244\272\344\276\213\350\257\264\346\230\216.md" deleted file mode 100755 index 289b658..0000000 --- "a/zh/example/K230_CanMV_AI_Demo\347\244\272\344\276\213\350\257\264\346\230\216.md" +++ /dev/null @@ -1,14803 +0,0 @@ -# K230 CanMV AI Demo示例说明 - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍如何写AI Demo。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -## 一、概述 - -本文档包括27个AI Demo,这些示例程序都实现从摄像头采集数据、kpu推理到显示器展示的流程,应用到了K230 CanMV 平台的多个硬件模块:AI2D,KPU,Camera,Display等。 - -这些AI Demo分为两种类型:单模型、多模型,涵盖物体、人脸、人手、人体、车牌、OCR、KWS和TTS等方向;参考该文档,k230用户可以更快上手K230 AI应用的开发,实现预期效果。 - -更多AI Demo后续即将解锁。 - -| 单模型示例 | 多模型示例 | -| ------------ | -------------- | -| 人脸检测 | 人脸关键点检测 | -| COCO目标检测 | 人脸识别 | -| yolov8-seg | 人脸姿态角 | -| 车牌检测 | 人脸解析 | -| OCR检测 | 车牌识别 | -| 手掌检测 | 石头剪刀布 | -| 人体检测 | OCR识别 | -| 人体姿态估计 | 手掌关键点检测 | -| KWS | 静态手势识别 | -| 跌倒检测 | 人脸mesh | -| | 注视估计 | -| | 动态手势识别 | -| | 单目标跟踪 | -| | 隔空放大 | -| | 拼图游戏 | -| | 基于关键点的手势识别 | -| | 自学习 | -| | TTS中文 | - -## 二、AI Demo单模型示例解析 - -### 1.分模块解析 - -#### 1.1 模块引入 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行时模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -``` - -| 模块 | 说明 | -| ---------------------- | ------------------------------------------------------------ | -| image(必选) | 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 | -| media.camera(必选) | [摄像头模块](../api/mpp/K230_CanMV_Camera%E6%A8%A1%E5%9D%97API%E6%89%8B%E5%86%8C.md) | -| media.display (必选) | [显示模块](../api/mpp/K230_CanMV_Display%E6%A8%A1%E5%9D%97API%E6%89%8B%E5%86%8C.md) | -| media.media(必选) | [媒体软件抽象模块,主要封装媒体数据链路以及媒体缓冲区](../api/mpp/K230_CanMV_Media%E6%A8%A1%E5%9D%97API%E6%89%8B%E5%86%8C.md) | -| nncase_runtime(必选) | nncase运行时模块, 封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 | -| aidemo(可选) | 封装部分ai demo相关后处理、复杂画图操作 | -| aicube(可选) | 封装基于ai cube训练的检测分割等任务的后处理 | -| ulab.numpy (可选) | [类似python numpy操作,但也会有一些接口不同](https://micropython-ulab.readthedocs.io/en/latest/ ) | -| time(可选) | 时间统计 | -| gc(可选) | [垃圾回收模块](https://docs.micropython.org/en/latest/library/gc.html),自动回收 | - -#### 1.2 参数配置 - -不同模型(kmodel)根据自己需要设置参数信息,包括显示、AI原图、kmodel参数、文件配置、调试模式等。 - -```python -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1024, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 576 - -# kmodel相关参数设置 -# kmodel输入shape,NCHW,RGB -kmodel_input_shape = (1,3,320,320) -# ai原图padding -rgb_mean = [104,117,123] -# 其它kmodel参数 -...... 
- -# 文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' # kmodel路径 -anchors_path = root_dir + 'utils/prior_data_320.bin' # kmodel anchor -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 -``` - -#### 1.3 时间统计工具 - -ScopedTiming 类是一个用来测量代码块执行时间的上下文管理器。上下文管理器通过定义包含 `__enter__` 和 `__exit__` 方法的类来创建。当在 with 语句中使用该类的实例时,`__enter__` 在进入 with 块时被调用,`__exit__` 在离开时被调用。 - -```python -#********************for scoped_timing.py******************** -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") -``` - -**使用示例:** - -```python -import ulab.numpy as np -debug_mode = 1 -a = np.ones(10**6, dtype=np.float) -with ScopedTiming("kpu_pre_process",debug_mode > 0): - a = a * 1.000001 + 0.000001 -``` - -#### 1.4 nncase使用:ai2d - -ai2d主要是用于对输入原图预处理进行硬件加速,然后把预处理结果喂给kmodel。 - -##### (1)ai2d基础用法示例 - -```python -import nncase_runtime as nn -# 注:此示例仅为基础用法,其它demo可根据实际情况调整 -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def ai2d_demo(rgb888p_img): - # 初始化AI2D模块 - # rgb888p_img为Image对象 - with ScopedTiming("ai2d_init",debug_mode > 0): - # (1)创建ai2d实例 - ai2d = nn.ai2d() - # (2)设置ai2d参数 - # 设置ai2d输入、输出格式和数据类型 - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - # 设置padding参数 - ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - # 设置resize参数 - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - # (3)根据ai2d参数构建ai2d_builder - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], kmodel_input_shape) - - # (4)创建ai2d输出,用于保存ai2d输出结果 - global ai2d_output_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - - # (5)创建ai2d输入对象,并将对象从numpy转换为tensor - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (6)根据输入,ai2d参数,运行得到ai2d输出,将结果保存到ai2d_output_tensor中 - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - # dump ai2d结果查看,查看结果是否正确(此时保存格式为nn.ai2d_format.NCHW_FMT,NCHW、RGB格式,需要转换为图片格式后再查看) - #ai2d_out_data = fp_ai2d_output_tensor.to_numpy() # - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - - # (7)删除 ai2d、ai2d_input_tensor、ai2d_output_tensor、ai2d_builder 变量,释放对它所引用对象的内存引用 - del ai2d - del ai2d_input_tensor - del ai2d_output_tensor - del ai2d_builder -``` - -##### (2)ai2d示例用法一:ai2d参数固定 - -**ai2d参数固定**:针对视频流的不同帧,ai2d参数固定使用示例 - 
-eg:人脸检测模型,对于模型输入,需要将原图进行预处理(padding、resize)之后,然后再喂给kmodel;若是原图从sensor中取出,分辨率固定,则padding的上下左右位置是固定的,此时可以使用以下模板对原图进行预处理 - -```python -import nncase_runtime as nn -#ai2d:ai2d实例 -#ai2d_input_tensor:ai2d输入 -#ai2d_output_tensor:ai2d输出 -#ai2d_builder:根据ai2d参数,构建的ai2d_builder对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - ..... - return [0, 0, 0, 0, top, bottom, left, right] - -def ai2d_init(): - # 初始化AI2D模块 - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - # (1)创建ai2d实例 - ai2d = nn.ai2d() - # (2)设置ai2d参数 - # 设置ai2d输入、输出格式和数据类型 - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - # 设置padding参数 - ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - # 设置resize参数 - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - # (3)创建ai2d输出,用于保存ai2d输出结果 - global ai2d_output_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - - # (4)根据ai2d参数构建ai2d_builder,因为ai2d的参数不变,因此只需创建一次ai2d_builder - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], kmodel_input_shape) - - -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor - # (1)创建ai2d输入对象,并将对象从numpy转换为tensor - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据输入,ai2d参数,运行得到ai2d输出,将结果保存到ai2d_output_tensor中 - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - # 删除 ai2d_input_tensor 变量,释放对它所引用对象的内存引用 - del ai2d_input_tensor -``` - -**使用示例:** - -```python -ai2d_init() # ai2d初始化 -while True: - rgb888p_img = camera_read(CAM_DEV_ID_0) # 从sensor拿到一帧图像 - ai2d_run(rgb888p_img) # 对sensor原图像预处理 - ai2d_release() # 释放ai2d_input_tensor,因为每帧原图不同,ai2d_input_tensor指向的对象都会改变,所以每次都需释放内存 -...... -global ai2d,ai2d_output_tensor,ai2d_builder #只需释放一次 -del ai2d # 释放ai2d,因为ai2d指向的对象是固定的 -del ai2d_output_tensor # 释放ai2d_output_tensor,因为ai2d_output_tensor指向的对象是固定的 -del ai2d_builder # 释放ai2d_builder,因为ai2d的参数未变,ai2d_builder指向的对象是固定的 -``` - -##### (3)ai2d示例用法二:ai2d参数不固定 - -**ai2d参数不固定**:针对视频流的不同帧,ai2d参数实时变化 - -eg:人脸关键点检测模型,对于模型输入,需要将原图进行预处理(affine)之后,然后再喂给kmodel;即使原图从sensor中取出,分辨率固定,但是affine的参数是实时改变的,此时可以使用以下模板对原图进行预处理。 - -```python -import nncase_runtime as nn -# fld_ai2d:人脸关键点ai2d实例 -# fld_ai2d_input_tensor:人脸关键点ai2d输入 -# fld_ai2d_output_tensor:人脸关键点ai2d输出 -# fld_ai2d_builder:根据人脸关键点ai2d参数,构建的人脸关键点ai2d_builder对象 -global fld_ai2d,fld_ai2d_input_tensor,fld_ai2d_output_tensor,fld_ai2d_builder -# affine参数 -global matrix_dst - -def get_affine_matrix(bbox): - # 根据人脸检测框获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - ...... 
- return matrix_dst - -def fld_ai2d_init(): - with ScopedTiming("fld_ai2d_init",debug_mode > 0): - #for face landmark - global fld_ai2d - # (1)创建人脸关键点ai2d对象 - fld_ai2d = nn.ai2d() - - global fld_ai2d_output_tensor - # (2)创建人脸关键点ai2d_output_tensor对象 - data = np.ones(fld_kmodel_input_shape, dtype=np.uint8) - fld_ai2d_output_tensor = nn.from_numpy(data) - -def fld_ai2d_run(rgb888p_img,det): - # 人脸关键点ai2d运行,rgb888p_img是Image对象,det为人脸检测框 - with ScopedTiming("fld_ai2d_run",debug_mode > 0): - global fld_ai2d,fld_ai2d_input_tensor,fld_ai2d_output_tensor - # (1)创建ai2d_input_tensor - # Image对象转换为numpy对象 - ai2d_input = rgb888p_img.to_numpy_ref() - # 将numpy对象转换为ai2d_tensor - fld_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)设置ai2d参数 - # 设置ai2d输入、输出格式、数据类型 - fld_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - # 根据检测框获取affine参数 - matrix_dst = get_affine_matrix(det) - affine_matrix = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - # 设置affine参数 - fld_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - - global fld_ai2d_builder - # (3)根据新的ai2d affine参数,创建新的ai2d_builder对象 - fld_ai2d_builder = fld_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fld_kmodel_input_shape) - # (4)ai2d_builder运行,将结果保存到fld_ai2d_output_tensor - fld_ai2d_builder.run(fld_ai2d_input_tensor, fld_ai2d_output_tensor) - -def fld_ai2d_release(): - with ScopedTiming("fld_ai2d_release",debug_mode > 0): - global fld_ai2d_input_tensor,fld_ai2d_builder - del fld_ai2d_input_tensor #删除fld_ai2d_input_tensor变量,释放对它所引用对象的内存引用 - del fld_ai2d_builder #删除fld_ai2d_builder变量,释放对它所引用对象的内存引用 -``` - -**使用示例:** - -```python -# ************main************ -fld_ai2d_init() # ai2d初始化 -while True: - rgb888p_img = camera_read(CAM_DEV_ID_0) # 从sensor拿到一帧图像 - fld_ai2d_run(rgb888p_img,det) # 根据det,对sensor原图预处理 - fld_ai2d_release() # 释放ai2d_input_tensor、ai2d_builder,因为每帧原图不同,ai2d_input_tensor指向的对象都会改变,所以每次都需释放内存;因为ai2d的参数实时改变,ai2d_builder对象需要每次创建,也需要每次都释放内存 -...... -global fld_ai2d,fld_ai2d_output_tensor #只需释放一次 -del fld_ai2d # 释放fld_ai2d,因为ai2d指向的对象是固定的 -del fld_ai2d_output_tensor # 释放fld_ai2d_output_tensor,因为fld_ai2d_output_tensor指向的对象是固定的 -``` - -#### 1.5 nncase使用:kpu - -##### (1)kpu基础用法 - -```python -import nncase_runtime as nn - -# (1)初始化kpu对象 -kpu_obj = nn.kpu() -# (2)加载kmodel -kpu_obj.load_kmodel(kmodel_file) - -# (3)设置kpu输入 -kmodel_input_shape = (1,3,320,320) -data = np.ones(kmodel_input_shape, dtype=np.uint8) -kmodel_input_tensor = nn.from_numpy(data) -kpu_obj.set_input_tensor(0, kmodel_input_tensor) - -# (4)kpu运行 -kpu_obj.run() - -# (5)获取kpu模型输出 -results = [] -for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data #tensor对象用完之后释放内存 - results.append(result) - -#(6)kpu后处理 -...... 
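-# 后处理依具体模型而定;以下仅为示意(假设 results[0] 形如 (1,N,5),前 4 列为框坐标、第 5 列为置信度,这些均为假设):
-# boxes  = results[0][0][:, 0:4]
-# scores = results[0][0][:, 4]
-# dets   = [box for box, s in zip(boxes, scores) if s > confidence_threshold]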
- -# (7)释放kpu对象 -del kpu_obj -nn.shrink_memory_pool() -``` - -##### (2)kpu示例用法:kpu配合ai2d使用 - -```python -import nncase_runtime as nn -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder #ai2d相关对象 -global current_kmodel_obj #当前kpu对象 - -def kpu_init(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # (1)初始化kpu对象 - kpu_obj = nn.kpu() - # (2)加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # (3)ai2d初始化,模型输入需要预处理的情况下,kpu需要配合ai2d使用 - ai2d_init() - return kpu_obj - -def kpu_pre_process(rgb888p_img): - # kpu预处理,rgb888p_img是Image对象,原图 - # (1)ai2d运行,对原图进行预处理 - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # (2)将ai2d输出tensor设置为kpu模型输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -def kpu_get_output(): - # 获取kpu输出 - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取kpu输出,将输出转换为numpy格式,以便进行后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data #tensor对象用完之后释放内存 - results.append(result) - return results - -def kpu_run(kpu_obj,rgb888p_img): - # kpu推理运行 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)kpu预处理 - kpu_pre_process(rgb888p_img) - with ScopedTiming("kpu_run",debug_mode > 0): - # (2)kpu运行 - kpu_obj.run() - # (3)ai2d释放 - ai2d_release() - # (4)获取模型输出 - results = kpu_get_output() - # (5)kpu后处理,获取检测结果 - with ScopedTiming("kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,kmodel_input_shape[2],prior_data,[OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] - -# kpu释放 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d,ai2d_output_tensor - del ai2d #删除ai2d变量,释放对它所引用对象的内存引用 - del ai2d_output_tensor #删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 -``` - -使用示例: - -```python -kmodel_file = "/sdcard/app/kmodel/face_detection_320.kmodel" -fd_kmodel = kpu_init(kmodel_file) -while True: - rgb888p_img = camera_read(CAM_DEV_ID_0) # 从sensor拿到一帧图像 - dets = kpu_run(fd_kmodel,rgb888p_img) # kmodel推理 - ...... -...... 
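-    # 注:每帧 rgb888p_img 使用完后,还需调用 camera_release_image 释放(见 1.6.1 节)
-# 循环退出后,按以下顺序释放 kpu 相关资源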
-kpu_deinit() # 释放kmodel -global current_kmodel_obj -del current_kmodel_obj -del fd_kmodel -nn.shrink_memory_pool() # 释放所有nncase内存 -``` - -#### 1.6 媒体使用 - -##### 1.6.1 camera - -```python -from media.camera import * -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -OUT_RGB888P_WIDTH = ALIGN_UP(1024, 16) -OUT_RGB888P_HEIGH = 576 - -def camera_init(dev_id): - # 根据设备id初始化camera - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # 设置camera的两路输出,一路输出用于显示,一路输出用于ai - - # (1)设置显示输出 - # 设置指定设备id的chn0的输出宽高 - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # 设置指定设备id的chn0的输出格式为yuv420sp - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # (2)设置AI输出 - # 设置指定设备id的chn2的输出宽高 - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - # 设置指定设备id的chn2的输出格式为rgb88planar - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # 启动sensor - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取指定设备chn2的一帧图像,即获取一帧AI原图 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放指定设备chn2一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 释放sensor - camera.stop_stream(dev_id) -``` - -**使用示例:** - -```python -from media.camera import * -import os,sys - -# 初始化camera 0 -camera_init(CAM_DEV_ID_0) -# ......(camera_start还要配合其它操作,稍后在media模块介绍) -# 启动sensor -camera_start(CAM_DEV_ID_0) -time.sleep(5) -rgb888p_img = None -while True: - os.exitpoint() #加退出点,确保当次循环执行完全,保证释放图像函数调用 - # 读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - ....... - # 释放当前图像 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) -``` - -##### 1.6.2 display - -```python -from media.display import * -# draw_img:用于画图 -# osd_img:用于显示 -global draw_img,osd_img - -#for display -def display_init(): - # 使用hdmi用于显示 - display.init(LT9611_1920X1080_30FPS) - # 设置显示宽高、格式等 - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放与显示相关的资源 - display.deinit() - -def display_draw(dets): - # 将检测框画到显示上 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - # 清空draw_img - draw_img.clear() - - # 画检测框 - for det in dets: - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - # 先将框画到draw_img,argb - draw_img.draw_rectangle(x,y, w, h, color=(255, 255, 0, 255)) - # 将draw_img拷贝到osd_img - draw_img.copy_to(osd_img) - # 将osd_img显示到hdmi上 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # 清空draw_img - draw_img.clear() - # 将draw_img拷贝到osd_img - draw_img.copy_to(osd_img) - # 将透明图显示到hdmi上 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) -``` - -**使用示例:** - -```python -from media.display import * -# 显示初始化 -display_init() -while True: - ...... - dets = kpu_run(kpu_face_detect,rgb888p_img) - # 将检测框画到显示屏幕 - display_draw(dets) - ......
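-    # 同样地,每帧处理完成后需调用 camera_release_image 释放当前帧;完整的循环与资源释放流程见第 2 节人脸检测示例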
-# 显示资源释放 -display_deinit() -``` - -##### 1.6.3 media - -```python -from media.media import * - -global buffer,media_source,media_sink #for media -def media_init(): - # (1)配置媒体缓冲区 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - ret = media.buffer_config(config) - - # (2)创建由参数media_source和media_sink指定的媒体链路,链路创建成功后,数据流会自动从media_source流入media_sink,无需用户干预 - global media_source, media_sink - # 创建指向指定sensor id,chn0的source - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # 创建指向指定显示的sink - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # 创建子sensor的chn0的输出,到指定显示的链路 - media.create_link(media_source, media_sink) - - # (3)初始化K230 CanMV平台媒体缓冲区 - media.buffer_init() - - global buffer, draw_img, osd_img - # (4)构建用于画图的对象 - # 使用media模块构建osd_image内存,用于显示 - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 创建用于画框、画点的Image - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) - # 用于画框、画点结果,防止画的过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # 释放media资源 - global buffer,media_source, media_sink - # (1)释放buffer - media.release_buffer(buffer) - # (2)销毁已经创建的媒体链路 - media.destroy_link(media_source, media_sink) - - # (3)去初始化K230 CanMV平台媒体缓冲区 - media.buffer_deinit() -``` - -**使用示例:** - -```python -# camera初始化 -camera_init(CAM_DEV_ID_0) -# 显示初始化 -display_init() - -rgb888p_img = None -try: - media_init() #媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - - camera_start(CAM_DEV_ID_0) # 启动camera - while True: - with ScopedTiming("total",1): - os.exitpoint() # 添加退出点,保证图像释放 - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - # (2)kpu推理,获取推理结果 - dets = kpu_run(kpu_face_detect,rgb888p_img) - # (3)将推理结果画到原图 - display_draw(dets) - - # (4)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) -except KeyboardInterrupt as e: - print("user stop: ", e) -except BaseException as e: - sys.print_exception(e) -finally: - # 释放camera资源 - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - kpu_deinit() - global current_kmodel_obj - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() # 保证nncase资源的完全释放 - # 释放媒体资源 - media_deinit() -``` - -### 2.人脸检测 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# kmodel输入shape -kmodel_input_shape = (1,3,320,320) -# ai原图padding -rgb_mean = [104,117,123] -# kmodel其它参数设置 -confidence_threshold = 0.5 -top_k = 5000 -nms_threshold = 
0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# kmodel文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# anchor文件配置 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -# 当前kmodel -global current_kmodel_obj -# ai2d: ai2d实例 -# ai2d_input_tensor: ai2d输入 -# ai2d_output_tensor:ai2d输出 -# ai2d_builder: 根据ai2d参数,构建的ai2d_builder对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder #for ai2d -print('anchors_path:',anchors_path) -# 读取anchor文件,为后处理做准备 -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("ai2d_init",debug_mode > 0): - # (1)创建ai2d对象 - global ai2d - ai2d = nn.ai2d() - # (2)设置ai2d参数 - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - # (3)创建ai2d_output_tensor,用于保存ai2d输出 - global ai2d_output_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - - # (4)ai2d_builder,根据ai2d参数、输入输出大小创建ai2d_builder对象 - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], kmodel_input_shape) - - -def ai2d_run(rgb888p_img): - # 对原图rgb888p_img进行预处理 - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行ai2d_builder,将结果保存到ai2d_output_tensor中 - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -def ai2d_release(): - # 释放ai2d_input_tensor - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -def kpu_init(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # 初始化kpu对象 - kpu_obj = nn.kpu() - # 加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化ai2d - 
ai2d_init() - return kpu_obj - -def kpu_pre_process(rgb888p_img): - # 使用ai2d对原图进行预处理(padding,resize) - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # 将ai2d输出设置为kpu输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def kpu_run(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放ai2d资源 - ai2d_release() - # (4)获取kpu输出 - results = kpu_get_output() - # (5)kpu结果后处理 - with ScopedTiming("kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,kmodel_input_shape[2],prior_data,[OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] - - -def kpu_deinit(): - # kpu释放 - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): #删除ai2d变量,释放对它所引用对象的内存引用 - global ai2d - del ai2d - - if 'ai2d_output_tensor' in globals(): #删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global ai2d_output_tensor - del ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets): - # hdmi画检测框 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for det in dets: - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - draw_img.draw_rectangle(x,y, w, h, color=(255, 255, 0, 255), thickness = 2) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): 
- camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_detect_inference(): - print("face_detect_test start") - # kpu初始化 - kpu_face_detect = kpu_init(kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) -# time.sleep(5) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取检测结果 - dets = kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)将结果画到显示器 - display_draw(dets) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - nn.shrink_memory_pool() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - - print("face_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_detect_inference() -``` - -### 3.COCO目标检测 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai 原图输入分辨率 -OUT_RGB888P_WIDTH = ALIGN_UP(320, 16) -OUT_RGB888P_HEIGHT = 320 - 
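-# 注:本示例 AI 原图分辨率与 kmodel 输入分辨率相同(均为 320x320),因此下文 ai2d_init 中仅配置 resize 参数,未使用 padding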
-#多目标检测 kmodel 输入 shape -kmodel_input_shape = (1,3,320,320) - -#多目标检测 相关参数设置 -confidence_threshold = 0.2 # 多目标检测分数阈值 -nms_threshold = 0.2 # 非最大值抑制阈值 -x_factor = float(OUT_RGB888P_WIDTH)/kmodel_input_shape[3] # 原始图像分辨率宽与kmodel宽输入大小比值 -y_factor = float(OUT_RGB888P_HEIGHT)/kmodel_input_shape[2] # 原始图像分辨率高与kmodel高输入大小比值 -keep_top_k = 50 # 最大输出检测框的数量 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov8n_320.kmodel' # kmodel文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#颜色板 用于作图 -color_four = [(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), - (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), - (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), - (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), - (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157), - (255, 110, 76, 0), (255, 174, 57, 255), (255, 199, 100, 0), (255, 72, 0, 118), - (255, 255, 179, 240), (255, 0, 125, 92), (255, 209, 0, 151), (255, 188, 208, 182), - (255, 0, 220, 176), (255, 255, 99, 164), (255, 92, 0, 73), (255, 133, 129, 255), - (255, 78, 180, 255), (255, 0, 228, 0), (255, 174, 255, 243), (255, 45, 89, 255), - (255, 134, 134, 103), (255, 145, 148, 174), (255, 255, 208, 186), - (255, 197, 226, 255), (255, 171, 134, 1), (255, 109, 63, 54), (255, 207, 138, 255), - (255, 151, 0, 95), (255, 9, 80, 61), (255, 84, 105, 51), (255, 74, 65, 105), - (255, 166, 196, 102), (255, 208, 195, 210), (255, 255, 109, 65), (255, 0, 143, 149), - (255, 179, 0, 194), (255, 209, 99, 106), (255, 5, 121, 0), (255, 227, 255, 205), - (255, 147, 186, 208), (255, 153, 69, 1), (255, 3, 95, 161), (255, 163, 255, 0), - (255, 119, 0, 170), (255, 0, 182, 199), (255, 0, 165, 120), (255, 183, 130, 88), - (255, 95, 32, 0), (255, 130, 114, 135), (255, 110, 129, 133), (255, 166, 74, 118), - (255, 219, 142, 185), (255, 79, 210, 114), (255, 178, 90, 62), (255, 65, 70, 15), - (255, 127, 167, 115), (255, 59, 105, 106), (255, 142, 108, 45), (255, 196, 172, 0), - (255, 95, 54, 80), (255, 128, 76, 255), (255, 201, 57, 1), (255, 246, 0, 122), - (255, 191, 162, 208)] - -#标签 多目标检测的所有可识别类别 -labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - 
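-# 下面 py_cpu_nms 中交并比(IoU)的计算方式为:
-#   IoU = inter / (area_i + area_j - inter)
-# 其中 inter 为两个框相交区域的面积;算法按得分从高到低依次保留当前最高分框,
-# 并剔除与其 IoU 大于等于 nms_threshold 的其余候选框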
-#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# 多目标检测 非最大值抑制方法实现 -def py_cpu_nms(boxes,scores,thresh): - """Pure Python NMS baseline.""" - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = np.argsort(scores,axis = 0)[::-1] - - keep = [] - while order.size > 0: - i = order[0] - keep.append(i) - new_x1 = [] - new_x2 = [] - new_y1 = [] - new_y2 = [] - new_areas = [] - for order_i in order: - new_x1.append(x1[order_i]) - new_x2.append(x2[order_i]) - new_y1.append(y1[order_i]) - new_y2.append(y2[order_i]) - new_areas.append(areas[order_i]) - new_x1 = np.array(new_x1) - new_x2 = np.array(new_x2) - new_y1 = np.array(new_y1) - new_y2 = np.array(new_y2) - xx1 = np.maximum(x1[i], new_x1) - yy1 = np.maximum(y1[i], new_y1) - xx2 = np.minimum(x2[i], new_x2) - yy2 = np.minimum(y2[i], new_y2) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - - new_areas = np.array(new_areas) - ovr = inter / (areas[i] + new_areas - inter) - new_order = [] - for ovr_i,ind in enumerate(ovr): - if ind < thresh: - new_order.append(order[ovr_i]) - order = np.array(new_order,dtype=np.uint8) - return keep - -# 多目标检测 接收kmodel输出的后处理方法 -def kpu_post_process(output_data): - with ScopedTiming("kpu_post_process", debug_mode > 0): - boxes_ori = output_data[:,0:4] - scores_ori = output_data[:,4:] - confs_ori = np.max(scores_ori,axis=-1) - inds_ori = np.argmax(scores_ori,axis=-1) - - boxes = [] - scores = [] - inds = [] - - for i in range(len(boxes_ori)): - if confs_ori[i] > confidence_threshold: - scores.append(confs_ori[i]) - inds.append(inds_ori[i]) - x = boxes_ori[i,0] - y = boxes_ori[i,1] - w = boxes_ori[i,2] - h = boxes_ori[i,3] - left = int((x - 0.5 * w) * x_factor) - top = int((y - 0.5 * h) * y_factor) - right = int((x + 0.5 * w) * x_factor) - bottom = int((y + 0.5 * h) * y_factor) - boxes.append([left,top,right,bottom]) - - if len(boxes)==0: - return [] - - boxes = np.array(boxes) - scores = np.array(scores) - inds = np.array(inds) - - # do NMS - keep = py_cpu_nms(boxes,scores,nms_threshold) - dets = np.concatenate((boxes, scores.reshape((len(boxes),1)), inds.reshape((len(boxes),1))), axis=1) - - dets_out = [] - for keep_i in keep: - dets_out.append(dets[keep_i]) - dets_out = np.array(dets_out) - - # keep top-K faster NMS - dets_out = dets_out[:keep_top_k, :] - return dets_out - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with 
ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - - result = result.reshape((result.shape[0] * result.shape[1], result.shape[2])) - result = result.transpose() - tmp2 = result.copy() - del data - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - dets = kpu_post_process(results[0]) - # (6) 返回多目标检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有目标检测框以及类别、分数值的作图 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for det in dets: - x1, y1, x2, y2 = map(lambda x: int(round(x, 0)), det[:4]) - w = (x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = (y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - draw_img.draw_rectangle(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH, - y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT, w, h, color=color_four[int(det[5])],thickness=4) - draw_img.draw_string( int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) , int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT)-50, - " " + labels[int(det[5])] + " " + str(round(det[4],2)) , color=color_four[int(det[5])] , scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# 
camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for ob_detect.py********** -def ob_detect_inference(): - print("ob_detect start") - kpu_ob_detect = kpu_init(kmodel_file) # 创建多目标检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_ob_detect,rgb888p_img) # 执行多目标检测 kpu运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_ob_detect - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("ob_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ob_detect_inference() -``` - -### 4.yolov8-seg - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(320, 16) -OUT_RGB888P_HEIGHT = 320 - -#多目标分割 kmodel 输入参数配置 -kmodel_input_shape = (1,3,320,320) # kmodel输入分辨率 
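-# rgb_mean 为等比缩放后补边(padding)时的填充像素值,在下文 ai2d_init 的 set_pad_param 中使用;114 是 YOLO 系列 letterbox 预处理常用的灰度填充值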
-rgb_mean = [114,114,114] # ai2d padding 值 - -#多目标分割 相关参数设置 -confidence_threshold = 0.2 # 多目标分割分数阈值 -nms_threshold = 0.5 # 非最大值抑制阈值 -mask_thres = 0.5 # 多目标分割掩码阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov8n_seg_320.kmodel' # kmodel文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#标签 多目标分割的所有可识别类别 -labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] - -#颜色板 用于作图 -color_four = [(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), - (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), - (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), - (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), - (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157), - (255, 110, 76, 0), (255, 174, 57, 255), (255, 199, 100, 0), (255, 72, 0, 118), - (255, 255, 179, 240), (255, 0, 125, 92), (255, 209, 0, 151), (255, 188, 208, 182), - (255, 0, 220, 176), (255, 255, 99, 164), (255, 92, 0, 73), (255, 133, 129, 255), - (255, 78, 180, 255), (255, 0, 228, 0), (255, 174, 255, 243), (255, 45, 89, 255), - (255, 134, 134, 103), (255, 145, 148, 174), (255, 255, 208, 186), - (255, 197, 226, 255), (255, 171, 134, 1), (255, 109, 63, 54), (255, 207, 138, 255), - (255, 151, 0, 95), (255, 9, 80, 61), (255, 84, 105, 51), (255, 74, 65, 105), - (255, 166, 196, 102), (255, 208, 195, 210), (255, 255, 109, 65), (255, 0, 143, 149), - (255, 179, 0, 194), (255, 209, 99, 106), (255, 5, 121, 0), (255, 227, 255, 205), - (255, 147, 186, 208), (255, 153, 69, 1), (255, 3, 95, 161), (255, 163, 255, 0), - (255, 119, 0, 170), (255, 0, 182, 199), (255, 0, 165, 120), (255, 183, 130, 88), - (255, 95, 32, 0), (255, 130, 114, 135), (255, 110, 129, 133), (255, 166, 74, 118), - (255, 219, 142, 185), (255, 79, 210, 114), (255, 178, 90, 62), (255, 65, 70, 15), - (255, 127, 167, 115), (255, 59, 105, 106), (255, 142, 108, 45), (255, 196, 172, 0), - (255, 95, 54, 80), (255, 128, 76, 255), (255, 201, 57, 1), (255, 246, 0, 122), - (255, 191, 162, 208)] - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# 多目标分割 
接收kmodel输出的后处理方法 -def kpu_post_process(output_datas): - with ScopedTiming("kpu_post_process", debug_mode > 0): - global masks - mask_dets = aidemo.segment_postprocess(output_datas,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[kmodel_input_shape[2],kmodel_input_shape[3]],[DISPLAY_HEIGHT,DISPLAY_WIDTH],confidence_threshold,nms_threshold,mask_thres,masks) - return mask_dets - -# 获取kmodel输入图像resize比例 以及 padding的上下左右像素数量 -def get_pad_param(): - #右padding或下padding - dst_w = kmodel_input_shape[3] - dst_h = kmodel_input_shape[2] - - ratio_w = float(dst_w) / OUT_RGB888P_WIDTH - ratio_h = float(dst_h) / OUT_RGB888P_HEIGHT - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGHT) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(dh - 0.1)) - bottom = (int)(round(dh + 0.1)) - left = (int)(round(dw - 0.1)) - right = (int)(round(dw + 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, get_pad_param(), 0, rgb_mean) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - data_0 = current_kmodel_obj.get_output_tensor(0) - result_0 = data_0.to_numpy() - del data_0 - results.append(result_0) - - data_1 = current_kmodel_obj.get_output_tensor(1) - result_1 = data_1.to_numpy() - del data_1 - results.append(result_1) - - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - seg_res = kpu_post_process(results) - # (6) 返回 分割 mask 结果 - return seg_res - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global 
ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有目标分割对象以及类别、分数值的作图 -def display_draw(seg_res): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img,masks - if seg_res[0]: - dets = seg_res[0] - ids = seg_res[1] - scores = seg_res[2] - - for i, det in enumerate(dets): - x1, y1, w, h = map(lambda x: int(round(x, 0)), det) - draw_img.draw_string( int(x1) , int(y1)-50, " " + labels[int(ids[i])] + " " + str(round(scores[i],2)) , color=color_four[int(ids[i])], scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((1,DISPLAY_HEIGHT,DISPLAY_WIDTH,4)) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for seg.py********** -def seg_inference(): - print("seg start") - 
kpu_seg = kpu_init(kmodel_file) # 创建多目标分割的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - seg_res = kpu_run(kpu_seg,rgb888p_img) # 执行多目标分割 kpu 运行 以及 后处理过程 - display_draw(seg_res) # 将得到的分割结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_seg - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("seg end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - seg_inference() -``` - -### 5.车牌检测 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#车牌检测 kmodel 输入shape -kmodel_input_shape = (1,3,640,640) - -#车牌检测 相关参数设置 -obj_thresh = 0.2 #车牌检测分数阈值 -nms_thresh = 0.2 #检测框 非极大值抑制 阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/LPD_640.kmodel' # kmodel 文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# 车牌检测 接收kmodel输出的后处理方法 -def kpu_post_process(output_data): - with ScopedTiming("kpu_post_process", debug_mode > 0): - results = aidemo.licence_det_postprocess(output_data,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[kmodel_input_shape[2],kmodel_input_shape[3]],obj_thresh,nms_thresh) - return results - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = 
ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - tmp2 = result.copy() - del data - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - dets = kpu_post_process(results) - # (6) 返回 车牌检测框 结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有车牌检测框绘制到屏幕上 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - point_8 = np.zeros((8),dtype=np.int16) - for det in dets: - for i in range(4): - x = det[i * 2 + 0]/OUT_RGB888P_WIDTH*DISPLAY_WIDTH - y = det[i * 2 + 1]/OUT_RGB888P_HEIGHT*DISPLAY_HEIGHT - point_8[i * 2 + 0] = int(x) - point_8[i * 2 + 1] = int(y) - for i in range(4): - draw_img.draw_line(point_8[i * 2 + 0],point_8[i * 2 + 1],point_8[(i+1) % 4 * 2 + 0],point_8[(i+1) % 4 * 2 + 1],color=(255, 0, 255, 0),thickness=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, 
CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_BGR_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for licence_det.py********** -def licence_det_inference(): - print("licence_det start") - kpu_licence_det = kpu_init(kmodel_file) # 创建车牌检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_licence_det,rgb888p_img) # 执行车牌检测 kpu 运行 以及后处理过程 - display_draw(dets) # 将得到的 检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_licence_det - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放整个media - - print("licence_det end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - licence_det_inference() -``` - -### 6.OCR检测 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image 
#图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os,sys #操作系统接口模块 -import aicube #aicube模块,封装检测分割等任务相关后处理 - -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(640, 16) -OUT_RGB888P_HEIGH = 360 - -# kmodel输入参数配置 -kmodel_input_shape_det = (1,3,640,640) # kmodel输入分辨率 -rgb_mean = [0,0,0] # ai2d padding的值 - -# kmodel相关参数设置 -mask_threshold = 0.25 # 二值化mask阈值 -box_threshold = 0.3 # 检测框分数阈值 - -# 文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file_det = root_dir + 'kmodel/ocr_det_int16.kmodel' # kmodel加载路径 -debug_mode = 0 # 调试模式 大于0(调试)、 反之 (不调试) - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -# ai utils -# 当前kmodel -global current_kmodel_obj # 定义全局kpu对象 -# ai2d_det: ai2d实例 -# ai2d_input_tensor_det: ai2d输入 -# ai2d_output_tensor_det: ai2d输出 -# ai2d_builder_det: 根据ai2d参数,构建的ai2d_builder_det对象 -# ai2d_input_det: ai2d输入的numpy数据 -global ai2d_det,ai2d_input_tensor_det,ai2d_output_tensor_det,ai2d_builder_det,ai2d_input_det # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# padding方法,一边padding,右padding或者下padding -def get_pad_one_side_param(out_img_size,input_img_size): - # 右padding或下padding - dst_w = out_img_size[0] - dst_h = out_img_size[1] - - input_width = input_img_size[0] - input_high = input_img_size[1] - - ratio_w = dst_w / input_width - ratio_h = dst_h / input_high - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * input_width) - new_h = (int)(ratio * input_high) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - - -# ai2d 初始化,用于实现输入的预处理 -def ai2d_init_det(): - with ScopedTiming("ai2d_init",debug_mode > 0): - # 创建ai2d对象 - global ai2d_det - ai2d_det = nn.ai2d() - # 设置ai2d参数 - ai2d_det.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d_det.set_pad_param(True, get_pad_one_side_param([kmodel_input_shape_det[3],kmodel_input_shape_det[2]], [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH]), 0, [0, 0, 0]) - ai2d_det.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - # 创建ai2d_output_tensor_det,用于保存ai2d的输出 - global ai2d_output_tensor_det - data = np.ones(kmodel_input_shape_det, dtype=np.uint8) - ai2d_output_tensor_det = nn.from_numpy(data) - - # ai2d_builder_det,根据ai2d参数、输入输出大小创建ai2d_builder_det对象 - global ai2d_builder_det - ai2d_builder_det = ai2d_det.build([1, 3, OUT_RGB888P_HEIGH, OUT_RGB888P_WIDTH], [1, 3, kmodel_input_shape_det[2], kmodel_input_shape_det[3]]) - - -# ai2d 运行,完成ai2d_init_det设定的预处理 -def ai2d_run_det(rgb888p_img): - # 对原图rgb888p_img进行预处理 - with ScopedTiming("ai2d_run",debug_mode > 0): - # 根据原图构建ai2d_input_tensor_det - global ai2d_input_tensor_det,ai2d_builder_det,ai2d_input_det,ai2d_output_tensor_det - ai2d_input_det = rgb888p_img.to_numpy_ref() - ai2d_input_tensor_det = nn.from_numpy(ai2d_input_det) - # 运行ai2d_builder_det,将结果保存到ai2d_output_tensor_det - ai2d_builder_det.run(ai2d_input_tensor_det, 
ai2d_output_tensor_det) - - -# ai2d 释放输入tensor -def ai2d_release_det(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor_det - del ai2d_input_tensor_det - -# kpu 初始化 -def kpu_init_det(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # 初始化kpu对象 - kpu_obj = nn.kpu() - # 加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化ai2d - ai2d_init_det() - return kpu_obj - -# 预处理方法 -def kpu_pre_process_det(rgb888p_img): - # 运行ai2d,将ai2d预处理的输出设置为kmodel的输入tensor - ai2d_run_det(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor_det - # 将ai2d的输出设置为kmodel的输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor_det) - -# 获取kmodel的推理输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - # 获取模型输出,并将结果转换为numpy,以便后续处理 - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -# kpu 运行 -def kpu_run_det(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - #(1)原图像预处理并设置模型输入 - kpu_pre_process_det(rgb888p_img) - #(2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - #(3)释放ai2d资源 - ai2d_release_det() - #(4)获取kpu输出 - results = kpu_get_output() - #(5)CHW转HWC - global ai2d_input_det - tmp = (ai2d_input_det.shape[0], ai2d_input_det.shape[1], ai2d_input_det.shape[2]) - ai2d_input_det = ai2d_input_det.reshape((ai2d_input_det.shape[0], ai2d_input_det.shape[1] * ai2d_input_det.shape[2])) - ai2d_input_det = ai2d_input_det.transpose() - tmp2 = ai2d_input_det.copy() - tmp2 = tmp2.reshape((tmp[1], tmp[2], tmp[0])) - #(6)后处理,aicube.ocr_post_process接口说明: - # 接口:aicube.ocr_post_process(threshold_map,ai_isp,kmodel_input_shape,isp_shape,mask_threshold,box_threshold); - # 参数说明: - # threshold_map: DBNet模型的输出为(N,kmodel_input_shape_det[2],kmodel_input_shape_det[3],2),两个通道分别为threshold map和segmentation map - # 后处理过程只使用threshold map,因此将results[0][:,:,:,0] reshape成一维传给接口使用。 - # ai_isp:后处理还会返回基于原图的检测框裁剪数据,因此要将原图数据reshape为一维传给接口处理。 - # kmodel_input_shape:kmodel输入分辨率。 - # isp_shape:AI原图分辨率。要将kmodel输出分辨率的检测框坐标映射到原图分辨率上,需要使用这两个分辨率的值。 - # mask_threshold:用于二值化图像获得文本区域。 - # box_threshold:检测框分数阈值,低于该阈值的检测框不计入结果。 - with ScopedTiming("kpu_post",debug_mode > 0): - # 调用aicube模块的ocr_post_process完成ocr检测的后处理 - # det_results结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...] 
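- # 使用示意(非固定接口,仅说明上面的返回结构):若后续接OCR识别模型,可按该结构遍历结果,
- # 取出每个文本框基于AI原图的裁剪图像和四个角点坐标,例如:
- # for crop_nhwc, box in det_results:
- #     p1_x, p1_y, p2_x, p2_y, p3_x, p3_y, p4_x, p4_y = box
- #     ...  # crop_nhwc 可作为文本识别模型的输入数据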
- det_results = aicube.ocr_post_process(results[0][:, :, :, 0].reshape(-1), tmp2.reshape(-1), - [kmodel_input_shape_det[3], kmodel_input_shape_det[2]], - [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH], mask_threshold, box_threshold) - return det_results - - -# kpu 释放内存 -def kpu_deinit_det(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d_det,ai2d_output_tensor_det,ai2d_input_tensor_det - if "ai2d" in globals(): - del ai2d_det - if "ai2d_output_tensor_det" in globals(): - del ai2d_output_tensor_det - -#********************for media_utils.py******************** - -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -#display 初始化 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程,将OCR检测后处理得到的框绘制到OSD上并显示 -def display_draw(det_results): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if det_results: - draw_img.clear() - # 循环绘制所有检测到的框 - for j in det_results: - # 将原图的坐标点转换成显示的坐标点,循环绘制四条直线,得到一个矩形框 - for i in range(4): - x1 = j[1][(i * 2)] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y1 = j[1][(i * 2 + 1)] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - x2 = j[1][((i + 1) * 2) % 8] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y2 = j[1][((i + 1) * 2 + 1) % 8] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - draw_img.draw_line((int(x1), int(y1), int(x2), int(y2)), color=(255, 0, 0, 255), - thickness=5) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 启动视频流 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 捕获一帧图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 释放内存 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# 停止视频流 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - ret = media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, 
DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret - -# media 释放buffer,销毁link -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - global buffer,media_source, media_sink - if "buffer" in globals(): - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - media.destroy_link(media_source, media_sink) - media.buffer_deinit() - - -def ocr_det_inference(): - print("ocr_det_test start") - kpu_ocr_det = kpu_init_det(kmodel_file_det) # 创建ocr检测任务的kpu对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count=0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # 读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - # 若图像获取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - det_results = kpu_run_det(kpu_ocr_det,rgb888p_img) # kpu运行获取kmodel的推理输出 - display_draw(det_results) # 绘制检测结果,并显示 - # 释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # 释放内存 - if (gc_count>2): - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止camera - display_deinit() # 释放display - kpu_deinit_det() # 释放kpu - if "current_kmodel_obj" in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_ocr_det - gc.collect() - nn.shrink_memory_pool() - time.sleep(1) - media_deinit() # 释放整个media - print("ocr_det_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ocr_det_inference() -``` - -### 7.手掌检测 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#kmodel输入shape -kmodel_input_shape = (1,3,512,512) # kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS -labels = ["hand"] # 模型输出类别名称 - -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - 
elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - global ai2d_builder - global ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = kmodel_frame_size[0] - height = kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor, ai2d_output_tensor, ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], kmodel_frame_size, frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): #删除ai2d变量,释放对它所引用对象的内存引用 - global ai2d - del ai2d - if 'ai2d_output_tensor' in globals(): 
#删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global ai2d_output_tensor - del ai2d_output_tensor - if 'ai2d_builder' in globals(): #删除ai2d_builder变量,释放对它所引用对象的内存引用 - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 框出所有检测到的手以及标出得分 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if dets: - draw_img.clear() - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - - x1 = int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x2 = int(x2 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y2 = int(y2 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - if (h<(0.1*DISPLAY_HEIGHT)): - continue - if (w<(0.25*DISPLAY_WIDTH) and ((x1<(0.03*DISPLAY_WIDTH)) or (x2>(0.97*DISPLAY_WIDTH)))): - continue - if (w<(0.15*DISPLAY_WIDTH) and ((x1<(0.01*DISPLAY_WIDTH)) or (x2>(0.99*DISPLAY_WIDTH)))): - continue - draw_img.draw_rectangle(x1 , y1 , int(w) , int(h), color=(255, 0, 255, 0), thickness = 2) - draw_img.draw_string( x1 , y1-50, " " + labels[det_box[0]] + " " + str(round(det_box[1],2)), color=(255,0, 255, 0), scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, 
DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_detect.py********** -def hand_detect_inference(): - print("hand_detect_test start") - kpu_hand_detect = kpu_init(kmodel_file) # 创建手掌检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_detect_inference() -``` - -### 8.人体检测 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGH = 1080 - -#kmodel输入shape -kmodel_input_shape = (1,3,640,640) # kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 行人检测阈值,用于过滤roi -nms_threshold = 0.6 # 行人检测框阈值,用于过滤重复roi -kmodel_frame_size = [640,640] # 行人检测输入图片尺寸 -frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH] # 直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS -labels = ["person"] # 模型输出类别名称 - -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/person_detect_yolov5n.kmodel' # kmodel文件的路径 -anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326] #anchor设置 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) -total_debug_mode = 1 - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, 
exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - global ai2d_builder - global ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGH - width = kmodel_frame_size[0] - height = kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)行人检测 kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放行人检测 ai2d 资源 - ai2d_release() - # (4)获取行人检测 kpu 输出 - results = kpu_get_output() - # (5)行人检测 kpu 结果后处理 - with ScopedTiming("kpu_post_process",debug_mode > 0): - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], kmodel_frame_size, frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回行人检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del 
ai2d - if 'ai2d_output_tensor' in globals(): - global ai2d_output_tensor - del ai2d_output_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 框出所有检测到的人以及标出得分 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if dets: - draw_img.clear() - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - - x1 = int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH) - x2 = int(x2 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y2 = int(y2 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH) - - if (h<(0.1*DISPLAY_HEIGHT)): - continue - if (w<(0.25*DISPLAY_WIDTH) and ((x1<(0.03*DISPLAY_WIDTH)) or (x2>(0.97*DISPLAY_WIDTH)))): - continue - if (w<(0.15*DISPLAY_WIDTH) and ((x1<(0.01*DISPLAY_WIDTH)) or (x2>(0.99*DISPLAY_WIDTH)))): - continue - draw_img.draw_rectangle(x1 , y1 , int(w) , int(h) , color=(255, 0, 255, 0),thickness = 4) - draw_img.draw_string( x1 , y1-50, " " + labels[det_box[0]] + " " + str(round(det_box[1],2)) , color=(255,0, 255, 0), scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - 
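- # 说明:draw_img 分配在普通内存中,每帧先 clear 再绘制;下面的 osd_img 通过 poolid/phyaddr/virtaddr
- # 绑定到上面 request_buffer 申请到的 VB 内存块,由 display.show_image 送显。
- # ARGB8888 每像素占 4 字节,因此该 VB 块大小为 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT 字节。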
# 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for person_detect.py********** -def person_detect_inference(): - print("person_detect_test start") - kpu_person_detect = kpu_init(kmodel_file) # 创建行人检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",total_debug_mode): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_person_detect,rgb888p_img) # 执行行人检测 kpu 运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_person_detect - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("person_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - person_detect_inference() -``` - -### 9.人体姿态估计 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#人体关键点检测 kmodel 输入参数配置 -kmodel_input_shape = (1,3,640,640) # kmodel输入分辨率 -rgb_mean = [114,114,114] # ai2d padding 值 - -#人体关键点 相关参数设置 -confidence_threshold = 0.2 # 人体关键点检测分数阈值 -nms_threshold = 0.5 # 非最大值抑制阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov8n-pose.kmodel' # kmodel文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#骨骼信息 -SKELETON = [(16, 14),(14, 12),(17, 15),(15, 13),(12, 13),(6, 12),(7, 13),(6, 7),(6, 8),(7, 9),(8, 10),(9, 11),(2, 3),(1, 2),(1, 3),(2, 4),(3, 5),(4, 6),(5, 7)] -#肢体颜色 -LIMB_COLORS = [(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 255, 51, 255),(255, 255, 51, 255),(255, 255, 51, 255),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0)] -#关键点颜色 -KPS_COLORS = [(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 
0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255)] - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# 人体关键点检测 接收kmodel输出的后处理方法 -def kpu_post_process(output_datas): - with ScopedTiming("kpu_post_process", debug_mode > 0): - results = aidemo.person_kp_postprocess(output_datas,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[kmodel_input_shape[2],kmodel_input_shape[3]],confidence_threshold,nms_threshold) - return results - -# 获取kmodel输入图像resize比例 以及 padding的上下左右像素数量 -def get_pad_param(): - #右padding或下padding - dst_w = kmodel_input_shape[3] - dst_h = kmodel_input_shape[2] - - ratio_w = float(dst_w) / OUT_RGB888P_WIDTH - ratio_h = float(dst_h) / OUT_RGB888P_HEIGHT - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGHT) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(dh)) - bottom = (int)(round(dh)) - left = (int)(round(dw)) - right = (int)(round(dw)) - return [0, 0, 0, 0, top, bottom, left, right] - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, get_pad_param(), 0, rgb_mean) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - - return result - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - 
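- # 返回值说明(由下方 display_draw 的用法归纳,非接口定义):kp_res[0] 为检测到的人体目标列表,
- # kp_res[1] 为每个目标对应的 17 个 COCO 关键点,每个关键点为 (x, y, score),
- # 其坐标基于 AI 原图分辨率(OUT_RGB888P_WIDTH x OUT_RGB888P_HEIGHT)。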
global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - kp_res = kpu_post_process(results) - # (6) 返回 人体关键点检测 结果 - return kp_res - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有目标关键点作图 -def display_draw(res): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if res[0]: - draw_img.clear() - kpses = res[1] - for i in range(len(res[0])): - for k in range(17+2): - if (k < 17): - kps_x = round(kpses[i][k][0]) - kps_y = round(kpses[i][k][1]) - kps_s = kpses[i][k][2] - - kps_x1 = int(float(kps_x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - kps_y1 = int(float(kps_y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - if (kps_s > 0): - draw_img.draw_circle(kps_x1,kps_y1,5,KPS_COLORS[k],4) - ske = SKELETON[k] - pos1_x = round(kpses[i][ske[0]-1][0]) - pos1_y = round(kpses[i][ske[0]-1][1]) - - pos1_x_ = int(float(pos1_x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - pos1_y_ = int(float(pos1_y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - pos2_x = round(kpses[i][(ske[1] -1)][0]) - pos2_y = round(kpses[i][(ske[1] -1)][1]) - - pos2_x_ = int(float(pos2_x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - pos2_y_ = int(float(pos2_y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - pos1_s = kpses[i][(ske[0] -1)][2] - pos2_s = kpses[i][(ske[1] -1)][2] - - if (pos1_s > 0.0 and pos2_s >0.0): - draw_img.draw_line(pos1_x,pos1_y,pos2_x,pos2_y,LIMB_COLORS[k],4) - - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - 
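- # blk_size 按一整帧 ARGB8888 的 OSD 图层计算:4 字节/像素 * 1920 * 1080 = 8294400 字节(约 8MB);
- # blk_cnt = 1 表示该缓冲池中只预留一块这样的内存,供后面 request_buffer 申请给 osd_img 使用。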
config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for person_kp_detect.py********** -def person_kp_detect_inference(): - print("person_kp_detect start") - kpu_person_kp_detect = kpu_init(kmodel_file) # 创建人体关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - person_kp_detect_res = kpu_run(kpu_person_kp_detect,rgb888p_img) # 执行人体关键点检测 kpu 运行 以及 后处理过程 - display_draw(person_kp_detect_res) # 将得到的人体关键点结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_person_kp_detect - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("person_kp_detect end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - person_kp_detect_inference() -``` - -### 10.KWS - -```python -from media.pyaudio import * # 音频模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import media.wave as wave # wav音频处理模块 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 -import time # 时间统计 -import struct # 字节字符转换模块 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 - -# key word spotting任务 -# 检测阈值 -THRESH = 0.5 -# 有关音频流的宏变量 -SAMPLE_RATE = 16000 # 采样率16000Hz,即每秒采样16000次 -CHANNELS = 1 # 通道数 1为单声道,2为立体声 -FORMAT = paInt16 # 音频输入输出格式 paInt16 -CHUNK = int(0.3 * 16000) # 每次读取音频数据的帧数,设置为0.3s的帧数16000*0.3=4800 - -root_dir='/sdcard/app/tests/' -kmodel_file_kws = root_dir+"kmodel/kws.kmodel" # kmodel加载路径 -reply_wav_file = root_dir+"utils/wozai.wav" # kws唤醒词回复音频路径 -debug_mode = 0 # 调试模式,大于0(调试)、 反之 (不调试) - - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = 
enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -# 当前kmodel -global current_kmodel_obj # 定义全局kpu对象 -global p,cache_np,fp,input_stream,output_stream,audio_input_tensor,cache_input_tensor # 定义全局音频流对象,输入输出流对象,并且定义kws处理接口FeaturePipeline对象fp,输入输出tensor和缓冲cache_np - -# 初始化kws音频流相关变量 -def init_kws(): - with ScopedTiming("init_kws",debug_mode > 0): - global p,cache_np,fp,input_stream,output_stream,cache_input_tensor - # 初始化模型的cache输入 - cache_np = np.zeros((1, 256, 105), dtype=np.float) - cache_input_tensor = nn.from_numpy(cache_np) - # 初始化音频预处理接口 - fp = aidemo.kws_fp_create() - # 初始化音频流 - p = PyAudio() - p.initialize(CHUNK) - media.buffer_init() - # 用于采集实时音频数据 - input_stream = p.open( - format=FORMAT, - channels=CHANNELS, - rate=SAMPLE_RATE, - input=True, - frames_per_buffer=CHUNK - ) - - # 用于播放回复音频 - output_stream = p.open( - format=FORMAT, - channels=CHANNELS, - rate=SAMPLE_RATE, - output=True, - frames_per_buffer=CHUNK - ) - -# kws 初始化kpu -def kpu_init_kws(): - with ScopedTiming("init_kpu",debug_mode > 0): - # 初始化kpu并加载kmodel - kpu = nn.kpu() - kpu.load_kmodel(kmodel_file_kws) - return kpu - -# kws 释放kpu -def kpu_deinit(): - # kpu释放 - with ScopedTiming("kpu_deinit",debug_mode > 0): - global current_kmodel_obj,audio_input_tensor,cache_input_tensor - if "current_kmodel_obj" in globals(): - del current_kmodel_obj - if "audio_input_tensor" in globals(): - del audio_input_tensor - if "cache_input_tensor" in globals(): - del cache_input_tensor - -# kws音频预处理 -def kpu_pre_process_kws(pcm_data_list): - global current_kmodel_obj - global fp,input_stream,audio_input_tensor,cache_input_tensor - with ScopedTiming("pre_process",debug_mode > 0): - # 将pcm数据处理为模型输入的特征向量 - mp_feats = aidemo.kws_preprocess(fp, pcm_data_list)[0] - mp_feats_np = np.array(mp_feats) - mp_feats_np = mp_feats_np.reshape((1, 30, 40)) - audio_input_tensor = nn.from_numpy(mp_feats_np) - cache_input_tensor = nn.from_numpy(cache_np) - current_kmodel_obj.set_input_tensor(0, audio_input_tensor) - current_kmodel_obj.set_input_tensor(1, cache_input_tensor) - -# kws任务kpu运行并完成后处理 -def kpu_run_kws(kpu_obj,pcm_data_list): - global current_kmodel_obj,cache_np,output_stream - current_kmodel_obj = kpu_obj - # (1)kws音频数据预处理 - kpu_pre_process_kws(pcm_data_list) - # (2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)获取模型输出 - logits = kpu_obj.get_output_tensor(0) - cache_tensor = kpu_obj.get_output_tensor(1) # 更新缓存输入 - logits_np = logits.to_numpy() - cache_np=cache_tensor.to_numpy() - del logits - del cache_tensor - # (4)后处理argmax - max_logits = np.max(logits_np, axis=1)[0] - max_p = np.max(max_logits) - idx = np.argmax(max_logits) - # 如果分数大于阈值,且idx==1(即包含唤醒词),播放回复音频 - if max_p > THRESH and idx == 1: - print("====Detected XiaonanXiaonan!====") - wf = wave.open(reply_wav_file, "rb") - wav_data = wf.read_frames(CHUNK) - while wav_data: - output_stream.write(wav_data) - wav_data = wf.read_frames(CHUNK) - time.sleep(1) # 时间缓冲,用于播放声音 - wf.close() - else: - print("Deactivated!") - - -# kws推理过程 -def kws_inference(): - # 记录音频帧帧数 - global p,fp,input_stream,output_stream,current_kmodel_obj - # 初始化 - init_kws() - kpu_kws=kpu_init_kws() - pcm_data_list = [] - try: - gc_count=0 - while True: - os.exitpoint() - with ScopedTiming("total", 1): - pcm_data_list.clear() - # 对实时音频流进行推理 - 
pcm_data = input_stream.read() # 获取的音频流数据字节数,len(pcm_data)=0.3*16000*2=9600,即以16000Hz的采样率采样0.3s,每次采样数据为paInt16格式占2个字节 - # 获取音频流数据 - for i in range(0, len(pcm_data), 2): - # 每两个字节组织成一个有符号整数,然后将其转换为浮点数,即为一次采样的数据,加入到当前一帧(0.3s)的数据列表中 - int_pcm_data = struct.unpack("<h", pcm_data[i:i+2])[0] - float_pcm_data = float(int_pcm_data) - pcm_data_list.append(float_pcm_data) - # 将当前一帧(0.3s)的音频数据送入kws任务推理(特征提取、kpu运行、后处理及唤醒回复) - kpu_run_kws(kpu_kws, pcm_data_list) - if gc_count > 10: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - input_stream.stop_stream() - output_stream.stop_stream() - input_stream.close() - output_stream.close() - p.terminate() - media.buffer_deinit() - aidemo.kws_fp_destroy(fp) - kpu_deinit() - del kpu_kws - if "current_kmodel_obj" in globals(): - del current_kmodel_obj - gc.collect() - nn.shrink_memory_pool() - -if __name__=="__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - kws_inference() -``` - -### 11.跌倒检测 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#kmodel输入shape -kmodel_input_shape = (1,3,640,640) # kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.3 # 摔倒检测阈值,用于过滤roi -nms_threshold = 0.45 # 摔倒检测框阈值,用于过滤重复roi -kmodel_frame_size = [640,640] # 摔倒检测输入图片尺寸 -frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 2 # 模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS -labels = ["Fall","NoFall"] # 模型输出类别名称 - -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov5n-falldown.kmodel' # kmodel文件的路径 -anchors = [10,13, 16,30, 33,23, 30,61, 62,45, 50,119, 116,90, 156,198, 373,326] # anchor设置 - -colors = [(255,0, 0, 255), (255,0, 255, 0), (255,255,0, 0), (255,255,0, 255)] # 颜色设置 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - global ai2d_builder - global ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = kmodel_frame_size[0] - height = kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - ai2d = nn.ai2d() - 
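- # 上面 padding 计算的一个数值示例:1920x1080 的 AI 原图缩放到 640x640 模型输入时,
- # ratio = min(640/1920, 640/1080) = 1/3,new_w = 640,new_h = 360,dw = 0,dh = 140,
- # 于是 top = round(139.9) = 140,bottom = round(140.1) = 140,left = right = round(-0.1) = 0,
- # 即只在图像上下各填充 140 行数值为 [114,114,114] 的像素,左右不填充。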
ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor, ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)摔倒检测 kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放摔倒检测 ai2d 资源 - ai2d_release() - # (4)获取摔倒检测 kpu 输出 - results = kpu_get_output() - # (5)摔倒检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], kmodel_frame_size, frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回摔倒检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d, ai2d_output_tensor, ai2d_builder - if 'ai2d' in globals(): #删除ai2d变量,释放对它所引用对象的内存引用 - global ai2d - del ai2d - if 'ai2d_output_tensor' in globals(): #删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global ai2d_output_tensor - del ai2d_output_tensor - if 'ai2d_builder' in globals(): #删除ai2d_builder变量,释放对它所引用对象的内存引用 - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 框出所有检测到的行人以及标出是否摔倒的结果 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if dets: - draw_img.clear() - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = float(x2 - x1) * DISPLAY_WIDTH // 
OUT_RGB888P_WIDTH - h = float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - - x1 = int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x2 = int(x2 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y2 = int(y2 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - draw_img.draw_rectangle(x1 , y1 , int(w) , int(h) , color=colors[det_box[0]], thickness = 2) - draw_img.draw_string( x1 , y1-20, " " + labels[det_box[0]] + " " + str(round(det_box[1],2)) , color=colors[det_box[0]+2], scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for falldown_detect.py********** -def falldown_detect_inference(): - print("falldown_detect_test start") - kpu_falldown_detect = kpu_init(kmodel_file) # 创建摔倒检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_falldown_detect,rgb888p_img) # 
执行摔倒检测 kpu 运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_falldown_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("falldown_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - falldown_detect_inference() -``` - -## 三、AI Demo多模型示例解析 - -### 1. 人脸关键点检测 - -```python -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os, sys # 操作系统接口模块 -import math # 数学模块 - - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸关键点kmodel输入shape -fld_kmodel_input_shape = (1,3,192,192) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸关键点kmodel -fr_kmodel_file = root_dir + 'kmodel/face_landmark.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -# 人脸关键点不同部位关键点列表 -dict_kp_seq = [ - [43, 44, 45, 47, 46, 50, 51, 49, 48], # left_eyebrow - [97, 98, 99, 100, 101, 105, 104, 103, 102], # right_eyebrow - [35, 36, 33, 37, 39, 42, 40, 41], # left_eye - [89, 90, 87, 91, 93, 96, 94, 95], # right_eye - [34, 88], # pupil - [72, 73, 74, 86], # bridge_nose - [77, 78, 79, 80, 85, 84, 83], # wing_nose - [52, 55, 56, 53, 59, 58, 61, 68, 67, 71, 63, 64], # out_lip - [65, 54, 60, 57, 69, 70, 62, 66], # in_lip - [1, 9, 10, 11, 12, 13, 14, 15, 16, 2, 3, 4, 5, 6, 7, 8, 0, 24, 23, 22, 21, 20, 19, 18, 32, 31, 30, 29, 28, 27, 26, 25, 17] # basin -] - -# 人脸关键点不同部位(顺序同dict_kp_seq)颜色配置,argb -color_list_for_osd_kp = [ - (255, 0, 255, 0), - (255, 0, 255, 0), - (255, 255, 0, 255), - (255, 255, 0, 255), - (255, 255, 0, 0), - (255, 255, 170, 0), - (255, 255, 255, 0), - (255, 0, 255, 255), - (255, 255, 220, 50), - (255, 30, 30, 255) -] - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - 
self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fld_ai2d: 人脸关键点ai2d实例 -# fld_ai2d_input_tensor: 人脸关键点ai2d输入 -# fld_ai2d_output_tensor:人脸关键点ai2d输入 -# fld_ai2d_builder: 根据人脸关键点ai2d参数,构建的人脸关键点ai2d_builder对象 -global fld_ai2d,fld_ai2d_input_tensor,fld_ai2d_output_tensor,fld_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - 
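        # Added illustrative note (not part of the original script), assuming the default
        # 1920x1080 source image and the 320x320 face-detection kmodel input used above:
        # get_pad_one_side_param() keeps the aspect ratio, so ratio = 320/1920 ≈ 0.167,
        # the resized image is roughly 320x180, bottom padding ≈ 140 and right padding ≈ 0.
        # The padded ai2d output therefore already matches the (1,3,320,320) shape that
        # set_input_tensor(0, ...) on the next line feeds to the kpu.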
current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): - global fd_ai2d - del fd_ai2d #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - if 'fd_ai2d_output_tensor' in globals(): - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor #删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - -###############for face recognition############### -def get_affine_matrix(bbox): - # 获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 从边界框提取坐标和尺寸 - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - # 计算缩放比例,使得边界框映射到模型输入空间的一部分 - scale_ratio = (fld_kmodel_input_shape[2]) / (max(w, h) * 1.5) - # 计算边界框中心点在模型输入空间的坐标 - cx = (x1 + w / 2) * scale_ratio - cy = (y1 + h / 2) * scale_ratio - # 计算模型输入空间的一半长度 - half_input_len = fld_kmodel_input_shape[2] / 2 - - # 创建仿射矩阵并进行设置 - matrix_dst = np.zeros((2, 3), dtype=np.float) - matrix_dst[0, 0] = scale_ratio - matrix_dst[0, 1] = 0 - matrix_dst[0, 2] = half_input_len - cx - matrix_dst[1, 0] = 0 - matrix_dst[1, 1] = scale_ratio - matrix_dst[1, 2] = half_input_len - cy - return matrix_dst - -def fld_ai2d_init(): - # 人脸关键点ai2d初始化 - with ScopedTiming("fld_ai2d_init",debug_mode > 0): - # (1)创建人脸关键点ai2d对象 - global fld_ai2d - fld_ai2d = nn.ai2d() - - # (2)创建人脸关键点ai2d_output_tensor对象,用于存放ai2d输出 - global fld_ai2d_output_tensor - data = np.ones(fld_kmodel_input_shape, dtype=np.uint8) - fld_ai2d_output_tensor = nn.from_numpy(data) - -def fld_ai2d_run(rgb888p_img,det): - # 人脸关键点ai2d推理 - with ScopedTiming("fld_ai2d_run",debug_mode > 0): - global fld_ai2d,fld_ai2d_input_tensor,fld_ai2d_output_tensor - #(1)根据原图ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fld_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据新的det设置新的人脸关键点ai2d参数 - fld_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - matrix_dst = get_affine_matrix(det) - affine_matrix = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - fld_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - - # (3)根据新的人脸关键点ai2d参数,构建人脸关键点ai2d_builder - global fld_ai2d_builder - fld_ai2d_builder = fld_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fld_kmodel_input_shape) - # (4)推理人脸关键点ai2d,将预处理的结果保存到fld_ai2d_output_tensor - fld_ai2d_builder.run(fld_ai2d_input_tensor, fld_ai2d_output_tensor) - -def fld_ai2d_release(): - # 
释放人脸关键点ai2d_input_tensor、ai2d_builder - with ScopedTiming("fld_ai2d_release",debug_mode > 0): - global fld_ai2d_input_tensor,fld_ai2d_builder - del fld_ai2d_input_tensor - del fld_ai2d_builder - -def fld_kpu_init(kmodel_file): - # 人脸关键点kpu初始化 - with ScopedTiming("fld_kpu_init",debug_mode > 0): - # 初始化人脸关键点kpu对象 - kpu_obj = nn.kpu() - # 加载人脸关键点kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸关键点ai2d - fld_ai2d_init() - return kpu_obj - -def fld_kpu_pre_process(rgb888p_img,det): - # 人脸关键点kpu预处理 - # 人脸关键点ai2d推理,根据det对原图进行预处理 - fld_ai2d_run(rgb888p_img,det) - with ScopedTiming("fld_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fld_ai2d_output_tensor - # 将人脸关键点ai2d输出设置为人脸关键点kpu输入 - current_kmodel_obj.set_input_tensor(0, fld_ai2d_output_tensor) - #ai2d_out_data = fld_ai2d_output_tensor.to_numpy() - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fld_kpu_get_output(): - with ScopedTiming("fld_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取人脸关键点kpu输出 - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fld_kpu_post_process(pred): - # 人脸关键点kpu推理结果后处理 - with ScopedTiming("fld_kpu_post_process",debug_mode > 0): - # (1)将人脸关键点输出变换模型输入 - half_input_len = fld_kmodel_input_shape[2] // 2 - pred = pred.flatten() - for i in range(len(pred)): - pred[i] += (pred[i] + 1) * half_input_len - - # (2)获取仿射矩阵的逆矩阵 - global matrix_dst - matrix_dst_inv = aidemo.invert_affine_transform(matrix_dst) - matrix_dst_inv = matrix_dst_inv.flatten() - - # (3)对每个关键点进行逆变换 - half_out_len = len(pred) // 2 - for kp_id in range(half_out_len): - old_x = pred[kp_id * 2] - old_y = pred[kp_id * 2 + 1] - - # 逆变换公式 - new_x = old_x * matrix_dst_inv[0] + old_y * matrix_dst_inv[1] + matrix_dst_inv[2] - new_y = old_x * matrix_dst_inv[3] + old_y * matrix_dst_inv[4] + matrix_dst_inv[5] - - pred[kp_id * 2] = new_x - pred[kp_id * 2 + 1] = new_y - - return pred - -def fld_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸关键点kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸关键点kpu预处理,设置kpu输入 - fld_kpu_pre_process(rgb888p_img,det) - # (2)人脸关键点kpu推理 - with ScopedTiming("fld_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸关键点ai2d - fld_ai2d_release() - # (4)获取人脸关键点kpu输出 - result = fld_kpu_get_output() - # (5)人脸关键点后处理 - result = fld_kpu_post_process(result) - return result - -def fld_kpu_deinit(): - # 人脸关键点kpu释放 - with ScopedTiming("fld_kpu_deinit",debug_mode > 0): - if 'fld_ai2d' in globals(): - global fld_ai2d - del fld_ai2d - if 'fld_ai2d_output_tensor' in globals(): - global fld_ai2d_output_tensor - del fld_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,landmark_preds): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - for pred in landmark_preds: - # (1)获取单个人脸框对应的人脸关键点 - for sub_part_index in range(len(dict_kp_seq)): - # (2)构建人脸某个区域关键点集 - sub_part = dict_kp_seq[sub_part_index] - face_sub_part_point_set = [] - for kp_index in 
range(len(sub_part)): - real_kp_index = sub_part[kp_index] - x, y = pred[real_kp_index * 2], pred[real_kp_index * 2 + 1] - - x = int(x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y = int(y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH) - face_sub_part_point_set.append((x, y)) - - # (3)画人脸不同区域的轮廓 - if sub_part_index in (9, 6): - color = np.array(color_list_for_osd_kp[sub_part_index],dtype = np.uint8) - face_sub_part_point_set = np.array(face_sub_part_point_set) - - aidemo.polylines(draw_img_ulab, face_sub_part_point_set,False,color,5,8,0) - - elif sub_part_index == 4: - color = color_list_for_osd_kp[sub_part_index] - for kp in face_sub_part_point_set: - x,y = kp[0],kp[1] - draw_img.draw_circle(x,y ,2, color, 1) - else: - color = np.array(color_list_for_osd_kp[sub_part_index],dtype = np.uint8) - face_sub_part_point_set = np.array(face_sub_part_point_set) - aidemo.contours(draw_img_ulab, face_sub_part_point_set,-1,color,2,8) - - # (4)将轮廓结果拷贝到osd - draw_img.copy_to(osd_img) - # (5)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global 
media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_landmark_inference(): - print("face_landmark_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸关键点kpu初始化 - kpu_face_landmark = fld_kpu_init(fr_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应人脸关键点 - landmark_result = [] - for det in dets: - ret = fld_kpu_run(kpu_face_landmark,rgb888p_img,det) - landmark_result.append(ret) - # (2.3)将人脸关键点画到屏幕上 - display_draw(dets,landmark_result) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - # 捕捉运行运行中异常,并打印错误 - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fld_kpu_deinit() - global current_kmodel_obj - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_landmark - nn.shrink_memory_pool() - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - # 释放媒体资源 - media_deinit() - - print("face_landmark_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_landmark_inference() -``` - -### 2. 
人脸识别 - -**人脸注册:** - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os,sys #操作系统接口模块 -import math #数学模块 - -#********************for config.py******************** -# kmodel输入shape -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸识别kmodel输入shape -fr_kmodel_input_shape = (1,3,112,112) -# ai原图padding -rgb_mean = [104,117,123] - -#kmodel相关参数设置 -#人脸检测 -confidence_threshold = 0.5 #人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 -#人脸识别 -max_register_face = 100 #数据库最多人脸个数 -feature_num = 128 #人脸识别特征维度 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸识别kmodel -fr_kmodel_file = root_dir + 'kmodel/face_recognition.kmodel' -# 人脸检测anchor -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 人脸注册数据库 -database_dir = root_dir + 'utils/db/' -# 人脸注册数据库原图 -database_img_dir = root_dir + 'utils/db_img/' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu实例 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fr_ai2d: 人脸识别ai2d实例 -# fr_ai2d_input_tensor: 人脸识别ai2d输入 -# fr_ai2d_output_tensor: 人脸识别ai2d输入 -# fr_ai2d_builder: 根据人脸识别ai2d参数,构建的人脸识别ai2d_builder对象 -global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor,fr_ai2d_builder -global valid_register_face #数据库中有效人脸个数 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(rgb888p_img): - # 右padding或下padding,获取padding参数 - with ScopedTiming("get_pad_one_side_param", debug_mode > 1): - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / rgb888p_img.shape[3] - ratio_h = dst_h / rgb888p_img.shape[2] - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * rgb888p_img.shape[3]) - new_h = (int)(ratio * rgb888p_img.shape[2]) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - - #(2)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - 
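        # Added note (explanatory comment, not in the original script): the output tensor
        # is created once here from a dummy np.ones buffer and then reused for every
        # registered image, so ai2d does not allocate a new (1,3,320,320) buffer per
        # inference; only fd_ai2d_input_tensor is rebuilt for each picture inside
        # fd_ai2d_run() below.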
global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - fd_ai2d_input_tensor = nn.from_numpy(rgb888p_img) - # (2)根据新的图像设置新的人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(rgb888p_img), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - # (3)根据新的人脸检测ai2d参数,构建人脸检测ai2d_builder - fd_ai2d_builder = fd_ai2d.build(rgb888p_img.shape, fd_kmodel_input_shape) - # (4)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d部分资源 - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_builder - del fd_ai2d_input_tensor - del fd_ai2d_builder - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [rgb888p_img.shape[3],rgb888p_img.shape[2]],results) - # (6)返回人脸关键点 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0],post_ret[1] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - - -###############for face recognition############### -# 标准5官 -umeyama_args_112 = [ - 38.2946 , 51.6963 , - 73.5318 , 51.5014 , - 56.0252 , 71.7366 , - 41.5493 , 92.3655 , - 70.7299 , 92.2041 -] - -def svd22(a): - # svd - s = [0.0, 0.0] - u = [0.0, 0.0, 0.0, 0.0] - v = [0.0, 0.0, 0.0, 0.0] - - s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2 - s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 
2)) - v[2] = math.sin((math.atan2(2 * (a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \ - s[0] > s[1] else 0 - v[0] = math.sqrt(1 - v[2] ** 2) - v[1] = -v[2] - v[3] = v[0] - u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1 - u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0 - u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2] - u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0] - v[0] = -v[0] - v[2] = -v[2] - - return u, s, v - - -def image_umeyama_112(src): - # 使用Umeyama算法计算仿射变换矩阵 - SRC_NUM = 5 - SRC_DIM = 2 - src_mean = [0.0, 0.0] - dst_mean = [0.0, 0.0] - - for i in range(0,SRC_NUM * 2,2): - src_mean[0] += src[i] - src_mean[1] += src[i + 1] - dst_mean[0] += umeyama_args_112[i] - dst_mean[1] += umeyama_args_112[i + 1] - - src_mean[0] /= SRC_NUM - src_mean[1] /= SRC_NUM - dst_mean[0] /= SRC_NUM - dst_mean[1] /= SRC_NUM - - src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - dst_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - - for i in range(SRC_NUM): - src_demean[i][0] = src[2 * i] - src_mean[0] - src_demean[i][1] = src[2 * i + 1] - src_mean[1] - dst_demean[i][0] = umeyama_args_112[2 * i] - dst_mean[0] - dst_demean[i][1] = umeyama_args_112[2 * i + 1] - dst_mean[1] - - A = [[0.0, 0.0], [0.0, 0.0]] - for i in range(SRC_DIM): - for k in range(SRC_DIM): - for j in range(SRC_NUM): - A[i][k] += dst_demean[j][i] * src_demean[j][k] - A[i][k] /= SRC_NUM - - T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] - U, S, V = svd22([A[0][0], A[0][1], A[1][0], A[1][1]]) - - T[0][0] = U[0] * V[0] + U[1] * V[2] - T[0][1] = U[0] * V[1] + U[1] * V[3] - T[1][0] = U[2] * V[0] + U[3] * V[2] - T[1][1] = U[2] * V[1] + U[3] * V[3] - - scale = 1.0 - src_demean_mean = [0.0, 0.0] - src_demean_var = [0.0, 0.0] - for i in range(SRC_NUM): - src_demean_mean[0] += src_demean[i][0] - src_demean_mean[1] += src_demean[i][1] - - src_demean_mean[0] /= SRC_NUM - src_demean_mean[1] /= SRC_NUM - - for i in range(SRC_NUM): - src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0]) - src_demean_var[1] += (src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1]) - - src_demean_var[0] /= SRC_NUM - src_demean_var[1] /= SRC_NUM - - scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1]) - T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1]) - T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1]) - T[0][0] *= scale - T[0][1] *= scale - T[1][0] *= scale - T[1][1] *= scale - return T - -def get_affine_matrix(sparse_points): - # 获取affine变换矩阵 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 使用Umeyama算法计算仿射变换矩阵 - matrix_dst = image_umeyama_112(sparse_points) - matrix_dst = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - return matrix_dst - -def fr_ai2d_init(): - with ScopedTiming("fr_ai2d_init",debug_mode > 0): - # (1)人脸识别ai2d初始化 - global fr_ai2d - fr_ai2d = nn.ai2d() - - # (2)人脸识别ai2d_output_tensor初始化,用于存放ai2d输出 - global fr_ai2d_output_tensor - data = np.ones(fr_kmodel_input_shape, dtype=np.uint8) - fr_ai2d_output_tensor = nn.from_numpy(data) - -def fr_ai2d_run(rgb888p_img,sparse_points): - # 人脸识别ai2d推理 - with ScopedTiming("fr_ai2d_run",debug_mode > 0): - global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor - #(1)根据原图创建人脸识别ai2d_input_tensor对象 - fr_ai2d_input_tensor = nn.from_numpy(rgb888p_img) - #(2)根据新的人脸关键点设置新的人脸识别ai2d参数 - 
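        # Added note (explanatory comment, not in the original script): the affine matrix
        # computed below maps the five detected landmarks onto the canonical 112x112
        # template points in umeyama_args_112 (via image_umeyama_112), so ai2d outputs an
        # aligned face crop matching fr_kmodel_input_shape (1,3,112,112) before it is
        # passed to the recognition kmodel.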
fr_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - affine_matrix = get_affine_matrix(sparse_points) - fr_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - global fr_ai2d_builder - # (3)根据新的人脸识别ai2d参数,构建识别ai2d_builder - fr_ai2d_builder = fr_ai2d.build(rgb888p_img.shape, fr_kmodel_input_shape) - # (4)推理人脸识别ai2d,将预处理的结果保存到fr_ai2d_output_tensor - fr_ai2d_builder.run(fr_ai2d_input_tensor, fr_ai2d_output_tensor) - -def fr_ai2d_release(): - # 释放人脸识别ai2d_input_tensor、ai2d_builder - with ScopedTiming("fr_ai2d_release",debug_mode > 0): - global fr_ai2d_input_tensor,fr_ai2d_builder - del fr_ai2d_input_tensor - del fr_ai2d_builder - -def fr_kpu_init(kmodel_file): - # 人脸识别kpu初始化 - with ScopedTiming("fr_kpu_init",debug_mode > 0): - # 初始化人脸识别kpu对象 - kpu_obj = nn.kpu() - # 加载人脸识别kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸识别ai2d - fr_ai2d_init() - return kpu_obj - -def fr_kpu_pre_process(rgb888p_img,sparse_points): - # 人脸识别kpu预处理 - # 人脸识别ai2d推理,根据关键点对原图进行预处理 - fr_ai2d_run(rgb888p_img,sparse_points) - with ScopedTiming("fr_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fr_ai2d_output_tensor - # 将人脸识别ai2d输出设置为人脸识别kpu输入 - current_kmodel_obj.set_input_tensor(0, fr_ai2d_output_tensor) - - #ai2d_out_data = fr_ai2d_output_tensor.to_numpy() - #print('ai2d_out_data.shape:',ai2d_out_data.shape) - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fr_kpu_get_output(): - # 获取人脸识别kpu输出 - with ScopedTiming("fr_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result[0] - -def fr_kpu_run(kpu_obj,rgb888p_img,sparse_points): - # 人脸识别kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸识别kpu预处理,设置kpu输入 - fr_kpu_pre_process(rgb888p_img,sparse_points) - # (2)人脸识别kpu推理 - with ScopedTiming("fr kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸识别ai2d - fr_ai2d_release() - # (4)获取人脸识别kpu输出 - results = fr_kpu_get_output() - return results - -def fr_kpu_deinit(): - # 人脸识别kpu相关资源释放 - with ScopedTiming("fr_kpu_deinit",debug_mode > 0): - if 'fr_ai2d' in globals(): - global fr_ai2d - del fr_ai2d - if 'fr_ai2d_output_tensor' in globals(): - global fr_ai2d_output_tensor - del fr_ai2d_output_tensor - -#********************for face_detect.py******************** -def image2rgb888array(img): #4维 - # 将Image转换为rgb888格式 - with ScopedTiming("fr_kpu_deinit",debug_mode > 0): - img_data_rgb888=img.to_rgb888() - # hwc,rgb888 - img_hwc=img_data_rgb888.to_numpy_ref() - shape=img_hwc.shape - img_tmp = img_hwc.reshape((shape[0] * shape[1], shape[2])) - img_tmp_trans = img_tmp.transpose() - img_res=img_tmp_trans.copy() - # chw,rgb888 - img_return=img_res.reshape((1,shape[2],shape[0],shape[1])) - return img_return - -def face_registration_inference(): - print("face_registration_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸识别kpu初始化 - kpu_face_reg = fr_kpu_init(fr_kmodel_file) - try: - # 获取图像列表 - img_list = os.listdir(database_img_dir) - for img_file in img_list: - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一张图像 - full_img_file = database_img_dir + img_file - print(full_img_file) - img = image.Image(full_img_file) - rgb888p_img_ndarry = image2rgb888array(img) - - #(2)推理得到人脸检测kpu,得到人脸检测框、人脸五点 - dets,landms = fd_kpu_run(kpu_face_detect,rgb888p_img_ndarry) - if dets: - if dets.shape[0] == 1: - #(3)若是只检测到一张人脸,则将该人脸注册到数据库 - db_i_name = 
img_file.split('.')[0] - for landm in landms: - reg_result = fr_kpu_run(kpu_face_reg,rgb888p_img_ndarry,landm) - #print('\nwrite bin:',database_dir+'{}.bin'.format(db_i_name)) - with open(database_dir+'{}.bin'.format(db_i_name), "wb") as file: - file.write(reg_result.tobytes()) - else: - print('Only one person in a picture when you sign up') - else: - print('No person detected') - - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 释放kpu资源 - fd_kpu_deinit() - fr_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_reg - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - - print("face_registration_test end") - return 0 - -if __name__ == '__main__': - nn.shrink_memory_pool() - face_registration_inference() -``` - -**人脸识别:** - -```python -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel输入shape -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸识别kmodel输入shape -fr_kmodel_input_shape = (1,3,112,112) -# ai原图padding -rgb_mean = [104,117,123] - -#kmodel相关参数设置 -#人脸检测 -confidence_threshold = 0.5 #人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 -#人脸识别 -max_register_face = 100 # 数据库最多人脸个数 -feature_num = 128 # 人脸识别特征维度 -face_recognition_threshold = 0.75 # 人脸识别阈值 - -#文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸识别kmodel -fr_kmodel_file = root_dir + 'kmodel/face_recognition.kmodel' -# 人脸检测anchor -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 人脸数据库 -database_dir = root_dir + 'utils/db/' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu实例 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fr_ai2d: 人脸识别ai2d实例 -# fr_ai2d_input_tensor: 人脸识别ai2d输入 -# fr_ai2d_output_tensor: 人脸识别ai2d输入 -# fr_ai2d_builder: 根据人脸识别ai2d参数,构建的人脸识别ai2d_builder对象 -global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor,fr_ai2d_builder 
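# Added note (assumption based on the registration script above, not part of the
# original code): each .bin file written during registration stores one feature
# vector of feature_num = 128 floats, named after its source image; database_init()
# below reads these files back with np.frombuffer and uses the file names as the
# person names returned by database_search().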
-# valid_register_face: 数据库中有效人脸个数 -# db_name: 数据库人名列表 -# db_data: 数据库特征列表 -global valid_register_face,db_name,db_data - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - 
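    # Added note (not part of the original script): in this recognition demo
    # fd_kpu_run() is expected to return two values, the detection boxes and the
    # five-point landmarks (post_ret[0] and post_ret[1]); when nothing is detected it
    # returns two empty results, so the caller's "for landm in landms" loop simply
    # skips that frame.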
# (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret,post_ret - else: - return post_ret[0],post_ret[1] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -##for database -def database_init(): - # 数据初始化,构建数据库人名列表和数据库特征列表 - with ScopedTiming("database_init", debug_mode > 1): - global valid_register_face,db_name,db_data - valid_register_face = 0 - db_name = [] - db_data = [] - - db_file_list = os.listdir(database_dir) - for db_file in db_file_list: - if not db_file.endswith('.bin'): - continue - if valid_register_face >= max_register_face: - break - valid_index = valid_register_face - full_db_file = database_dir + db_file - with open(full_db_file, 'rb') as f: - data = f.read() - feature = np.frombuffer(data, dtype=np.float) - db_data.append(feature) - name = db_file.split('.')[0] - db_name.append(name) - valid_register_face += 1 - -def database_reset(): - # 数据库清空 - with ScopedTiming("database_reset", debug_mode > 1): - global valid_register_face,db_name,db_data - print("database clearing...") - db_name = [] - db_data = [] - valid_register_face = 0 - print("database clear Done!") - -def database_search(feature): - # 数据库查询 - with ScopedTiming("database_search", debug_mode > 1): - global valid_register_face,db_name,db_data - v_id = -1 - v_score_max = 0.0 - - # 将当前人脸特征归一化 - feature /= np.linalg.norm(feature) - # 遍历当前人脸数据库,统计最高得分 - for i in range(valid_register_face): - db_feature = db_data[i] - db_feature /= np.linalg.norm(db_feature) - # 计算数据库特征与当前人脸特征相似度 - v_score = np.dot(feature, db_feature)/2 + 0.5 - if v_score > v_score_max: - v_score_max = v_score - v_id = i - - if v_id == -1: - # 数据库中无人脸 - return 'unknown' - elif v_score_max < face_recognition_threshold: - # 小于人脸识别阈值,未识别 -# print('v_score_max:',v_score_max) - return 'unknown' - else: - # 识别成功 - result = 'name: {}, score:{}'.format(db_name[v_id],v_score_max) - return result - -# 标准5官 -umeyama_args_112 = [ - 38.2946 , 51.6963 , - 73.5318 , 51.5014 , - 56.0252 , 71.7366 , - 41.5493 , 92.3655 , - 70.7299 , 92.2041 -] - -def svd22(a): - # svd - s = [0.0, 0.0] - u = [0.0, 0.0, 0.0, 0.0] - v = [0.0, 0.0, 0.0, 0.0] - - s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2 - s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2)) - v[2] = math.sin((math.atan2(2 * (a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \ - s[0] > s[1] else 0 - v[0] = math.sqrt(1 - v[2] ** 2) - v[1] = -v[2] - v[3] = v[0] - u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1 - u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0 - u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2] - u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0] - v[0] = -v[0] - v[2] = -v[2] - - return 
u, s, v - - -def image_umeyama_112(src): - # 使用Umeyama算法计算仿射变换矩阵 - SRC_NUM = 5 - SRC_DIM = 2 - src_mean = [0.0, 0.0] - dst_mean = [0.0, 0.0] - - for i in range(0,SRC_NUM * 2,2): - src_mean[0] += src[i] - src_mean[1] += src[i + 1] - dst_mean[0] += umeyama_args_112[i] - dst_mean[1] += umeyama_args_112[i + 1] - - src_mean[0] /= SRC_NUM - src_mean[1] /= SRC_NUM - dst_mean[0] /= SRC_NUM - dst_mean[1] /= SRC_NUM - - src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - dst_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - - for i in range(SRC_NUM): - src_demean[i][0] = src[2 * i] - src_mean[0] - src_demean[i][1] = src[2 * i + 1] - src_mean[1] - dst_demean[i][0] = umeyama_args_112[2 * i] - dst_mean[0] - dst_demean[i][1] = umeyama_args_112[2 * i + 1] - dst_mean[1] - - A = [[0.0, 0.0], [0.0, 0.0]] - for i in range(SRC_DIM): - for k in range(SRC_DIM): - for j in range(SRC_NUM): - A[i][k] += dst_demean[j][i] * src_demean[j][k] - A[i][k] /= SRC_NUM - - T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] - U, S, V = svd22([A[0][0], A[0][1], A[1][0], A[1][1]]) - - T[0][0] = U[0] * V[0] + U[1] * V[2] - T[0][1] = U[0] * V[1] + U[1] * V[3] - T[1][0] = U[2] * V[0] + U[3] * V[2] - T[1][1] = U[2] * V[1] + U[3] * V[3] - - scale = 1.0 - src_demean_mean = [0.0, 0.0] - src_demean_var = [0.0, 0.0] - for i in range(SRC_NUM): - src_demean_mean[0] += src_demean[i][0] - src_demean_mean[1] += src_demean[i][1] - - src_demean_mean[0] /= SRC_NUM - src_demean_mean[1] /= SRC_NUM - - for i in range(SRC_NUM): - src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0]) - src_demean_var[1] += (src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1]) - - src_demean_var[0] /= SRC_NUM - src_demean_var[1] /= SRC_NUM - - scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1]) - T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1]) - T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1]) - T[0][0] *= scale - T[0][1] *= scale - T[1][0] *= scale - T[1][1] *= scale - return T - -def get_affine_matrix(sparse_points): - # 获取放射变换矩阵 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 使用Umeyama算法计算仿射变换矩阵 - matrix_dst = image_umeyama_112(sparse_points) - matrix_dst = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - return matrix_dst - -def fr_ai2d_init(): - with ScopedTiming("fr_ai2d_init",debug_mode > 0): - # (1)人脸识别ai2d初始化 - global fr_ai2d - fr_ai2d = nn.ai2d() - - # (2)人脸识别ai2d_output_tensor初始化,用于存放ai2d输出 - global fr_ai2d_output_tensor - data = np.ones(fr_kmodel_input_shape, dtype=np.uint8) - fr_ai2d_output_tensor = nn.from_numpy(data) - -def fr_ai2d_run(rgb888p_img,sparse_points): - # 人脸识别ai2d推理 - with ScopedTiming("fr_ai2d_run",debug_mode > 0): - global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor - #(1)根据原图创建人脸识别ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fr_ai2d_input_tensor = nn.from_numpy(ai2d_input) - #(2)根据新的人脸关键点设置新的人脸识别ai2d参数 - fr_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - affine_matrix = get_affine_matrix(sparse_points) - fr_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - global fr_ai2d_builder - # (3)根据新的人脸识别ai2d参数,构建识别ai2d_builder - fr_ai2d_builder = fr_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fr_kmodel_input_shape) - # (4)推理人脸识别ai2d,将预处理的结果保存到fr_ai2d_output_tensor - fr_ai2d_builder.run(fr_ai2d_input_tensor, 
fr_ai2d_output_tensor) - -def fr_ai2d_release(): - # 释放人脸识别ai2d_input_tensor、ai2d_builder - with ScopedTiming("fr_ai2d_release",debug_mode > 0): - global fr_ai2d_input_tensor,fr_ai2d_builder - del fr_ai2d_input_tensor - del fr_ai2d_builder - -def fr_kpu_init(kmodel_file): - # 人脸识别kpu初始化 - with ScopedTiming("fr_kpu_init",debug_mode > 0): - # 初始化人脸识别kpu对象 - kpu_obj = nn.kpu() - # 加载人脸识别kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸识别ai2d - fr_ai2d_init() - # 数据库初始化 - database_init() - return kpu_obj - -def fr_kpu_pre_process(rgb888p_img,sparse_points): - # 人脸识别kpu预处理 - # 人脸识别ai2d推理,根据关键点对原图进行预处理 - fr_ai2d_run(rgb888p_img,sparse_points) - with ScopedTiming("fr_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fr_ai2d_output_tensor - # 将人脸识别ai2d输出设置为人脸识别kpu输入 - current_kmodel_obj.set_input_tensor(0, fr_ai2d_output_tensor) - -def fr_kpu_get_output(): - # 获取人脸识别kpu输出 - with ScopedTiming("fr_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result[0] - -def fr_kpu_run(kpu_obj,rgb888p_img,sparse_points): - # 人脸识别kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸识别kpu预处理,设置kpu输入 - fr_kpu_pre_process(rgb888p_img,sparse_points) - # (2)人脸识别kpu推理 - with ScopedTiming("fr kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸识别ai2d - fr_ai2d_release() - # (4)获取人脸识别kpu输出 - results = fr_kpu_get_output() - # (5)在数据库中查找当前人脸特征 - recg_result = database_search(results) - return recg_result - -def fr_kpu_deinit(): - # 人脸识别kpu相关资源释放 - with ScopedTiming("fr_kpu_deinit",debug_mode > 0): - if 'fr_ai2d' in globals(): - global fr_ai2d - del fr_ai2d - if 'fr_ai2d_output_tensor' in globals(): - global fr_ai2d_output_tensor - del fr_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,recg_results): - # 在显示器上写人脸识别结果 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for i,det in enumerate(dets): - # (1)画人脸框 - x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x1 = x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y1 = y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - draw_img.draw_rectangle(x1,y1, w, h, color=(255,0, 0, 255), thickness = 4) - - # (2)写人脸识别结果 - recg_text = recg_results[i] - draw_img.draw_string(x1,y1,recg_text,color=(255, 255, 0, 0),scale=4) - - # (3)将画图结果拷贝到osd - draw_img.copy_to(osd_img) - # (4)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, 
CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_BGR_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_recognition_inference(): - print("face_recognition_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸关键点kpu初始化 - kpu_face_recg = fr_kpu_init(fr_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets,landms = fd_kpu_run(kpu_face_detect,rgb888p_img) - recg_result = [] - for landm in landms: - # (2.2)针对每个人脸五官点,推理得到人脸特征,并计算特征在数据库中相似度 - ret = fr_kpu_run(kpu_face_recg,rgb888p_img,landm) - recg_result.append(ret) - # (2.3)将识别结果画到显示器上 - display_draw(dets,recg_result) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fr_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_recg - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 
释放媒体资源 - media_deinit() - - print("face_recognition_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_recognition_inference() -``` - -### 3.人脸姿态角 - -```python -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸姿态估计kmodel输入shape -fp_kmodel_input_shape = (1,3,120,120) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel文件配置 -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸姿态估计kmodel文件配置 -fp_kmodel_file = root_dir + 'kmodel/face_pose.kmodel' -# anchor文件配置 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fld_ai2d: 人脸姿态估计ai2d实例 -# fld_ai2d_input_tensor: 人脸姿态估计ai2d输入 -# fld_ai2d_output_tensor:人脸姿态估计ai2d输入 -# fld_ai2d_builder: 根据人脸姿态估计ai2d参数,构建的人脸姿态估计ai2d_builder对象 -global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor,fp_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom 
= (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - - -###############for face recognition############### -def get_affine_matrix(bbox): - # 
获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 设置缩放因子 - factor = 2.7 - # 从边界框提取坐标和尺寸 - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - # 模型输入大小 - edge_size = fp_kmodel_input_shape[2] - # 平移距离,使得模型输入空间的中心对准原点 - trans_distance = edge_size / 2.0 - # 计算边界框中心点的坐标 - center_x = x1 + w / 2.0 - center_y = y1 + h / 2.0 - # 计算最大边长 - maximum_edge = factor * (h if h > w else w) - # 计算缩放比例 - scale = edge_size * 2.0 / maximum_edge - # 计算平移参数 - cx = trans_distance - scale * center_x - cy = trans_distance - scale * center_y - # 创建仿射矩阵 - affine_matrix = [scale, 0, cx, 0, scale, cy] - return affine_matrix - -def build_projection_matrix(det): - x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4]) - - # 计算边界框中心坐标 - center_x = x1 + w / 2.0 - center_y = y1 + h / 2.0 - - # 定义后部(rear)和前部(front)的尺寸和深度 - rear_width = 0.5 * w - rear_height = 0.5 * h - rear_depth = 0 - factor = np.sqrt(2.0) - front_width = factor * rear_width - front_height = factor * rear_height - front_depth = factor * rear_width # 使用宽度来计算深度,也可以使用高度,取决于需求 - - # 定义立方体的顶点坐标 - temp = [ - [-rear_width, -rear_height, rear_depth], - [-rear_width, rear_height, rear_depth], - [rear_width, rear_height, rear_depth], - [rear_width, -rear_height, rear_depth], - [-front_width, -front_height, front_depth], - [-front_width, front_height, front_depth], - [front_width, front_height, front_depth], - [front_width, -front_height, front_depth] - ] - - projections = np.array(temp) - # 返回投影矩阵和中心坐标 - return projections, (center_x, center_y) - -def rotation_matrix_to_euler_angles(R): - # 将旋转矩阵(3x3 矩阵)转换为欧拉角(pitch、yaw、roll) - # 计算 sin(yaw) - sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2) - - if sy < 1e-6: - # 若 sin(yaw) 过小,说明 pitch 接近 ±90 度 - pitch = np.arctan2(-R[1, 2], R[1, 1]) * 180 / np.pi - yaw = np.arctan2(-R[2, 0], sy) * 180 / np.pi - roll = 0 - else: - # 计算 pitch、yaw、roll 的角度 - pitch = np.arctan2(R[2, 1], R[2, 2]) * 180 / np.pi - yaw = np.arctan2(-R[2, 0], sy) * 180 / np.pi - roll = np.arctan2(R[1, 0], R[0, 0]) * 180 / np.pi - return [pitch,yaw,roll] - -def get_euler(data): - # 获取旋转矩阵和欧拉角 - R = data[:3, :3].copy() - eular = rotation_matrix_to_euler_angles(R) - return R,eular - -def fp_ai2d_init(): - # 人脸姿态估计ai2d初始化 - with ScopedTiming("fp_ai2d_init",debug_mode > 0): - # (1)创建人脸姿态估计ai2d对象 - global fp_ai2d - fp_ai2d = nn.ai2d() - - # (2)创建人脸姿态估计ai2d_output_tensor对象 - global fp_ai2d_output_tensor - data = np.ones(fp_kmodel_input_shape, dtype=np.uint8) - fp_ai2d_output_tensor = nn.from_numpy(data) - -def fp_ai2d_run(rgb888p_img,det): - # 人脸姿态估计ai2d推理 - with ScopedTiming("fp_ai2d_run",debug_mode > 0): - global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor - #(1)根据原图构建人脸姿态估计ai2d_input_tensor - ai2d_input = rgb888p_img.to_numpy_ref() - fp_ai2d_input_tensor = nn.from_numpy(ai2d_input) - #(2)设置人脸姿态估计ai2d参数 - fp_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - matrix_dst = get_affine_matrix(det) - fp_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,matrix_dst) - # (3)构建人脸姿态估计ai2d_builder - global fp_ai2d_builder - fp_ai2d_builder = fp_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fp_kmodel_input_shape) - # (4)推理人脸姿态估计ai2d,将结果保存到ai2d_output_tensor - fp_ai2d_builder.run(fp_ai2d_input_tensor, fp_ai2d_output_tensor) - -def fp_ai2d_release(): - # 释放部分人脸姿态估计ai2d资源 - with ScopedTiming("fp_ai2d_release",debug_mode > 0): - global fp_ai2d_input_tensor,fp_ai2d_builder - del fp_ai2d_input_tensor - del fp_ai2d_builder - -def 
fp_kpu_init(kmodel_file): - # 初始化人脸姿态估计kpu及ai2d - with ScopedTiming("fp_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - fp_ai2d_init() - return kpu_obj - -def fp_kpu_pre_process(rgb888p_img,det): - # 人脸姿态估计kpu预处理 - fp_ai2d_run(rgb888p_img,det) - with ScopedTiming("fp_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fp_ai2d_output_tensor - current_kmodel_obj.set_input_tensor(0, fp_ai2d_output_tensor) - #ai2d_out_data = _ai2d_output_tensor.to_numpy() - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fp_kpu_get_output(): - # 获取人脸姿态估计kpu输出 - with ScopedTiming("fp_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - result = result[0] - del data - return result - -def fp_kpu_post_process(pred): - # 人脸姿态估计kpu推理结果后处理 - R,eular = get_euler(pred) - return R,eular - -def fp_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸姿态估计kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)根据人脸检测框进行人脸姿态估计kpu预处理 - fp_kpu_pre_process(rgb888p_img,det) - # (2)人脸姿态估计kpu推理 - with ScopedTiming("fp_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸姿态估计ai2d资源 - fp_ai2d_release() - # (4)释放人脸姿态估计kpu推理输出 - result = fp_kpu_get_output() - # (5)释放人脸姿态估计后处理 - R,eular = fp_kpu_post_process(result) - return R,eular - -def fp_kpu_deinit(): - # 释放人脸姿态估计kpu及ai2d资源 - with ScopedTiming("fp_kpu_deinit",debug_mode > 0): - if 'fp_ai2d' in globals(): - global fp_ai2d - del fp_ai2d - if 'fp_ai2d_output_tensor' in globals(): - global fp_ai2d_output_tensor - del fp_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,pose_results): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - line_color = np.array([255, 0, 0 ,255],dtype = np.uint8) #bgra - for i,det in enumerate(dets): - # (1)获取人脸姿态矩阵和欧拉角 - projections,center_point = build_projection_matrix(det) - R,euler = pose_results[i] - - # (2)遍历人脸投影矩阵的关键点,进行投影,并将结果画在图像上 - first_points = [] - second_points = [] - for pp in range(8): - sum_x, sum_y = 0.0, 0.0 - for cc in range(3): - sum_x += projections[pp][cc] * R[cc][0] - sum_y += projections[pp][cc] * (-R[cc][1]) - - center_x,center_y = center_point[0],center_point[1] - x = (sum_x + center_x) / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y = (sum_y + center_y) / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - x = max(0, min(x, DISPLAY_WIDTH)) - y = max(0, min(y, DISPLAY_HEIGHT)) - - if pp < 4: - first_points.append((x, y)) - else: - second_points.append((x, y)) - first_points = np.array(first_points,dtype=np.float) - aidemo.polylines(draw_img_ulab,first_points,True,line_color,2,8,0) - second_points = np.array(second_points,dtype=np.float) - aidemo.polylines(draw_img_ulab,second_points,True,line_color,2,8,0) - - for ll in range(4): - x0, y0 = int(first_points[ll][0]),int(first_points[ll][1]) - x1, y1 = int(second_points[ll][0]),int(second_points[ll][1]) - draw_img.draw_line(x0, y0, x1, y1, color = (255, 0, 0 ,255), thickness 
= 2) - - # (3)将绘制好的图像拷贝到显示缓冲区,并在显示器上展示 - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框,draw_img->draw_img_ulab(两者指向同一块内存) - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_pose_inference(): - print("face_pose_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸姿态估计kpu初始化 - kpu_face_pose = fp_kpu_init(fp_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - rgb888p_img = None - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - 
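# ---- Editor's note: illustrative sketch, not part of the original demo ----
# fd_kpu_run() returns one entry per detected face; elsewhere in this demo
# (get_affine_matrix, display_draw) the first four values of each entry are
# read as the face box x, y, w, h in OUT_RGB888P_WIDTH x OUT_RGB888P_HEIGH
# coordinates. A minimal way to inspect the boxes while debugging:
if debug_mode > 0:
    for _det in dets:
        _x, _y, _w, _h = map(lambda v: int(round(v, 0)), _det[:4])
        print("face box:", _x, _y, _w, _h)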
# (2.2)针对每个人脸框,推理得到对应人脸旋转矩阵、欧拉角 - pose_results = [] - for det in dets: - R,eular = fp_kpu_run(kpu_face_pose,rgb888p_img,det) - pose_results.append((R,eular)) - # (2.3)将人脸姿态估计结果画到显示器上 - display_draw(dets,pose_results) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fp_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_pose - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_pose_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_pose_inference() -``` - -### 4. 人脸解析 - -```python -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸解析kmodel输入shape -fp_kmodel_input_shape = (1,3,320,320) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸解析kmodel -fp_kmodel_file = root_dir + 'kmodel/face_parse.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fld_ai2d: 人脸解析ai2d实例 -# fld_ai2d_input_tensor: 人脸解析ai2d输入 -# fld_ai2d_output_tensor:人脸解析ai2d输入 -# fld_ai2d_builder: 根据人脸解析ai2d参数,构建的人脸解析ai2d_builder对象 -global 
fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor,fp_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with 
ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -def get_affine_matrix(bbox): - # 获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 设置缩放因子 - factor = 2.7 - # 从边界框提取坐标和尺寸 - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - # 模型输入大小 - edge_size = fp_kmodel_input_shape[2] - # 平移距离,使得模型输入空间的中心对准原点 - trans_distance = edge_size / 2.0 - # 计算边界框中心点的坐标 - center_x = x1 + w / 2.0 - center_y = y1 + h / 2.0 - # 计算最大边长 - maximum_edge = factor * (h if h > w else w) - # 计算缩放比例 - scale = edge_size * 2.0 / maximum_edge - # 计算平移参数 - cx = trans_distance - scale * center_x - cy = trans_distance - scale * center_y - # 创建仿射矩阵 - affine_matrix = [scale, 0, cx, 0, scale, cy] - return affine_matrix - -def fp_ai2d_init(): - # 人脸解析ai2d初始化 - with ScopedTiming("fp_ai2d_init",debug_mode > 0): - # (1)创建人脸解析ai2d对象 - global fp_ai2d - fp_ai2d = nn.ai2d() - - # (2)创建人脸解析ai2d_output_tensor对象 - global fp_ai2d_output_tensor - data = np.ones(fp_kmodel_input_shape, dtype=np.uint8) - fp_ai2d_output_tensor = nn.from_numpy(data) - -def fp_ai2d_run(rgb888p_img,det): - # 人脸解析ai2d推理 - with ScopedTiming("fp_ai2d_run",debug_mode > 0): - global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor - #(1)根据原图构建人脸解析ai2d_input_tensor - ai2d_input = rgb888p_img.to_numpy_ref() - fp_ai2d_input_tensor = nn.from_numpy(ai2d_input) - #(2)设置人脸解析ai2d参数 - fp_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - matrix_dst = get_affine_matrix(det) - fp_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,matrix_dst) - - # (3)构建人脸解析ai2d_builder - global fp_ai2d_builder - fp_ai2d_builder = fp_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fp_kmodel_input_shape) - # (4)推理人脸解析ai2d,将结果保存到ai2d_output_tensor - fp_ai2d_builder.run(fp_ai2d_input_tensor, fp_ai2d_output_tensor) - -def fp_ai2d_release(): - # 释放部分人脸解析ai2d资源 - with ScopedTiming("fp_ai2d_release",debug_mode > 0): - global fp_ai2d_input_tensor,fp_ai2d_builder - del fp_ai2d_input_tensor - del fp_ai2d_builder - -def fp_kpu_init(kmodel_file): - # 初始化人脸解析kpu及ai2d - with ScopedTiming("fp_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - fp_ai2d_init() - return kpu_obj - -def fp_kpu_pre_process(rgb888p_img,det): - # 人脸解析kpu预处理 - fp_ai2d_run(rgb888p_img,det) - with ScopedTiming("fp_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fp_ai2d_output_tensor - current_kmodel_obj.set_input_tensor(0, fp_ai2d_output_tensor) - #ai2d_out_data = fp_ai2d_output_tensor.to_numpy() - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fp_kpu_get_output(): - # 获取人脸解析kpu输出 - with 
ScopedTiming("fp_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fp_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸解析kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)根据人脸检测框进行人脸解析kpu预处理 - fp_kpu_pre_process(rgb888p_img,det) - # (2)人脸解析kpu推理 - with ScopedTiming("fp_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸解析ai2d资源 - fp_ai2d_release() - # (4)释放人脸解析kpu输出 - result = fp_kpu_get_output() - return result - -def fp_kpu_deinit(): - # 释放人脸解析kpu和ai2d资源 - with ScopedTiming("fp_kpu_deinit",debug_mode > 0): - if 'fp_ai2d' in globals(): - global fp_ai2d - del fp_ai2d - if 'fp_ai2d_output_tensor' in globals(): - global fp_ai2d_output_tensor - del fp_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -#for display -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,parse_results): - # 在显示器画出人脸解析结果 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - for i,det in enumerate(dets): - # (1)将人脸检测框画到draw_img - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - draw_img.draw_rectangle(x,y, w, h, color=(255, 255, 0, 255)) - # (2)将人脸解析结果画到draw_img(draw_img_ulab和draw_img指同一内存) - aidemo.face_parse_post_process(draw_img_ulab,[OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH], - [DISPLAY_WIDTH,DISPLAY_HEIGHT],fp_kmodel_input_shape[2],det.tolist(),parse_results[i]) - # (3)将绘制好的图像拷贝到显示缓冲区,并在显示器上展示 - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = 
VB_REMAP_MODE_NOCACHE - - ret = media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框,draw_img->draw_img_ulab(两者指向同一块内存) - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_parse_inference(): - print("face_parse_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸解析kpu初始化 - kpu_face_parse = fp_kpu_init(fp_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) - while True: - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应人脸解析结果 - parse_results = [] - for det in dets: - parse_ret = fp_kpu_run(kpu_face_parse,rgb888p_img,det) - parse_results.append(parse_ret) - # (2.3)将人脸解析结果画到显示器上 - display_draw(dets,parse_results) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fp_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_parse - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_parse_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_parse_inference() -``` - -### 5. 
车牌识别 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(640, 16) -OUT_RGB888P_HEIGHT = 360 - -#车牌检测 和 车牌识别 kmodel输入shape -det_kmodel_input_shape = (1,3,640,640) -rec_kmodel_input_shape = (1,1,32,220) - -#车牌检测 相关参数设置 -obj_thresh = 0.2 #车牌检测分数阈值 -nms_thresh = 0.2 #检测框 非极大值抑制 阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -det_kmodel_file = root_dir + 'kmodel/LPD_640.kmodel' # 车牌检测 kmodel 文件路径 -rec_kmodel_file = root_dir + 'kmodel/licence_reco.kmodel' # 车牌识别 kmodel 文件路径 -#dict_rec = ["挂", "使", "领", "澳", "港", "皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"] -dict_rec = ["gua","shi","ling","ao","gang","wan","hu","jin","yu","ji","jin","meng","liao","ji","hei","su","zhe","jing","min","gan","lu","yu","e","xiang","yue","gui","qiong","chuan","gui","yun","zang","shan","gan","qing","ning","xin","jing","xue","0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"] -dict_size = len(dict_rec) -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global det_ai2d,det_ai2d_input_tensor,det_ai2d_output_tensor,det_ai2d_builder # 定义车牌检测 ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global rec_ai2d,rec_ai2d_input_tensor,rec_ai2d_output_tensor,rec_ai2d_builder # 定义车牌识别 ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder - -# 车牌检测 接收kmodel输出的后处理方法 -def det_kpu_post_process(output_data): - with ScopedTiming("det_kpu_post_process", debug_mode > 0): - results = aidemo.licence_det_postprocess(output_data,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[det_kmodel_input_shape[2],det_kmodel_input_shape[3]],obj_thresh,nms_thresh) - return results - -# 车牌识别 接收kmodel输出的后处理方法 -def rec_kpu_post_process(output_data): - with ScopedTiming("rec_kpu_post_process", debug_mode > 0): - size = rec_kmodel_input_shape[3] / 4 - result = [] - for i in range(size): - maxs = float("-inf") - index = -1 - for j in range(dict_size): - if (maxs < float(output_data[i * dict_size +j])): - index = j - maxs = output_data[i * dict_size +j] - result.append(index) - - result_str = "" - for i in range(size): - if (result[i] >= 0 and result[i] != 0 and not(i > 0 and result[i-1] == result[i])): - result_str += dict_rec[result[i]-1] - return result_str - -# 车牌检测 ai2d 初始化 -def 
det_ai2d_init(): - with ScopedTiming("det_ai2d_init",debug_mode > 0): - global det_ai2d - det_ai2d = nn.ai2d() - det_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - det_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global det_ai2d_out_tensor - data = np.ones(det_kmodel_input_shape, dtype=np.uint8) - det_ai2d_out_tensor = nn.from_numpy(data) - - global det_ai2d_builder - det_ai2d_builder = det_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], det_kmodel_input_shape) - -# 车牌识别 ai2d 初始化 -def rec_ai2d_init(): - with ScopedTiming("rec_ai2d_init",debug_mode > 0): - global rec_ai2d - rec_ai2d = nn.ai2d() - rec_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - global rec_ai2d_out_tensor - data = np.ones(rec_kmodel_input_shape, dtype=np.uint8) - rec_ai2d_out_tensor = nn.from_numpy(data) - - rec_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - -# 车牌检测 ai2d 运行 -def det_ai2d_run(rgb888p_img): - with ScopedTiming("det_ai2d_run",debug_mode > 0): - global det_ai2d_input_tensor,det_ai2d_out_tensor,det_ai2d_builder - det_ai2d_input = rgb888p_img.to_numpy_ref() - det_ai2d_input_tensor = nn.from_numpy(det_ai2d_input) - - det_ai2d_builder.run(det_ai2d_input_tensor, det_ai2d_out_tensor) - -# 车牌识别 ai2d 运行 -def rec_ai2d_run(img_array): - with ScopedTiming("rec_ai2d_run",debug_mode > 0): - global rec_ai2d_input_tensor,rec_ai2d_out_tensor,rec_ai2d_builder - rec_ai2d_builder = rec_ai2d.build([1,1,img_array.shape[2],img_array.shape[3]], rec_kmodel_input_shape) - rec_ai2d_input_tensor = nn.from_numpy(img_array) - - rec_ai2d_builder.run(rec_ai2d_input_tensor, rec_ai2d_out_tensor) - -# 车牌检测 ai2d 释放内存 -def det_ai2d_release(): - with ScopedTiming("det_ai2d_release",debug_mode > 0): - global det_ai2d_input_tensor - del det_ai2d_input_tensor - -# 车牌识别 ai2d 释放内存 -def rec_ai2d_release(): - with ScopedTiming("rec_ai2d_release",debug_mode > 0): - global rec_ai2d_input_tensor, rec_ai2d_builder - del rec_ai2d_input_tensor - del rec_ai2d_builder - -# 车牌检测 kpu 初始化 -def det_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("det_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - det_ai2d_init() - return kpu_obj - -# 车牌识别 kpu 初始化 -def rec_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("rec_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - rec_ai2d_init() - return kpu_obj - -# 车牌检测 kpu 输入预处理 -def det_kpu_pre_process(rgb888p_img): - det_ai2d_run(rgb888p_img) - with ScopedTiming("det_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,det_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, det_ai2d_out_tensor) - -# 车牌识别 kpu 输入预处理 -def rec_kpu_pre_process(img_array): - rec_ai2d_run(img_array) - with ScopedTiming("rec_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,rec_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, rec_ai2d_out_tensor) - -# 车牌识别 抠图 -def rec_array_pre_process(rgb888p_img,dets): - with ScopedTiming("rec_array_pre_process",debug_mode > 0): - isp_image = rgb888p_img.to_numpy_ref() - imgs_array_boxes = aidemo.ocr_rec_preprocess(isp_image,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],dets) - return imgs_array_boxes - -# 车牌检测 获取 kmodel 输出 -def det_kpu_get_output(): - with ScopedTiming("det_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in 
range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - tmp2 = result.copy() - del data - results.append(tmp2) - return results - -# 车牌识别 获取 kmodel 输出 -def rec_kpu_get_output(): - with ScopedTiming("rec_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - result = result.reshape((result.shape[0] * result.shape[1] * result.shape[2])) - tmp = result.copy() - del data - return tmp - -# 车牌检测 kpu 运行 -def det_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - det_kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("det_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - det_ai2d_release() - # (4) 获取kpu输出 - results = det_kpu_get_output() - # (5) kpu结果后处理 - dets = det_kpu_post_process(results) - # 返回 车牌检测结果 - return dets - -# 车牌识别 kpu 运行 -def rec_kpu_run(kpu_obj,rgb888p_img,dets): - global current_kmodel_obj - if (len(dets) == 0): - return [] - current_kmodel_obj = kpu_obj - # (1) 原始图像抠图,车牌检测结果 points 排序 - imgs_array_boxes = rec_array_pre_process(rgb888p_img,dets) - imgs_array = imgs_array_boxes[0] - boxes = imgs_array_boxes[1] - recs = [] - for img_array in imgs_array: - # (2) 抠出后的图像 进行预处理,设置模型输入 - rec_kpu_pre_process(img_array) - # (3) kpu 运行 - with ScopedTiming("rec_kpu_run",debug_mode > 0): - kpu_obj.run() - # (4) 释放ai2d资源 - rec_ai2d_release() - # (5) 获取 kpu 输出 - result = rec_kpu_get_output() - # (6) kpu 结果后处理 - rec = rec_kpu_post_process(result) - recs.append(rec) - # (7) 返回 车牌检测 和 识别结果 - return [boxes,recs] - - -# 车牌检测 kpu 释放内存 -def det_kpu_deinit(): - with ScopedTiming("det_kpu_deinit",debug_mode > 0): - if 'det_ai2d' in globals(): - global det_ai2d - del det_ai2d - if 'det_ai2d_builder' in globals(): - global det_ai2d_builder - del det_ai2d_builder - if 'det_ai2d_out_tensor' in globals(): - global det_ai2d_out_tensor - del det_ai2d_out_tensor - -# 车牌识别 kpu 释放内存 -def rec_kpu_deinit(): - with ScopedTiming("rec_kpu_deinit",debug_mode > 0): - if 'rec_ai2d' in globals(): - global rec_ai2d - del rec_ai2d - if 'rec_ai2d_out_tensor' in globals(): - global rec_ai2d_out_tensor - del rec_ai2d_out_tensor - - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有车牌检测框 和 识别结果绘制到屏幕 -def display_draw(dets_recs): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets_recs: - dets = dets_recs[0] - recs = dets_recs[1] - draw_img.clear() - point_8 = np.zeros((8),dtype=np.int16) - for det_index in range(len(dets)): - for i in range(4): - x = dets[det_index][i * 2 + 0]/OUT_RGB888P_WIDTH*DISPLAY_WIDTH - y = dets[det_index][i * 2 + 1]/OUT_RGB888P_HEIGHT*DISPLAY_HEIGHT - point_8[i * 2 + 0] = int(x) - point_8[i * 2 + 1] = int(y) - for i in range(4): - draw_img.draw_line(point_8[i * 2 + 0],point_8[i * 2 + 1],point_8[(i+1) % 4 * 2 + 0],point_8[(i+1) % 4 * 2 + 1],color=(255, 0, 255, 0),thickness=4) - draw_img.draw_string( point_8[6], point_8[7] + 20, recs[det_index] , color=(255,255,153,18) , scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, 
DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_BGR_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for licence_det_rec.py********** -def licence_det_rec_inference(): - print("licence_det_rec start") - kpu_licence_det = det_kpu_init(det_kmodel_file) # 创建车牌检测的 kpu 对象 - kpu_licence_rec = rec_kpu_init(rec_kmodel_file) # 创建车牌识别的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = det_kpu_run(kpu_licence_det,rgb888p_img) # 执行车牌检测 kpu 运行 以及 后处理过程 - dets_recs = rec_kpu_run(kpu_licence_rec,rgb888p_img,dets) # 执行车牌识别 kpu 运行 以及 后处理过程 - display_draw(dets_recs) # 将得到的检测结果和识别结果 绘制到display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - det_kpu_deinit() # 释放 
车牌检测 kpu - rec_kpu_deinit() # 释放 车牌识别 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_licence_det - del kpu_licence_rec - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("licence_det_rec end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - licence_det_rec_inference() -``` - -### 6. 石头剪刀布 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -from random import randint #随机整数生成 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aicube #aicube模块,封装ai cube 相关后处理 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#手掌检测 和 手掌关键点检测 kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) -hk_kmodel_input_shape = (1,3,256,256) - -#手掌检测 相关参数设置 -confidence_threshold = 0.2 #手掌检测 分数阈值 -nms_threshold = 0.5 #非极大值抑制 阈值 -hd_kmodel_frame_size = [512,512] #手掌检测kmodel输入 w h -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] #手掌检测原始输入图像 w h -strides = [8,16,32] #手掌检测模型 下采样输出倍数 -num_classes = 1 #检测类别数, 及手掌一种 -nms_option = False #控制最大值抑制的方式 False 类内 True 类间 -labels = ["hand"] #标签名称 -anchors = [26,27,53,52,75,71,80,99,106,82,99,134,140,113,161,172,245,276] #手掌检测模型 锚框 -#手掌关键点检测 相关参数 -hk_kmodel_frame_size = [256,256] #手掌关键点检测 kmodel 输入 w h - -# kmodel 路径 -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' #手掌检测kmodel路径 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' #手掌关键点kmodel路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -# 猜拳模式 0 玩家稳赢 , 1 玩家必输 , n > 2 多局多胜 -guess_mode = 3 - -# 读取石头剪刀布的bin文件方法 -def read_file(file_name): - image_arr = np.fromfile(file_name,dtype=np.uint8) - image_arr = image_arr.reshape((400,400,4)) - return image_arr -# 石头剪刀布的 array -five_image = read_file(root_dir + "utils/five.bin") -fist_image = read_file(root_dir + "utils/fist.bin") -shear_image = read_file(root_dir + "utils/shear.bin") - - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global counts_guess, player_win, k230_win, sleep_end, set_stop_id # 定义猜拳游戏的参数:猜拳次数、玩家赢次、k230赢次、是否停顿、是狗暂停 - -# 手掌检测 ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow 
- else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - # init kpu and load kmodel - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - - global hd_ai2d_output_tensor - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获取 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原始图像预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3) 释放ai2d资源 - hd_ai2d_release() - # (4) 获取kpu输出 - results = hd_kpu_get_output() - # (5) kpu结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6) 返回 手掌检测 结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): - global hd_ai2d_builder - del hd_ai2d_builder - -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - global hk_ai2d_output_tensor - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = 
nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor,hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 接收kmodel结果的后处理 -def hk_kpu_post_process(results, x, y, w, h): - results_show = np.zeros(results.shape,dtype=np.int16) - # results_show = np.zeros(len(results),dtype=np.int16) - results_show[0::2] = results[0::2] * w + x - results_show[1::2] = results[1::2] * h + y - return results_show - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2) kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3) 释放ai2d资源 - hk_ai2d_release() - # (4) 获取kpu输出 - results = hk_kpu_get_output() - # (5) kpu结果后处理 - result = hk_kpu_post_process(results[0],x,y,w,h) - # (6) 返回 关键点检测 结果 - return result - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 手掌关键点检测 计算角度 -def hk_vector_2d_angle(v1,v2): - v1_x = v1[0] - v1_y = v1[1] - v2_x = v2[0] - v2_y = v2[1] - v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) - v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) - dot_product = v1_x * v2_x + v1_y * v2_y - cos_angle = dot_product/(v1_norm*v2_norm) - angle = np.acos(cos_angle)*180/np.pi - # if (angle>180): - # return 65536 - return angle - -# 利用手掌关键点检测的结果 判断手掌手势 -def hk_gesture(kpu_hand_keypoint_detect,rgb888p_img,det_box): - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - return - if (w<(0.25*OUT_RGB888P_WIDTH) and 
((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - return - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - return - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) - - angle_list = [] - for i in range(5): - angle = hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) - angle_list.append(angle) - - thr_angle = 65. - thr_angle_thumb = 53. - thr_angle_s = 49. - gesture_str = None - if 65535. not in angle_list: - if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "fist" - elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "gun" - elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]5) and (angle_list[1]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "one" - elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]thr_angle_thumb) and (angle_list[1]thr_angle): - gesture_str = "three" - elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "thumbUp" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "yeah" - - return gesture_str - - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - 
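# ---- Editor's note: illustrative sketch, not part of the original demo ----
# The single VB block configured above has to hold one ARGB8888 OSD frame
# (4 bytes per pixel), which is why blk_size is 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT:
# 4 * 1920 * 1080 = 8,294,400 bytes, roughly 7.9 MiB.
# A quick sanity check that could be added while experimenting:
assert config.comm_pool[0].blk_size == 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT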
- media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for finger_guessing.py********** -def finger_guessing_inference(): - print("finger_guessing_test start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) # 开启 camera - counts_guess = -1 # 猜拳次数 计数 - player_win = 0 # 玩家 赢次计数 - k230_win = 0 # k230 赢次计数 - sleep_end = False # 是否 停顿 - set_stop_id = True # 是否 暂停猜拳 - LIBRARY = ["fist","yeah","five"] # 猜拳 石头剪刀布 三种方案的dict - - count = 0 - global draw_img,masks,osd_img - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - with ScopedTiming("trigger time", debug_mode > 0): - dets_no_pro = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - gesture = "" - draw_img.clear() - - dets = [] - for det_box in dets_no_pro: - if det_box[4] < OUT_RGB888P_WIDTH - 10 : - dets.append(det_box) - - for det_box in dets: - gesture = hk_gesture(kpu_hand_keypoint_detect,rgb888p_img,det_box) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 得到手势类型 - if (len(dets) >= 2): - draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - elif (guess_mode == 0): - if (gesture == "fist"): - masks[:400,:400,:] = shear_image - elif (gesture == "five"): - masks[:400,:400,:] = fist_image - elif (gesture == "yeah"): - masks[:400,:400,:] = five_image - draw_img.copy_to(osd_img) - elif (guess_mode == 1): - if (gesture == "fist"): - masks[:400,:400,:] = five_image - elif (gesture == "five"): - masks[:400,:400,:] = shear_image - elif (gesture == "yeah"): - masks[:400,:400,:] = fist_image - draw_img.copy_to(osd_img) - else: - if (sleep_end): - time.sleep_ms(2000) - sleep_end = False - if (len(dets) == 0): - set_stop_id = True - if (counts_guess == -1 and gesture != "fist" and gesture != "yeah" and gesture != "five"): - draw_img.draw_string( 400 , 450, "G A M E S T A R T", color=(255,255,0,0), scale=7) - draw_img.draw_string( 400 , 550, " 1 S E T ", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - elif (counts_guess == guess_mode): - draw_img.clear() - if (k230_win > player_win): - draw_img.draw_string( 
400 , 450, "Y O U L O S E", color=(255,255,0,0), scale=7) - elif (k230_win < player_win): - draw_img.draw_string( 400 , 450, "Y O U W I N", color=(255,255,0,0), scale=7) - else: - draw_img.draw_string( 400 , 450, "T I E G A M E", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - counts_guess = -1 - player_win = 0 - k230_win = 0 - - sleep_end = True - else: - if (set_stop_id): - if (counts_guess == -1 and (gesture == "fist" or gesture == "yeah" or gesture == "five")): - counts_guess = 0 - if (counts_guess != -1 and (gesture == "fist" or gesture == "yeah" or gesture == "five")): - k230_guess = randint(1,10000) % 3 - if (gesture == "fist" and LIBRARY[k230_guess] == "yeah"): - player_win += 1 - elif (gesture == "fist" and LIBRARY[k230_guess] == "five"): - k230_win += 1 - if (gesture == "yeah" and LIBRARY[k230_guess] == "fist"): - k230_win += 1 - elif (gesture == "yeah" and LIBRARY[k230_guess] == "five"): - player_win += 1 - if (gesture == "five" and LIBRARY[k230_guess] == "fist"): - player_win += 1 - elif (gesture == "five" and LIBRARY[k230_guess] == "yeah"): - k230_win += 1 - - if (LIBRARY[k230_guess] == "fist"): - masks[:400,:400,:] = fist_image - elif (LIBRARY[k230_guess] == "five"): - masks[:400,:400,:] = five_image - elif (LIBRARY[k230_guess] == "yeah"): - masks[:400,:400,:] = shear_image - - counts_guess += 1; - draw_img.draw_string( 400 , 450, " " + str(counts_guess) + " S E T ", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - set_stop_id = False - sleep_end = True - - else: - draw_img.draw_string( 400 , 450, " " + str(counts_guess+1) + " S E T ", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - else: - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) # 将得到的图像 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图形 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 停止 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个 media - - - print("finger_guessing_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - finger_guessing_inference() -``` - -### 7. 
OCR识别 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aicube #aicube模块,封装检测分割等任务相关后处理 -import os, sys - -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(640, 16) -OUT_RGB888P_HEIGH = 360 - -#kmodel输入参数设置 -kmodel_input_shape_det = (1,3,640,640) # OCR检测模型的kmodel输入分辨率 -kmodel_input_shape_rec = (1,3,32,512) # OCR识别模型的kmodel输入分辨率 -rgb_mean = [0,0,0] # ai2d padding的值 - -#检测步骤kmodel相关参数设置 -mask_threshold = 0.25 # 二值化mask阈值 -box_threshold = 0.3 # 检测框分数阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file_det = root_dir + 'kmodel/ocr_det_int16.kmodel' # 检测模型路径 -kmodel_file_rec = root_dir + "kmodel/ocr_rec_int16.kmodel" # 识别模型路径 -dict_path = root_dir + 'utils/dict.txt' # 调试模式 大于0(调试)、 反之 (不调试) -debug_mode = 0 - -# OCR字典读取 -with open(dict_path, 'r') as file: - line_one = file.read(100000) - line_list = line_one.split("\r\n") -DICT = {num: char.replace("\r", "").replace("\n", "") for num, char in enumerate(line_list)} - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -# utils 设定全局变量 -# 当前kmodel -global current_kmodel_obj # 设置全局kpu对象 -# 检测阶段预处理应用的ai2d全局变量 -global ai2d_det,ai2d_input_tensor_det,ai2d_output_tensor_det,ai2d_builder_det,ai2d_input_det # 设置检测模型的ai2d对象,并定义ai2d的输入、输出和builder -# 识别阶段预处理应用的ai2d全局变量 -global ai2d_rec,ai2d_input_tensor_rec,ai2d_output_tensor_rec,ai2d_builder_rec # 设置识别模型的ai2d对象,并定义ai2d的输入、输出和builder - -# padding方法,一边padding,右padding或者下padding -def get_pad_one_side_param(out_img_size,input_img_size): - dst_w = out_img_size[0] - dst_h = out_img_size[1] - - input_width = input_img_size[0] - input_high = input_img_size[1] - - ratio_w = dst_w / input_width - ratio_h = dst_h / input_high - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * input_width) - new_h = (int)(ratio * input_high) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -# 检测步骤ai2d初始化 -def ai2d_init_det(): - with ScopedTiming("ai2d_init_det",debug_mode > 0): - global ai2d_det - ai2d_det = nn.ai2d() - ai2d_det.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d_det.set_pad_param(True, get_pad_one_side_param([kmodel_input_shape_det[3],kmodel_input_shape_det[2]], [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH]), 0, [0, 0, 0]) - ai2d_det.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - global ai2d_output_tensor_det - data = np.ones(kmodel_input_shape_det, dtype=np.uint8) - ai2d_output_tensor_det = nn.from_numpy(data) - global ai2d_builder_det - ai2d_builder_det = ai2d_det.build([1, 3, OUT_RGB888P_HEIGH, OUT_RGB888P_WIDTH], [1, 3, kmodel_input_shape_det[2], 
kmodel_input_shape_det[3]]) - - -# 检测步骤的ai2d 运行,完成ai2d_init_det预设的预处理 -def ai2d_run_det(rgb888p_img): - with ScopedTiming("ai2d_run_det",debug_mode > 0): - global ai2d_input_tensor_det,ai2d_builder_det,ai2d_input_det - ai2d_input_det = rgb888p_img.to_numpy_ref() - ai2d_input_tensor_det = nn.from_numpy(ai2d_input_det) - global ai2d_output_tensor_det - ai2d_builder_det.run(ai2d_input_tensor_det, ai2d_output_tensor_det) - -# 识别步骤ai2d初始化 -def ai2d_init_rec(): - with ScopedTiming("ai2d_init_res",debug_mode > 0): - global ai2d_rec,ai2d_output_tensor_rec - ai2d_rec = nn.ai2d() - ai2d_rec.set_dtype(nn.ai2d_format.RGB_packed, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d_out_data = np.ones((1, 3, kmodel_input_shape_rec[2], kmodel_input_shape_rec[3]), dtype=np.uint8) - ai2d_output_tensor_rec = nn.from_numpy(ai2d_out_data) - - -# 识别步骤ai2d运行 -def ai2d_run_rec(rgb888p_img): - with ScopedTiming("ai2d_run_rec",debug_mode > 0): - global ai2d_rec,ai2d_builder_rec,ai2d_input_tensor_rec,ai2d_output_tensor_rec - ai2d_rec.set_pad_param(True, get_pad_one_side_param([kmodel_input_shape_rec[3],kmodel_input_shape_rec[2]],[rgb888p_img.shape[2],rgb888p_img.shape[1]]), 0, [0, 0, 0]) - ai2d_rec.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - ai2d_builder_rec = ai2d_rec.build([rgb888p_img.shape[0], rgb888p_img.shape[1], rgb888p_img.shape[2],rgb888p_img.shape[3]], - [1, 3, kmodel_input_shape_rec[2], kmodel_input_shape_rec[3]]) - ai2d_input_tensor_rec = nn.from_numpy(rgb888p_img) - ai2d_builder_rec.run(ai2d_input_tensor_rec, ai2d_output_tensor_rec) - -# 检测步骤ai2d释放内存 -def ai2d_release_det(): - with ScopedTiming("ai2d_release_det",debug_mode > 0): - if "ai2d_input_tensor_det" in globals(): - global ai2d_input_tensor_det - del ai2d_input_tensor_det - -# 识别步骤ai2d释放内存 -def ai2d_release_rec(): - with ScopedTiming("ai2d_release_rec",debug_mode > 0): - if "ai2d_input_tensor_rec" in globals(): - global ai2d_input_tensor_rec - del ai2d_input_tensor_rec - -# 检测步骤kpu初始化 -def kpu_init_det(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init_det",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - ai2d_init_det() - return kpu_obj - -# 识别步骤kpu初始化 -def kpu_init_rec(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init_rec",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - ai2d_init_rec() - return kpu_obj - -# 检测步骤预处理,调用ai2d_run_det实现,并将ai2d的输出设置为kmodel的输入 -def kpu_pre_process_det(rgb888p_img): - ai2d_run_det(rgb888p_img) - with ScopedTiming("kpu_pre_process_det",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor_det - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor_det) - -# 识别步骤预处理,调用ai2d_init_run_rec实现,并将ai2d的输出设置为kmodel的输入 -def kpu_pre_process_rec(rgb888p_img): - ai2d_run_rec(rgb888p_img) - with ScopedTiming("kpu_pre_process_rec",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor_rec - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor_rec) - - -# 获取kmodel的输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -# 检测步骤kpu运行 -def kpu_run_det(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - #(1)原图像预处理并设置模型输入 - 
kpu_pre_process_det(rgb888p_img) - #(2)kpu推理 - with ScopedTiming("kpu_run_det",debug_mode > 0): - # 检测运行 - kpu_obj.run() - #(3)检测释放ai2d资源 - ai2d_release_det() - #(4)获取检测kpu输出 - results = kpu_get_output() - #(5)CHW转HWC - global ai2d_input_det - tmp = (ai2d_input_det.shape[0], ai2d_input_det.shape[1], ai2d_input_det.shape[2]) - ai2d_input_det = ai2d_input_det.reshape((ai2d_input_det.shape[0], ai2d_input_det.shape[1] * ai2d_input_det.shape[2])) - ai2d_input_det = ai2d_input_det.transpose() - tmp2 = ai2d_input_det.copy() - tmp2 = tmp2.reshape((tmp[1], tmp[2], tmp[0])) - #(6)后处理,aicube.ocr_post_process接口说明: - # 接口:aicube.ocr_post_process(threshold_map,ai_isp,kmodel_input_shape,isp_shape,mask_threshold,box_threshold); - # 参数说明: - # threshold_map: DBNet模型的输出为(N,kmodel_input_shape_det[2],kmodel_input_shape_det[3],2),两个通道分别为threshold map和segmentation map - # 后处理过程只使用threshold map,因此将results[0][:,:,:,0] reshape成一维传给接口使用。 - # ai_isp:后处理还会返回基于原图的检测框裁剪数据,因此要将原图数据reshape为一维传给接口处理。 - # kmodel_input_shape:kmodel输入分辨率。 - # isp_shape:AI原图分辨率。要将kmodel输出分辨率的检测框坐标映射到原图分辨率上,需要使用这两个分辨率的值。 - # mask_threshold:用于二值化图像获得文本区域。 - # box_threshold:检测框分数阈值,低于该阈值的检测框不计入结果。 - with ScopedTiming("kpu_post",debug_mode > 0): - # 调用aicube模块的ocr_post_process完成ocr检测的后处理 - # det_results结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...] - det_results = aicube.ocr_post_process(results[0][:, :, :, 0].reshape(-1), tmp2.reshape(-1), - [kmodel_input_shape_det[3], kmodel_input_shape_det[2]], - [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH], mask_threshold, box_threshold) - return det_results - -# 识别步骤后处理 -def kpu_run_rec(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - #(1)识别预处理并设置模型输入 - kpu_pre_process_rec(rgb888p_img) - #(2)kpu推理 - with ScopedTiming("kpu_run_rec",debug_mode > 0): - # 识别运行 - kpu_obj.run() - #(3)识别释放ai2d资源 - ai2d_release_rec() - #(4)获取识别kpu输出 - results = kpu_get_output() - #(5)识别后处理,results结构为[(N,MAX_LENGTH,DICT_LENGTH),...],在axis=2维度上取argmax获取当前识别字符在字典中的索引 - preds = np.argmax(results[0], axis=2).reshape((-1)) - output_txt = "" - for i in range(len(preds)): - # 当前识别字符不是字典的最后一个字符并且和前一个字符不重复(去重),加入识别结果字符串 - if preds[i] != (len(DICT) - 1) and (not (i > 0 and preds[i - 1] == preds[i])): - output_txt = output_txt + DICT[preds[i]] - return output_txt - -# 释放检测步骤kpu、ai2d以及ai2d相关的tensor -def kpu_deinit_det(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d_det,ai2d_output_tensor_det - if "ai2d_det" in globals(): - del ai2d_det - if "ai2d_output_tensor_det" in globals(): - del ai2d_output_tensor_det - -# 释放识别步骤kpu -def kpu_deinit_rec(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d_rec,ai2d_output_tensor_rec - if "ai2d_rec" in globals(): - del ai2d_rec - if "ai2d_output_tensor_rec" in globals(): - del ai2d_output_tensor_rec - - -#********************for media_utils.py******************** - -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# display初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# 释放display -def display_deinit(): - display.deinit() - -# display显示检测识别框 -def display_draw(det_results): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if det_results: - draw_img.clear() - # 循环绘制所有检测到的框 - for j in det_results: - # 将原图的坐标点转换成显示的坐标点,循环绘制四条直线,得到一个矩形框 - for i in range(4): - x1 = j[1][(i * 2)] / 
OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y1 = j[1][(i * 2 + 1)] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - x2 = j[1][((i + 1) * 2) % 8] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y2 = j[1][((i + 1) * 2 + 1) % 8] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - draw_img.draw_line((int(x1), int(y1), int(x2), int(y2)), color=(255, 0, 0, 255), - thickness=5) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -# camera初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - # camera获取的通道0图像送display显示 - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # camera获取的通道2图像送ai处理 - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 启动视频流 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 捕获一帧图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 释放内存 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 停止视频流 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放buffer,销毁link -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - global buffer,media_source, media_sink - if "buffer" in globals(): - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - media.destroy_link(media_source, media_sink) - media.buffer_deinit() - -def ocr_rec_inference(): - print("ocr_rec_test start") - kpu_ocr_det = kpu_init_det(kmodel_file_det) # 创建OCR检测kpu对象 - kpu_ocr_rec = kpu_init_rec(kmodel_file_rec) # 创建OCR识别kpu对象 - camera_init(CAM_DEV_ID_0) # camera初始化 - display_init() # display初始化 - try: - media_init() - camera_start(CAM_DEV_ID_0) - gc_count=0 - while True: - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - det_results = kpu_run_det(kpu_ocr_det,rgb888p_img) # kpu运行获取OCR检测kmodel的推理输出 - ocr_results="" - if det_results: - for j in det_results: - ocr_result = kpu_run_rec(kpu_ocr_rec,j[0]) # j[0]为检测框的裁剪部分,kpu运行获取OCR识别kmodel的推理输出 - ocr_results = 
ocr_results+" ["+ocr_result+"] " - gc.collect() - print("\n"+ocr_results) - display_draw(det_results) - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if (gc_count>1): - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止camera - display_deinit() # 释放display - kpu_deinit_det() # 释放OCR检测步骤kpu - kpu_deinit_rec() # 释放OCR识别步骤kpu - if "current_kmodel_obj" in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_ocr_det - del kpu_ocr_rec - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放整个media - print("ocr_rec_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ocr_rec_inference() -``` - -### 8. 手掌关键点检测 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + "kmodel/hand_det.kmodel" # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) 
/ ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor, hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - 
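        # Note: only the dtype conversion and the output tensor are fixed here.
        # Unlike the palm-detection ai2d, whose pad/resize parameters and builder are
        # configured once in hd_ai2d_init, the keypoint ai2d sets its crop/resize
        # parameters and rebuilds its builder on every frame in hk_ai2d_run, because
        # the crop rectangle follows the detected hand and changes frame to frame.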
hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor, hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)返回手掌关键点检测结果 - return results - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): #删除hk_ai2d变量,释放对它所引用对象的内存引用 - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): #删除hk_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 标出检测到的21个关键点并用不同颜色的线段连接 -def display_draw(results, x, y, w, h): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if results: - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = (results[0::2] * w + x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - results_show[1::2] = (results[1::2] * h + y) * 
DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - for i in range(len(results_show)/2): - draw_img.draw_circle(results_show[i*2], results_show[i*2+1], 1, color=(255, 0, 255, 0),fill=False) - for i in range(5): - j = i*8 - if i==0: - R = 255; G = 0; B = 0 - if i==1: - R = 255; G = 0; B = 255 - if i==2: - R = 255; G = 255; B = 0 - if i==3: - R = 0; G = 255; B = 0 - if i==4: - R = 0; G = 0; B = 255 - draw_img.draw_line(results_show[0], results_show[1], results_show[j+2], results_show[j+3], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+2], results_show[j+3], results_show[j+4], results_show[j+5], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+4], results_show[j+5], results_show[j+6], results_show[j+7], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+6], results_show[j+7], results_show[j+8], results_show[j+9], color=(255,R,G,B), thickness = 3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_keypoint_detect.py********** -def hand_keypoint_detect_inference(): - print("hand_keypoint_detect_test start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - 
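        # Note: in these demos media_init() is always called before camera_start(),
        # so the VB buffer pool, the camera->display link and the OSD images exist
        # before the sensor starts producing frames; the finally block below tears
        # everything down again in essentially the reverse order.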
camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - w_det = int(float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - h_det = int(float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x_det = int(x1*DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y_det = int(y1*DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - - draw_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) # 将得到的手掌检测结果 绘制到 display - display_draw(hk_results[0], x1_kp, y1_kp, w_kp, h_kp) # 将得到的手掌关键点检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_keypoint_detect_inference() -``` - -### 9. 
静态手势识别 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand recognition---------- -#kmodel输入shape -hr_kmodel_input_shape = (1,3,224,224) # 手势识别kmodel输入分辨率 - -#kmodel相关参数设置 -hr_kmodel_frame_size = [224,224] # 手势识别输入图片尺寸 -labels = ["gun","other","yeah","five"] # 模型输出类别名称 - -hr_kmodel_file = root_dir + "kmodel/hand_reco.kmodel" # 手势识别kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hr_ai2d,hr_ai2d_input_tensor,hr_ai2d_output_tensor,hr_ai2d_builder # 定义手势识别全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -#-------hand detect--------: -# 手掌检测 ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = 
nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor, hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - - -#-------hand recognition--------: -# 手势识别 ai2d 初始化 -def hr_ai2d_init(): - with ScopedTiming("hr_ai2d_init",debug_mode > 0): - global hr_ai2d, hr_ai2d_output_tensor - hr_ai2d = nn.ai2d() - hr_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hr_kmodel_input_shape, dtype=np.uint8) - hr_ai2d_output_tensor = nn.from_numpy(data) - -# 手势识别 ai2d 运行 -def hr_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hr_ai2d_run",debug_mode > 0): - global hr_ai2d,hr_ai2d_input_tensor,hr_ai2d_output_tensor - hr_ai2d_input = rgb888p_img.to_numpy_ref() - hr_ai2d_input_tensor = nn.from_numpy(hr_ai2d_input) - - hr_ai2d.set_crop_param(True, x, y, w, h) - hr_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hr_ai2d_builder - hr_ai2d_builder = hr_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hr_kmodel_frame_size[1],hr_kmodel_frame_size[0]]) - hr_ai2d_builder.run(hr_ai2d_input_tensor, hr_ai2d_output_tensor) - 
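# Note on hr_ai2d_run above: the gesture classifier never sees the full frame. ai2d
# first crops the (x, y, w, h) hand region out of the full-resolution RGB888 planar
# image (set_crop_param) and then scales that crop to the 224x224 input expected by
# hand_reco.kmodel (set_resize_param). The builder is recreated on every call because
# the crop rectangle differs for every detected hand.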
-# 手势识别 ai2d 释放内存 -def hr_ai2d_release(): - with ScopedTiming("hr_ai2d_release",debug_mode > 0): - global hr_ai2d_input_tensor, hr_ai2d_builder - del hr_ai2d_input_tensor - del hr_ai2d_builder - -# 手势识别 kpu 初始化 -def hr_kpu_init(hr_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hr_kpu_init",debug_mode > 0): - hr_kpu_obj = nn.kpu() - hr_kpu_obj.load_kmodel(hr_kmodel_file) - - hr_ai2d_init() - return hr_kpu_obj - -# 手势识别 kpu 输入预处理 -def hr_kpu_pre_process(rgb888p_img, x, y, w, h): - hr_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hr_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hr_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hr_ai2d_output_tensor) - -# 手势识别 kpu 获得 kmodel 输出 -def hr_kpu_get_output(): - with ScopedTiming("hr_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# softmax实现 -def softmax(x): - x -= np.max(x) - x = np.exp(x) / np.sum(np.exp(x)) - return x - -# 手势识别 kpu 输出后处理 -def hr_kpu_post_process(results): - x_softmax = softmax(results[0]) - result = np.argmax(x_softmax) - text = " " + labels[result] + ": " + str(round(x_softmax[result],2)) - return text - -# 手势识别 kpu 运行 -def hr_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hr_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手势识别 kpu 运行 - with ScopedTiming("hr_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手势识别 ai2d 资源 - hr_ai2d_release() - # (4)获取手势识别 kpu 输出 - results = hr_kpu_get_output() - # (5)手势识别 kpu 结果后处理 - result = hr_kpu_post_process(results) - # (6)返回手势识别结果 - return result - -# 手势识别 kpu 释放内存 -def hr_kpu_deinit(): - with ScopedTiming("hr_kpu_deinit",debug_mode > 0): - if 'hr_ai2d' in globals(): #删除hr_ai2d变量,释放对它所引用对象的内存引用 - global hr_ai2d - del hr_ai2d - if 'hr_ai2d_output_tensor' in globals(): #删除hr_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hr_ai2d_output_tensor - del hr_ai2d_output_tensor - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, 
CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_recognition.py********** -def hand_recognition_inference(): - print("hand_recognition start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_recognition = hr_kpu_init(hr_kmodel_file) # 创建手势识别的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - w_det = int(float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - h_det = int(float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x_det = int(x1*DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y_det = int(y1*DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.1*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hr_results = hr_kpu_run(kpu_hand_recognition,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手势识别 kpu 运行 以及 后处理过程 - draw_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) # 将得到的手掌检测结果 绘制到 display - draw_img.draw_string( x_det, y_det-50, hr_results, color=(255,0, 255, 0), scale=4) # 将得到的手势识别结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - 
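            # Note: gc.collect() is deliberately run only about once every 10 frames
            # (tracked by `count`) rather than on every iteration, trading a little
            # extra heap headroom for not paying the MicroPython collection pause on
            # each frame.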
draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hr_kpu_deinit() # 释放手势识别 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_recognition - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_recognition_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_recognition_inference() -``` - -### 10.人脸mesh - -```python -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸mesh kmodel输入shape -fm_kmodel_input_shape = (1,3,120,120) -fmpost_kmodel_input_shapes = [(3,3),(3,1),(40,1),(10,1)] -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸mesh kmodel -fm_kmodel_file = root_dir + 'kmodel/face_alignment.kmodel' -# 人脸mesh后处理kmodel -fmpost_kmodel_file = root_dir + 'kmodel/face_alignment_post.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 人脸mesh参数均值 -param_mean = np.array([0.0003492636315058917,2.52790130161884e-07,-6.875197868794203e-07,60.1679573059082,-6.295513230725192e-07,0.0005757200415246189,-5.085391239845194e-05,74.2781982421875,5.400917189035681e-07,6.574138387804851e-05,0.0003442012530285865,-66.67157745361328,-346603.6875,-67468.234375,46822.265625,-15262.046875,4350.5888671875,-54261.453125,-18328.033203125,-1584.328857421875,-84566.34375,3835.960693359375,-20811.361328125,38094.9296875,-19967.85546875,-9241.3701171875,-19600.71484375,13168.08984375,-5259.14404296875,1848.6478271484375,-13030.662109375,-2435.55615234375,-2254.20654296875,-14396.5615234375,-6176.3291015625,-25621.919921875,226.39447021484375,-6326.12353515625,-10867.2509765625,868.465087890625,-5831.14794921875,2705.123779296875,-3629.417724609375,2043.9901123046875,-2446.6162109375,3658.697021484375,-7645.98974609375,-6674.45263671875,116.38838958740234,7185.59716796875,-1429.48681640625,2617.366455078125,-1.2070955038070679,0.6690792441368103,-0.17760828137397766,0.056725528091192245,0.03967815637588501,-0.13586315512657166,-0.09223993122577667,-0.1726071834564209,-0.015804484486579895,-0.1416848599910736],dtype=np.float) -# 人脸mesh参数方差 -param_std = 
np.array([0.00017632152594160289,6.737943476764485e-05,0.00044708489440381527,26.55023193359375,0.0001231376954820007,4.493021697271615e-05,7.923670636955649e-05,6.982563018798828,0.0004350444069132209,0.00012314890045672655,0.00017400001524947584,20.80303955078125,575421.125,277649.0625,258336.84375,255163.125,150994.375,160086.109375,111277.3046875,97311.78125,117198.453125,89317.3671875,88493.5546875,72229.9296875,71080.2109375,50013.953125,55968.58203125,47525.50390625,49515.06640625,38161.48046875,44872.05859375,46273.23828125,38116.76953125,28191.162109375,32191.4375,36006.171875,32559.892578125,25551.1171875,24267.509765625,27521.3984375,23166.53125,21101.576171875,19412.32421875,19452.203125,17454.984375,22537.623046875,16174.28125,14671.640625,15115.6884765625,13870.0732421875,13746.3125,12663.1337890625,1.5870834589004517,1.5077009201049805,0.5881357789039612,0.5889744758605957,0.21327851712703705,0.2630201280117035,0.2796429395675659,0.38030216097831726,0.16162841022014618,0.2559692859649658],dtype=np.float) -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fm_ai2d: 人脸mesh ai2d实例 -# fm_ai2d_input_tensor: 人脸mesh ai2d输入 -# fm_ai2d_output_tensor:人脸mesh ai2d输入 -# fm_ai2d_builder: 根据人脸mesh ai2d参数,构建的人脸mesh ai2d_builder对象 -global fm_ai2d,fm_ai2d_input_tensor,fm_ai2d_output_tensor,fm_ai2d_builder -global roi #人脸区域 -global vertices #3D关键点 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, 
nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals(): #删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -def parse_roi_box_from_bbox(bbox): - # 获取人脸roi - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - old_size = (w + h) / 2 - center_x = x1 + w / 2 - center_y = y1 + h / 2 + old_size * 0.14 - size = int(old_size * 1.58) - - x0 = center_x - float(size) / 2 - y0 = center_y - float(size) / 2 - x1 = x0 + size - y1 = y0 + size - - x0 = max(0, min(x0, OUT_RGB888P_WIDTH)) - y0 = max(0, min(y0, OUT_RGB888P_HEIGH)) - x1 = max(0, min(x1, OUT_RGB888P_WIDTH)) - y1 = max(0, min(y1, OUT_RGB888P_HEIGH)) - - roi = (x0, y0, x1 - x0, y1 - y0) - return roi - -def fm_ai2d_init(): - # 人脸mesh 
ai2d初始化 - with ScopedTiming("fm_ai2d_init",debug_mode > 0): - # (1)创建人脸mesh ai2d对象 - global fm_ai2d - fm_ai2d = nn.ai2d() - - # (2)创建人脸mesh ai2d_output_tensor对象,用于存放ai2d输出 - global fm_ai2d_output_tensor - data = np.ones(fm_kmodel_input_shape, dtype=np.uint8) - fm_ai2d_output_tensor = nn.from_numpy(data) - -def fm_ai2d_run(rgb888p_img,det): - # 人脸mesh ai2d推理 - with ScopedTiming("fm_ai2d_run",debug_mode > 0): - global fm_ai2d,fm_ai2d_input_tensor,fm_ai2d_output_tensor - #(1)根据原图ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fm_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据新的det设置新的人脸mesh ai2d参数 - fm_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global roi - roi = parse_roi_box_from_bbox(det) - fm_ai2d.set_crop_param(True,int(roi[0]),int(roi[1]),int(roi[2]),int(roi[3])) - fm_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - # (3)根据新的人脸mesh ai2d参数,构建人脸mesh ai2d_builder - global fm_ai2d_builder - fm_ai2d_builder = fm_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fm_kmodel_input_shape) - # (4)推理人脸mesh ai2d,将预处理的结果保存到fm_ai2d_output_tensor - fm_ai2d_builder.run(fm_ai2d_input_tensor, fm_ai2d_output_tensor) - -def fm_ai2d_release(): - # 释放人脸mesh ai2d_input_tensor、ai2d_builder - with ScopedTiming("fm_ai2d_release",debug_mode > 0): - global fm_ai2d_input_tensor,fm_ai2d_builder - del fm_ai2d_input_tensor - del fm_ai2d_builder - -def fm_kpu_init(kmodel_file): - # 人脸mesh kpu初始化 - with ScopedTiming("fm_kpu_init",debug_mode > 0): - # 初始化人脸mesh kpu对象 - kpu_obj = nn.kpu() - # 加载人脸mesh kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸mesh ai2d - fm_ai2d_init() - return kpu_obj - -def fm_kpu_pre_process(rgb888p_img,det): - # 人脸mesh kpu预处理 - # 人脸mesh ai2d推理,根据det对原图进行预处理 - fm_ai2d_run(rgb888p_img,det) - with ScopedTiming("fm_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fm_ai2d_output_tensor - # 将人脸mesh ai2d输出设置为人脸mesh kpu输入 - current_kmodel_obj.set_input_tensor(0, fm_ai2d_output_tensor) - -def fm_kpu_get_output(): - with ScopedTiming("fm_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取人脸mesh kpu输出 - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fm_kpu_post_process(param): - # 人脸mesh kpu结果后处理,反标准化 - with ScopedTiming("fm_kpu_post_process",debug_mode > 0): - param = param * param_std + param_mean - return param - -def fm_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸mesh kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸mesh kpu预处理,设置kpu输入 - fm_kpu_pre_process(rgb888p_img,det) - # (2)人脸mesh kpu推理 - with ScopedTiming("fm_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸mesh ai2d - fm_ai2d_release() - # (4)获取人脸mesh kpu输出 - param = fm_kpu_get_output() - # (5)人脸mesh 后处理 - param = fm_kpu_post_process(param) - return param - -def fm_kpu_deinit(): - # 人脸mesh kpu释放 - with ScopedTiming("fm_kpu_deinit",debug_mode > 0): - if 'fm_ai2d' in globals(): # 删除fm_ai2d变量,释放对它所引用对象的内存引用 - global fm_ai2d - del fm_ai2d - if 'fm_ai2d_output_tensor' in globals(): # 删除fm_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fm_ai2d_output_tensor - del fm_ai2d_output_tensor - -def fmpost_kpu_init(kmodel_file): - # face mesh post模型初始化 - with ScopedTiming("fmpost_kpu_init",debug_mode > 0): - # 初始化人脸mesh kpu post对象 - kpu_obj = nn.kpu() - # 加载人脸mesh后处理kmodel - kpu_obj.load_kmodel(kmodel_file) - return kpu_obj - -def fmpost_kpu_pre_process(param): - # face mesh post模型预处理,param解析 - with 
ScopedTiming("fmpost_kpu_pre_process",debug_mode > 0): - param = param[0] - trans_dim, shape_dim, exp_dim = 12, 40, 10 - - # reshape前务必进行copy,否则会导致模型输入错误 - R_ = param[:trans_dim].copy().reshape((3, -1)) - R = R_[:, :3].copy() - offset = R_[:, 3].copy() - offset = offset.reshape((3, 1)) - alpha_shp = param[trans_dim:trans_dim + shape_dim].copy().reshape((-1, 1)) - alpha_exp = param[trans_dim + shape_dim:].copy().reshape((-1, 1)) - - R_tensor = nn.from_numpy(R) - current_kmodel_obj.set_input_tensor(0, R_tensor) - del R_tensor - - offset_tensor = nn.from_numpy(offset) - current_kmodel_obj.set_input_tensor(1, offset_tensor) - del offset_tensor - - alpha_shp_tensor = nn.from_numpy(alpha_shp) - current_kmodel_obj.set_input_tensor(2, alpha_shp_tensor) - del alpha_shp_tensor - - alpha_exp_tensor = nn.from_numpy(alpha_exp) - current_kmodel_obj.set_input_tensor(3, alpha_exp_tensor) - del alpha_exp_tensor - - return - -def fmpost_kpu_get_output(): - # 获取face mesh post模型输出 - with ScopedTiming("fmpost_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取人脸mesh kpu输出 - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fmpost_kpu_post_process(roi): - # face mesh post模型推理结果后处理 - with ScopedTiming("fmpost_kpu_post_process",debug_mode > 0): - x, y, w, h = map(lambda x: int(round(x, 0)), roi[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - roi_array = np.array([x,y,w,h],dtype=np.float) - global vertices - aidemo.face_mesh_post_process(roi_array,vertices) - return - -def fmpost_kpu_run(kpu_obj,param): - # face mesh post模型推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - fmpost_kpu_pre_process(param) - with ScopedTiming("fmpost_kpu_run",debug_mode > 0): - kpu_obj.run() - global vertices - vertices = fmpost_kpu_get_output() - global roi - fmpost_kpu_post_process(roi) - return -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,vertices_list): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - for vertices in vertices_list: - aidemo.face_draw_mesh(draw_img_ulab, vertices) - # (4)将轮廓结果拷贝到osd - draw_img.copy_to(osd_img) - # (5)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # 
camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_mesh_inference(): - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸mesh kpu初始化 - kpu_face_mesh = fm_kpu_init(fm_kmodel_file) - # face_mesh_post kpu初始化 - kpu_face_mesh_post = fmpost_kpu_init(fmpost_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - ## (2.2)针对每个人脸框,推理得到对应人脸mesh - mesh_result = [] - for det in dets: - param = fm_kpu_run(kpu_face_mesh,rgb888p_img,det) - fmpost_kpu_run(kpu_face_mesh_post,param) - global vertices - mesh_result.append(vertices) - ## (2.3)将人脸mesh 画到屏幕上 - display_draw(dets,mesh_result) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fm_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del 
kpu_face_mesh - del kpu_face_mesh_post - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_mesh_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_mesh_inference() -``` - -### 11.注视估计 - -```python -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 注视估计kmodel输入shape -feg_kmodel_input_shape = (1,3,448,448) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 注视估计kmodel -fr_kmodel_file = root_dir + 'kmodel/eye_gaze.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# feg_ai2d: 注视估计ai2d实例 -# feg_ai2d_input_tensor: 注视估计ai2d输入 -# feg_ai2d_output_tensor:注视估计ai2d输入 -# feg_ai2d_builder: 根据注视估计ai2d参数,构建的注视估计ai2d_builder对象 -global feg_ai2d,feg_ai2d_input_tensor,feg_ai2d_output_tensor,feg_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * 
OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in 
globals(): #删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -def feg_ai2d_init(): - # 注视估计ai2d初始化 - with ScopedTiming("feg_ai2d_init",debug_mode > 0): - # (1)创建注视估计ai2d对象 - global feg_ai2d - feg_ai2d = nn.ai2d() - - # (2)创建注视估计ai2d_output_tensor对象,用于存放ai2d输出 - global feg_ai2d_output_tensor - data = np.ones(feg_kmodel_input_shape, dtype=np.uint8) - feg_ai2d_output_tensor = nn.from_numpy(data) - -def feg_ai2d_run(rgb888p_img,det): - # 注视估计ai2d推理 - with ScopedTiming("feg_ai2d_run",debug_mode > 0): - global feg_ai2d,feg_ai2d_input_tensor,feg_ai2d_output_tensor - #(1)根据原图ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - feg_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据新的det设置新的注视估计ai2d参数 - feg_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - feg_ai2d.set_crop_param(True,x,y,w,h) - feg_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - # (3)根据新的注视估计ai2d参数,构建注视估计ai2d_builder - global feg_ai2d_builder - feg_ai2d_builder = feg_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], feg_kmodel_input_shape) - # (4)推理注视估计ai2d,将预处理的结果保存到feg_ai2d_output_tensor - feg_ai2d_builder.run(feg_ai2d_input_tensor, feg_ai2d_output_tensor) - -def feg_ai2d_release(): - # 释放注视估计ai2d_input_tensor、ai2d_builder - with ScopedTiming("feg_ai2d_release",debug_mode > 0): - global feg_ai2d_input_tensor,feg_ai2d_builder - del feg_ai2d_input_tensor - del feg_ai2d_builder - -def feg_kpu_init(kmodel_file): - # 注视估计kpu初始化 - with ScopedTiming("feg_kpu_init",debug_mode > 0): - # 初始化注视估计kpu对象 - kpu_obj = nn.kpu() - # 加载注视估计kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化注视估计ai2d - feg_ai2d_init() - return kpu_obj - -def feg_kpu_pre_process(rgb888p_img,det): - # 注视估计kpu预处理 - # 注视估计ai2d推理,根据det对原图进行预处理 - feg_ai2d_run(rgb888p_img,det) - with ScopedTiming("feg_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,feg_ai2d_output_tensor - # 将注视估计ai2d输出设置为注视估计kpu输入 - current_kmodel_obj.set_input_tensor(0, feg_ai2d_output_tensor) - -def feg_kpu_get_output(): - with ScopedTiming("feg_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取注视估计kpu输出 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def feg_kpu_post_process(results): - # 注视估计kpu推理结果后处理 - with ScopedTiming("feg_kpu_post_process",debug_mode > 0): - post_ret = aidemo.eye_gaze_post_process(results) - return post_ret[0],post_ret[1] - -def feg_kpu_run(kpu_obj,rgb888p_img,det): - # 注视估计kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)注视估计kpu预处理,设置kpu输入 - feg_kpu_pre_process(rgb888p_img,det) - # (2)注视估计kpu推理 - with ScopedTiming("feg_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放注视估计ai2d - feg_ai2d_release() - # (4)获取注视估计kpu输出 - results = feg_kpu_get_output() - # (5)注视估计后处理 - pitch,yaw = feg_kpu_post_process(results) - return pitch,yaw - -def feg_kpu_deinit(): - # 注视估计kpu释放 - with ScopedTiming("feg_kpu_deinit",debug_mode > 0): - if 'feg_ai2d' in globals(): # 删除feg_ai2d变量,释放对它所引用对象的内存引用 - global feg_ai2d - del feg_ai2d - if 'feg_ai2d_output_tensor' in globals(): # 删除feg_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global feg_ai2d_output_tensor - del feg_ai2d_output_tensor - -#********************for media_utils.py******************** 
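# Supplementary note on the per-frame flow of this eye-gaze demo (summarising the code above and below):
#   1. camera_read() grabs an RGB888-planar frame (default 1920x1080) from camera chn2.
#   2. fd_kpu_run(): the frame is letterboxed to the 320x320 face-detection input via
#      get_pad_one_side_param() -- with a 1920x1080 source, ratio = 320/1920, the scaled
#      image is 320x180, so padding works out to 0 top/left, about 140 bottom, 0 right --
#      then the kmodel runs and aidemo.face_det_post_process() returns face boxes.
#   3. feg_kpu_run(): each face box is cropped and resized to the 448x448 eye_gaze.kmodel
#      input; post-processing yields a (pitch, yaw) gaze direction per face.
#   4. display_draw() below maps each box back to display coordinates and draws an arrow
#      from the face centre along the gaze direction on the OSD layer.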
-global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,gaze_results): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for det,gaze_ret in zip(dets,gaze_results): - pitch , yaw = gaze_ret - length = DISPLAY_WIDTH / 2 - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - center_x = (x + w / 2.0) - center_y = (y + h / 2.0) - dx = -length * math.sin(pitch) * math.cos(yaw) - target_x = int(center_x + dx) - dy = -length * math.sin(yaw) - target_y = int(center_y + dy) - - draw_img.draw_arrow(int(center_x), int(center_y), target_x, target_y, color = (255,255,0,0), size = 30, thickness = 2) - - # (4)将轮廓结果拷贝到osd - draw_img.copy_to(osd_img) - # (5)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - 
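    # Teardown order note: enabling the sleep exit point and pausing ~100 ms before
    # releasing the VB buffer gives pending camera/display operations a chance to
    # finish; only then is the buffer released, the camera->display link destroyed
    # and the media buffer pool deinitialised.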
time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def eye_gaze_inference(): - print("eye_gaze_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 注视估计kpu初始化 - kpu_eye_gaze = feg_kpu_init(fr_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应注视估计 - gaze_results = [] - for det in dets: - pitch ,yaw = feg_kpu_run(kpu_eye_gaze,rgb888p_img,det) - gaze_results.append([pitch ,yaw]) - # (2.3)将注视估计画到屏幕上 - display_draw(dets,gaze_results) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - with ScopedTiming("gc collect", debug_mode > 0): - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - feg_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_eye_gaze - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("eye_gaze_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - eye_gaze_inference() -``` - -### 12.动态手势识别 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -root_dir = '/sdcard/app/tests/' - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - 
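# Note on how the three kmodels in this demo are chained:
#   hand_det.kmodel    -> palm boxes on the full frame (512x512 letterboxed input).
#   handkp_det.kmodel  -> hand keypoints inside each expanded palm box; the static
#                         gesture ("five"/"yeah") derived from the finger angles is
#                         only used to trigger and orient the dynamic recognition.
#   gesture.kmodel     -> dynamic gesture classifier with 11 inputs (see
#                         gesture_kmodel_input_shape); apart from output 0, its
#                         outputs are fed back as inputs on the next frame in
#                         gesture_kpu_get_output(), so it carries temporal state
#                         across frames.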
-#--------for hand gesture---------- -#kmodel输入shape -gesture_kmodel_input_shape = [[1, 3, 224, 224], # 动态手势识别kmodel输入分辨率 - [1,3,56,56], - [1,4,28,28], - [1,4,28,28], - [1,8,14,14], - [1,8,14,14], - [1,8,14,14], - [1,12,14,14], - [1,12,14,14], - [1,20,7,7], - [1,20,7,7]] - -#kmodel相关参数设置 -resize_shape = 256 -mean_values = np.array([0.485, 0.456, 0.406]).reshape((3,1,1)) # 动态手势识别预处理均值 -std_values = np.array([0.229, 0.224, 0.225]).reshape((3,1,1)) # 动态手势识别预处理方差 -gesture_kmodel_frame_size = [224,224] # 动态手势识别输入图片尺寸 - -gesture_kmodel_file = root_dir + 'kmodel/gesture.kmodel' # 动态手势识别kmodel文件的路径 - -shang_bin = root_dir + "utils/shang.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 -xia_bin = root_dir + "utils/xia.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 -zuo_bin = root_dir + "utils/zuo.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 -you_bin = root_dir + "utils/you.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 - -bin_width = 150 # 动态手势识别屏幕坐上角标志状态文件的短边尺寸 -bin_height = 216 # 动态手势识别屏幕坐上角标志状态文件的长边尺寸 -shang_argb = np.fromfile(shang_bin, dtype=np.uint8) -shang_argb = shang_argb.reshape((bin_height, bin_width, 4)) -xia_argb = np.fromfile(xia_bin, dtype=np.uint8) -xia_argb = xia_argb.reshape((bin_height, bin_width, 4)) -zuo_argb = np.fromfile(zuo_bin, dtype=np.uint8) -zuo_argb = zuo_argb.reshape((bin_width, bin_height, 4)) -you_argb = np.fromfile(you_bin, dtype=np.uint8) -you_argb = you_argb.reshape((bin_width, bin_height, 4)) - -TRIGGER = 0 # 动态手势识别应用的结果状态 -MIDDLE = 1 -UP = 2 -DOWN = 3 -LEFT = 4 -RIGHT = 5 - -max_hist_len = 20 # 最多存储多少帧的结果 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global gesture_ai2d_resize, gesture_ai2d_resize_builder, gesture_ai2d_crop, gesture_ai2d_crop_builder # 定义动态手势识别全局 ai2d 对象,以及 builder -global gesture_ai2d_input_tensor, gesture_kpu_input_tensors, gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor # 定义动态手势识别全局 ai2d 的输入、输出 - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, 
nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) # kpu结果后处理 - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel 
) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor, hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 输出后处理 -def hk_kpu_post_process(results, x, y, w, h): - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = results[0::2] * w + x - results_show[1::2] = results[1::2] * h + y - return results_show - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)手掌关键点检测 kpu 结果后处理 - result = hk_kpu_post_process(results[0],x,y,w,h) - # (6)返回手掌关键点检测结果 - return result - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): #删除hk_ai2d变量,释放对它所引用对象的内存引用 - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): #删除hk_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 求两个vector之间的夹角 -def hk_vector_2d_angle(v1,v2): - with ScopedTiming("hk_vector_2d_angle",debug_mode > 0): - v1_x = v1[0] - v1_y = v1[1] - v2_x = v2[0] - v2_y = v2[1] - v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) - v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) - dot_product = v1_x * v2_x + v1_y * v2_y - cos_angle = dot_product/(v1_norm*v2_norm) - angle = np.acos(cos_angle)*180/np.pi - return angle - -# 根据手掌关键点检测结果判断手势类别 -def hk_gesture(results): - with ScopedTiming("hk_gesture",debug_mode > 0): - angle_list = [] - for i in range(5): - angle = hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) - angle_list.append(angle) - - thr_angle = 65. - thr_angle_thumb = 53. - thr_angle_s = 49. - gesture_str = None - if 65535. 
not in angle_list: - if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "fist" - elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "gun" - elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]5) and (angle_list[1]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "one" - elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]thr_angle_thumb) and (angle_list[1]thr_angle): - gesture_str = "three" - elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "thumbUp" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "yeah" - - return gesture_str - -#-------dynamic gesture--------: -# 动态手势识别 ai2d 初始化 -def gesture_ai2d_init(kpu_obj, resize_shape): - with ScopedTiming("gesture_ai2d_init",debug_mode > 0): - global gesture_ai2d_resize, gesture_ai2d_resize_builder - global gesture_ai2d_crop, gesture_ai2d_crop_builder - global gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor - - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = gesture_kmodel_frame_size[0] - height = gesture_kmodel_frame_size[1] - ratiow = float(resize_shape) / ori_w - ratioh = float(resize_shape) / ori_h - if ratiow < ratioh: - ratio = ratioh - else: - ratio = ratiow - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - - top = int((new_h-height)/2) - left = int((new_w-width)/2) - - gesture_ai2d_resize = nn.ai2d() - gesture_ai2d_resize.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) - gesture_ai2d_resize.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - gesture_ai2d_resize_builder = gesture_ai2d_resize.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,new_h,new_w]) - - gesture_ai2d_crop = nn.ai2d() - gesture_ai2d_crop.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) - gesture_ai2d_crop.set_crop_param(True, left, top, width, height) - gesture_ai2d_crop_builder = gesture_ai2d_crop.build([1,3,new_h,new_w], [1,3,height,width]) - - global gesture_kpu_input_tensor, gesture_kpu_input_tensors, current_kmodel_obj - current_kmodel_obj = kpu_obj - gesture_kpu_input_tensors = [] - for i in range(current_kmodel_obj.inputs_size()): - data = np.zeros(gesture_kmodel_input_shape[i], dtype=np.float) - gesture_kpu_input_tensor = nn.from_numpy(data) - gesture_kpu_input_tensors.append(gesture_kpu_input_tensor) - - data = np.ones(gesture_kmodel_input_shape[0], dtype=np.uint8) - gesture_ai2d_output_tensor = nn.from_numpy(data) - - global data_float - data_float = np.ones(gesture_kmodel_input_shape[0], dtype=np.float) - - data_middle = np.ones((1,3,new_h,new_w), dtype=np.uint8) - gesture_ai2d_middle_output_tensor = nn.from_numpy(data_middle) - -def gesture_ai2d_run(rgb888p_img): - with ScopedTiming("gesture_ai2d_run",debug_mode > 0): - global gesture_ai2d_input_tensor, gesture_kpu_input_tensors, gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor - global gesture_ai2d_resize_builder, gesture_ai2d_crop_builder - - gesture_ai2d_input = rgb888p_img.to_numpy_ref() - gesture_ai2d_input_tensor = nn.from_numpy(gesture_ai2d_input) - - gesture_ai2d_resize_builder.run(gesture_ai2d_input_tensor, 
gesture_ai2d_middle_output_tensor) - gesture_ai2d_crop_builder.run(gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor) - - result = gesture_ai2d_output_tensor.to_numpy() - global data_float - data_float[0] = result[0].copy() - data_float[0] = (data_float[0]*1.0/255 -mean_values)/std_values - tmp = nn.from_numpy(data_float) - gesture_kpu_input_tensors[0] = tmp - -# 动态手势识别 ai2d 释放内存 -def gesture_ai2d_release(): - with ScopedTiming("gesture_ai2d_release",debug_mode > 0): - global gesture_ai2d_input_tensor - del gesture_ai2d_input_tensor - -# 动态手势识别 kpu 初始化 -def gesture_kpu_init(gesture_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("gesture_kpu_init",debug_mode > 0): - gesture_kpu_obj = nn.kpu() - gesture_kpu_obj.load_kmodel(gesture_kmodel_file) - gesture_ai2d_init(gesture_kpu_obj, resize_shape) - return gesture_kpu_obj - -# 动态手势识别 kpu 输入预处理 -def gesture_kpu_pre_process(rgb888p_img): - gesture_ai2d_run(rgb888p_img) - with ScopedTiming("gesture_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,gesture_kpu_input_tensors - # set kpu input - for i in range(current_kmodel_obj.inputs_size()): - current_kmodel_obj.set_input_tensor(i, gesture_kpu_input_tensors[i]) - -# 动态手势识别 kpu 获得 kmodel 输出 -def gesture_kpu_get_output(): - with ScopedTiming("gesture_kpu_get_output",debug_mode > 0): - global current_kmodel_obj, gesture_kpu_input_tensors - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - if (i==0): - result = data.to_numpy() - tmp2 = result.copy() - else: - gesture_kpu_input_tensors[i] = data - return tmp2 - -# 动态手势识别结果处理 -def gesture_process_output(pred,history): - if (pred == 7 or pred == 8 or pred == 21 or pred == 22 or pred == 3 ): - pred = history[-1] - if (pred == 0 or pred == 4 or pred == 6 or pred == 9 or pred == 14 or pred == 1 or pred == 19 or pred == 20 or pred == 23 or pred == 24) : - pred = history[-1] - if (pred == 0) : - pred = 2 - if (pred != history[-1]) : - if (len(history)>= 2) : - if (history[-1] != history[len(history)-2]) : - pred = history[-1] - history.append(pred) - if (len(history) > max_hist_len) : - history = history[-max_hist_len:] - return history[-1] - -# 动态手势识别结果后处理 -def gesture_kpu_post_process(results, his_logit, history): - with ScopedTiming("gesture_kpu_post_process",debug_mode > 0): - his_logit.append(results[0]) - avg_logit = sum(np.array(his_logit)) - idx_ = np.argmax(avg_logit) - - idx = gesture_process_output(idx_, history) - if (idx_ != idx): - his_logit_last = his_logit[-1] - his_logit = [] - his_logit.append(his_logit_last) - return idx, avg_logit - -# 动态手势识别 kpu 运行 -def gesture_kpu_run(kpu_obj,rgb888p_img, his_logit, history): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - gesture_kpu_pre_process(rgb888p_img) - # (2)动态手势识别 kpu 运行 - with ScopedTiming("gesture_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放动态手势识别 ai2d 资源 - gesture_ai2d_release() - # (4)获取动态手势识别 kpu 输出 - results = gesture_kpu_get_output() - # (5)动态手势识别 kpu 结果后处理 - result, avg_logit= gesture_kpu_post_process(results,his_logit, history) - # (6)返回动态手势识别结果 - return result, avg_logit - -def gesture_kpu_deinit(): - with ScopedTiming("gesture_kpu_deinit",debug_mode > 0): - if 'gesture_ai2d_resize' in globals(): #删除gesture_ai2d_resize变量,释放对它所引用对象的内存引用 - global gesture_ai2d_resize - del gesture_ai2d_resize - if 'gesture_ai2d_middle_output_tensor' in globals(): #删除gesture_ai2d_middle_output_tensor变量,释放对它所引用对象的内存引用 - global gesture_ai2d_middle_output_tensor - del 
gesture_ai2d_middle_output_tensor - if 'gesture_ai2d_crop' in globals(): #删除gesture_ai2d_crop变量,释放对它所引用对象的内存引用 - global gesture_ai2d_crop - del gesture_ai2d_crop - if 'gesture_ai2d_output_tensor' in globals(): #删除gesture_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global gesture_ai2d_output_tensor - del gesture_ai2d_output_tensor - if 'gesture_kpu_input_tensors' in globals(): #删除gesture_kpu_input_tensors变量,释放对它所引用对象的内存引用 - global gesture_kpu_input_tensors - del gesture_kpu_input_tensors - if 'gesture_ai2d_resize_builder' in globals(): #删除gesture_ai2d_resize_builder变量,释放对它所引用对象的内存引用 - global gesture_ai2d_resize_builder - del gesture_ai2d_resize_builder - if 'gesture_ai2d_crop_builder' in globals(): #删除gesture_ai2d_crop_builder变量,释放对它所引用对象的内存引用 - global gesture_ai2d_crop_builder - del gesture_ai2d_crop_builder - - -#media_utils.py -global draw_img,osd_img,draw_numpy #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, draw_numpy - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_numpy = np.zeros((DISPLAY_HEIGHT, DISPLAY_WIDTH,4), dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF, data=draw_numpy) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, 
media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for dynamic_gesture.py********** -def dynamic_gesture_inference(): - print("dynamic_gesture_test start") - cur_state = TRIGGER - pre_state = TRIGGER - draw_state = TRIGGER - - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - kpu_dynamic_gesture = gesture_kpu_init(gesture_kmodel_file) # 创建动态手势识别的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - vec_flag = [] - his_logit = [] - history = [2] - s_start = time.time_ns() - - count = 0 - global draw_img,draw_numpy,osd_img - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - draw_img.clear() - if (cur_state == TRIGGER): - with ScopedTiming("trigger time", debug_mode > 0): - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - gesture = hk_gesture(hk_results) # 根据关键点检测结果判断手势类别 - - if ((gesture == "five") or (gesture == "yeah")): - v_x = hk_results[24]-hk_results[0] - v_y = hk_results[25]-hk_results[1] - angle = hk_vector_2d_angle([v_x,v_y],[1.0,0.0]) # 计算手指(中指)的朝向 - - if (v_y>0): - angle = 360-angle - - if ((70.0<=angle) and (angle<110.0)): # 手指向上 - if ((pre_state != UP) or (pre_state != MIDDLE)): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == UP) or (pre_state == MIDDLE) or(pre_state == TRIGGER)): - draw_numpy[:bin_height,:bin_width,:] = shang_argb - cur_state = UP - - elif ((110.0<=angle) and (angle<225.0)): # 手指向右(实际方向) - if (pre_state != RIGHT): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == RIGHT)or(pre_state == TRIGGER)): - draw_numpy[:bin_width,:bin_height,:] = you_argb - cur_state = RIGHT - - elif((225.0<=angle) and (angle<315.0)): # 手指向下 - if (pre_state != DOWN): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == DOWN)or(pre_state == TRIGGER)): - draw_numpy[:bin_height,:bin_width,:] = xia_argb - cur_state = DOWN - - else: # 手指向左(实际方向) - if (pre_state != LEFT): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == LEFT)or(pre_state == TRIGGER)): - draw_numpy[:bin_width,:bin_height,:] = zuo_argb - cur_state = LEFT - - m_start = time.time_ns() - his_logit = [] - else: - with ScopedTiming("swip time",debug_mode > 0): - idx, avg_logit = gesture_kpu_run(kpu_dynamic_gesture,rgb888p_img, his_logit, history) # 执行动态手势识别 kpu 运行 以及 后处理过程 - if (cur_state == UP): 
- draw_numpy[:bin_height,:bin_width,:] = shang_argb - if ((idx==15) or (idx==10)): - vec_flag.clear() - if (((avg_logit[idx] >= 0.7) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 4))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = DOWN - history = [2] - pre_state = UP - elif ((idx==25)or(idx==26)) : - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = MIDDLE - history = [2] - pre_state = MIDDLE - else: - his_logit.clear() - elif (cur_state == RIGHT): - draw_numpy[:bin_width,:bin_height,:] = you_argb - if ((idx==16)or(idx==11)) : - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = RIGHT - history = [2] - pre_state = RIGHT - else: - his_logit.clear() - elif (cur_state == DOWN): - draw_numpy[:bin_height,:bin_width,:] = xia_argb - if ((idx==18)or(idx==13)): - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = UP - history = [2] - pre_state = DOWN - else: - his_logit.clear() - elif (cur_state == LEFT): - draw_numpy[:bin_width,:bin_height,:] = zuo_argb - if ((idx==17)or(idx==12)): - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = LEFT - history = [2] - pre_state = LEFT - else: - his_logit.clear() - - elapsed_time = round((time.time_ns() - m_start)/1000000) - - if ((cur_state != TRIGGER) and (elapsed_time>2000)): - cur_state = TRIGGER - pre_state = TRIGGER - - elapsed_ms_show = round((time.time_ns()-s_start)/1000000) - if (elapsed_ms_show<1000): - if (draw_state == UP): - draw_img.draw_arrow(1068,330,1068,130, (255,170,190,230), thickness=13) # 判断为向上挥动时,画一个向上的箭头 - elif (draw_state == RIGHT): - draw_img.draw_arrow(1290,540,1536,540, (255,170,190,230), thickness=13) # 判断为向右挥动时,画一个向右的箭头 - elif (draw_state == DOWN): - draw_img.draw_arrow(1068,750,1068,950, (255,170,190,230), thickness=13) # 判断为向下挥动时,画一个向下的箭头 - elif (draw_state == LEFT): - draw_img.draw_arrow(846,540,600,540, (255,170,190,230), thickness=13) # 判断为向左挥动时,画一个向左的箭头 - elif (draw_state == MIDDLE): - draw_img.draw_circle(1068,540,100, (255,170,190,230), thickness=2, fill=True) # 判断为五指捏合手势时,画一个实心圆 - else: - draw_state = TRIGGER - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>5): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - gesture_kpu_deinit() # 释放动态手势识别 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - del kpu_dynamic_gesture - - if 'draw_numpy' in globals(): - global draw_numpy - del draw_numpy - - if 'draw_img' in globals(): - global draw_img - del draw_img - - gc.collect() -# nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - 
print("dynamic_gesture_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - dynamic_gesture_inference() -``` - -### 13.单目标跟踪 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16) -OUT_RGB888P_HEIGHT = 720 - -#单目标跟踪 kmodel 输入 shape -crop_kmodel_input_shape = (1,3,127,127) -src_kmodel_input_shape = (1,3,255,255) - - -#单目标跟踪 相关参数设置 -head_thresh = 0.1 #单目标跟踪分数阈值 -CONTEXT_AMOUNT = 0.5 #跟踪框宽、高调整系数 -rgb_mean = [114,114,114] #padding颜色值 -ratio_src_crop = float(src_kmodel_input_shape[2])/float(crop_kmodel_input_shape[2]) #src模型和crop模型输入比值 -track_x1 = float(300) #起始跟踪目标框左上角点x -track_y1 = float(300) #起始跟踪目标框左上角点y -track_w = float(100) #起始跟踪目标框w -track_h = float(100) #起始跟踪目标框h - - -#文件配置 -root_dir = '/sdcard/app/tests/' -crop_kmodel_file = root_dir + 'kmodel/cropped_test127.kmodel' #单目标跟踪 crop kmodel 文件路径 -src_kmodel_file = root_dir + 'kmodel/nanotrack_backbone_sim.kmodel' #单目标跟踪 src kmodel 文件路径 -track_kmodel_file = root_dir + 'kmodel/nanotracker_head_calib_k230.kmodel' #单目标跟踪 head kmodel 文件路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global crop_ai2d,crop_ai2d_input_tensor,crop_ai2d_output_tensor,crop_ai2d_builder # 对应crop模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global crop_pad_ai2d,crop_pad_ai2d_input_tensor,crop_pad_ai2d_output_tensor,crop_pad_ai2d_builder # 对应crop模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global src_ai2d,src_ai2d_input_tensor,src_ai2d_output_tensor,src_ai2d_builder # 对应src模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global src_pad_ai2d,src_pad_ai2d_input_tensor,src_pad_ai2d_output_tensor,src_pad_ai2d_builder # 对应src模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global track_kpu_input_0,track_kpu_input_1 # 对应head模型: 两个输入 - - -# 单目标跟踪的后处理 -def track_kpu_post_process(output_data,center_xy_wh): - with ScopedTiming("track_kpu_post_process", debug_mode > 0): - det = aidemo.nanotracker_postprocess(output_data[0],output_data[1],[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],head_thresh,center_xy_wh,crop_kmodel_input_shape[2],CONTEXT_AMOUNT) - return det - -# 单目标跟踪 对应crop模型的 ai2d 初始化 -def crop_ai2d_init(): - with ScopedTiming("crop_ai2d_init",debug_mode > 0): - global crop_ai2d, crop_pad_ai2d - crop_ai2d = nn.ai2d() - crop_pad_ai2d = nn.ai2d() - - crop_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - crop_pad_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - - crop_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - 
crop_pad_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global crop_ai2d_out_tensor - data = np.ones(crop_kmodel_input_shape, dtype=np.uint8) - crop_ai2d_out_tensor = nn.from_numpy(data) - -# 单目标跟踪 对应crop模型的 ai2d 运行 -def crop_ai2d_run(rgb888p_img,center_xy_wh): - with ScopedTiming("crop_ai2d_run",debug_mode > 0): - global crop_ai2d, crop_pad_ai2d - global crop_ai2d_input_tensor,crop_ai2d_out_tensor,crop_ai2d_builder - global crop_pad_ai2d_input_tensor,crop_pad_ai2d_out_tensor,crop_pad_ai2d_builder - - s_z = round(np.sqrt((center_xy_wh[2] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])) * (center_xy_wh[3] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])))) - c = (s_z + 1) / 2 - context_xmin = np.floor(center_xy_wh[0] - c + 0.5) - context_xmax = int(context_xmin + s_z - 1) - context_ymin = np.floor(center_xy_wh[1] - c + 0.5) - context_ymax = int(context_ymin + s_z - 1) - - left_pad = int(max(0, -context_xmin)) - top_pad = int(max(0, -context_ymin)) - right_pad = int(max(0, int(context_xmax - OUT_RGB888P_WIDTH + 1))) - bottom_pad = int(max(0, int(context_ymax - OUT_RGB888P_HEIGHT + 1))) - context_xmin = context_xmin + left_pad - context_xmax = context_xmax + left_pad - context_ymin = context_ymin + top_pad - context_ymax = context_ymax + top_pad - - if (left_pad != 0 or right_pad != 0 or top_pad != 0 or bottom_pad != 0): - crop_pad_ai2d.set_pad_param(True, [0, 0, 0, 0, top_pad, bottom_pad, left_pad, right_pad], 0, rgb_mean) - crop_pad_ai2d_builder = crop_pad_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad]) - crop_pad_ai2d_input = rgb888p_img.to_numpy_ref() - crop_pad_ai2d_input_tensor = nn.from_numpy(crop_pad_ai2d_input) - crop_pad_ai2d_output = np.ones([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], dtype=np.uint8) - crop_pad_ai2d_out_tensor = nn.from_numpy(crop_pad_ai2d_output) - crop_pad_ai2d_builder.run(crop_pad_ai2d_input_tensor, crop_pad_ai2d_out_tensor) - - crop_ai2d.set_crop_param(True, int(context_xmin), int(context_ymin), int(context_xmax - context_xmin + 1), int(context_ymax - context_ymin + 1)) - crop_ai2d_builder = crop_ai2d.build([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], crop_kmodel_input_shape) - crop_ai2d_input_tensor = crop_pad_ai2d_out_tensor - crop_ai2d_builder.run(crop_ai2d_input_tensor, crop_ai2d_out_tensor) - del crop_pad_ai2d_input_tensor - del crop_pad_ai2d_out_tensor - del crop_pad_ai2d_builder - else: - crop_ai2d.set_crop_param(True, int(center_xy_wh[0] - s_z/2.0), int(center_xy_wh[1] - s_z/2.0), int(s_z), int(s_z)) - crop_ai2d_builder = crop_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], crop_kmodel_input_shape) - crop_ai2d_input = rgb888p_img.to_numpy_ref() - crop_ai2d_input_tensor = nn.from_numpy(crop_ai2d_input) - crop_ai2d_builder.run(crop_ai2d_input_tensor, crop_ai2d_out_tensor) - -# 单目标跟踪 对应crop模型的 ai2d 释放 -def crop_ai2d_release(): - with ScopedTiming("crop_ai2d_release",debug_mode > 0): - global crop_ai2d_input_tensor,crop_ai2d_builder - del crop_ai2d_input_tensor - del crop_ai2d_builder - - -# 单目标跟踪 对应src模型的 ai2d 初始化 -def src_ai2d_init(): - with ScopedTiming("src_ai2d_init",debug_mode > 0): - global src_ai2d, src_pad_ai2d - src_ai2d = nn.ai2d() - src_pad_ai2d = nn.ai2d() - - src_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - 
src_pad_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - - src_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - src_pad_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global src_ai2d_out_tensor - data = np.ones(src_kmodel_input_shape, dtype=np.uint8) - src_ai2d_out_tensor = nn.from_numpy(data) - -# 单目标跟踪 对应src模型的 ai2d 运行 -def src_ai2d_run(rgb888p_img,center_xy_wh): - with ScopedTiming("src_ai2d_run",debug_mode > 0): - global src_ai2d, src_pad_ai2d - global src_ai2d_input_tensor,src_ai2d_out_tensor,src_ai2d_builder - global src_pad_ai2d_input_tensor,src_pad_ai2d_out_tensor,src_pad_ai2d_builder - - s_z = round(np.sqrt((center_xy_wh[2] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])) * (center_xy_wh[3] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])))) * ratio_src_crop - c = (s_z + 1) / 2 - context_xmin = np.floor(center_xy_wh[0] - c + 0.5) - context_xmax = int(context_xmin + s_z - 1) - context_ymin = np.floor(center_xy_wh[1] - c + 0.5) - context_ymax = int(context_ymin + s_z - 1) - - left_pad = int(max(0, -context_xmin)) - top_pad = int(max(0, -context_ymin)) - right_pad = int(max(0, int(context_xmax - OUT_RGB888P_WIDTH + 1))) - bottom_pad = int(max(0, int(context_ymax - OUT_RGB888P_HEIGHT + 1))) - context_xmin = context_xmin + left_pad - context_xmax = context_xmax + left_pad - context_ymin = context_ymin + top_pad - context_ymax = context_ymax + top_pad - - if (left_pad != 0 or right_pad != 0 or top_pad != 0 or bottom_pad != 0): - src_pad_ai2d.set_pad_param(True, [0, 0, 0, 0, top_pad, bottom_pad, left_pad, right_pad], 0, rgb_mean) - src_pad_ai2d_builder = src_pad_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad]) - src_pad_ai2d_input = rgb888p_img.to_numpy_ref() - src_pad_ai2d_input_tensor = nn.from_numpy(src_pad_ai2d_input) - src_pad_ai2d_output = np.ones([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], dtype=np.uint8) - src_pad_ai2d_out_tensor = nn.from_numpy(src_pad_ai2d_output) - src_pad_ai2d_builder.run(src_pad_ai2d_input_tensor, src_pad_ai2d_out_tensor) - - src_ai2d.set_crop_param(True, int(context_xmin), int(context_ymin), int(context_xmax - context_xmin + 1), int(context_ymax - context_ymin + 1)) - src_ai2d_builder = src_ai2d.build([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], src_kmodel_input_shape) - src_ai2d_input_tensor = src_pad_ai2d_out_tensor - src_ai2d_builder.run(src_ai2d_input_tensor, src_ai2d_out_tensor) - del src_pad_ai2d_input_tensor - del src_pad_ai2d_out_tensor - del src_pad_ai2d_builder - else: - src_ai2d.set_crop_param(True, int(center_xy_wh[0] - s_z/2.0), int(center_xy_wh[1] - s_z/2.0), int(s_z), int(s_z)) - src_ai2d_builder = src_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], src_kmodel_input_shape) - src_ai2d_input = rgb888p_img.to_numpy_ref() - src_ai2d_input_tensor = nn.from_numpy(src_ai2d_input) - src_ai2d_builder.run(src_ai2d_input_tensor, src_ai2d_out_tensor) - -# 单目标跟踪 对应src模型的 ai2d 释放 -def src_ai2d_release(): - with ScopedTiming("src_ai2d_release",debug_mode > 0): - global src_ai2d_input_tensor,src_ai2d_builder - del src_ai2d_input_tensor - del src_ai2d_builder - - -# 单目标跟踪 crop kpu 初始化 -def crop_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("crop_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - 
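        # The same kpu lifecycle is used for every model in these demos:
        # nn.kpu() -> load_kmodel() -> set_input_tensor() with the ai2d output ->
        # run() -> get_output_tensor().to_numpy(), deleting each tensor once its
        # numpy copy has been taken.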
kpu_obj.load_kmodel(kmodel_file) - - crop_ai2d_init() - return kpu_obj - -# 单目标跟踪 crop kpu 输入预处理 -def crop_kpu_pre_process(rgb888p_img,center_xy_wh): - crop_ai2d_run(rgb888p_img,center_xy_wh) - with ScopedTiming("crop_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,crop_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, crop_ai2d_out_tensor) - -# 单目标跟踪 crop kpu 获取输出 -def crop_kpu_get_output(): - with ScopedTiming("crop_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -# 单目标跟踪 crop kpu 运行 -def crop_kpu_run(kpu_obj,rgb888p_img,center_xy_wh): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - crop_kpu_pre_process(rgb888p_img,center_xy_wh) - # (2) kpu 运行 - with ScopedTiming("crop_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - crop_ai2d_release() - # (4) 获取kpu输出 - result = crop_kpu_get_output() - # 返回 crop kpu 的输出 - return result - -# 单目标跟踪 crop kpu 释放 -def crop_kpu_deinit(): - with ScopedTiming("crop_kpu_deinit",debug_mode > 0): - if 'crop_ai2d' in globals(): - global crop_ai2d - del crop_ai2d - if 'crop_pad_ai2d' in globals(): - global crop_pad_ai2d - del crop_pad_ai2d - if 'crop_ai2d_out_tensor' in globals(): - global crop_ai2d_out_tensor - del crop_ai2d_out_tensor - -# 单目标跟踪 src kpu 初始化 -def src_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("src_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - src_ai2d_init() - return kpu_obj - -# 单目标跟踪 src kpu 输入预处理 -def src_kpu_pre_process(rgb888p_img,center_xy_wh): - src_ai2d_run(rgb888p_img,center_xy_wh) - with ScopedTiming("src_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,src_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, src_ai2d_out_tensor) - -# 单目标跟踪 src kpu 获取输出 -def src_kpu_get_output(): - with ScopedTiming("src_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -# 单目标跟踪 src kpu 运行 -def src_kpu_run(kpu_obj,rgb888p_img,center_xy_wh): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - src_kpu_pre_process(rgb888p_img,center_xy_wh) - # (2) kpu 运行 - with ScopedTiming("src_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - src_ai2d_release() - # (4) 获取kpu输出 - result = src_kpu_get_output() - # 返回 src kpu 的输出 - return result - -# 单目标跟踪 src kpu 释放 -def src_kpu_deinit(): - with ScopedTiming("src_kpu_deinit",debug_mode > 0): - if 'src_ai2d' in globals(): - global src_ai2d - del src_ai2d - if 'src_pad_ai2d' in globals(): - global src_pad_ai2d - del src_pad_ai2d - if 'src_ai2d_out_tensor' in globals(): - global src_ai2d_out_tensor - del src_ai2d_out_tensor - -# 单目标跟踪 track kpu 初始化 -def track_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("track_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - return kpu_obj - -# 单目标跟踪 track kpu 输入预处理 -def track_kpu_pre_process(): - with ScopedTiming("track_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,track_kpu_input_0,track_kpu_input_1 - # set kpu input - current_kmodel_obj.set_input_tensor(0, track_kpu_input_0) - current_kmodel_obj.set_input_tensor(1, track_kpu_input_1) - -# 单目标跟踪 track kpu 获取输出 -def track_kpu_get_output(): - with ScopedTiming("track_kpu_get_output",debug_mode > 0): - global 
current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -# 单目标跟踪 track kpu 运行 -def track_kpu_run(kpu_obj,center_xy_wh): - global current_kmodel_obj,track_kpu_input_1 - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - track_kpu_pre_process() - # (2) kpu 运行 - with ScopedTiming("track_kpu_run",debug_mode > 0): - kpu_obj.run() - - del track_kpu_input_1 - # (4) 获取kpu输出 - results = track_kpu_get_output() - # (5) track 后处理 - det = track_kpu_post_process(results,center_xy_wh) - # 返回 跟踪的结果 - return det - -# 单目标跟踪 track kpu 释放 -def track_kpu_deinit(): - with ScopedTiming("track_kpu_deinit",debug_mode > 0): - if 'track_kpu_input_0' in globals(): - global track_kpu_input_0 - del track_kpu_input_0 - - - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_BGR_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - 
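# ---------------------------------------------------------------------------
# Supplementary sketch (not part of the original demo flow): crop_ai2d_run()
# and src_ai2d_run() above both derive their crop window from the same
# "context window" rule, s_z = sqrt((w + c*(w+h)) * (h + c*(w+h))) with
# c = CONTEXT_AMOUNT, and pad with rgb_mean wherever that window falls outside
# the frame. The hypothetical helper below reproduces only that geometry, using
# the globals defined above, so it can be sanity-checked offline; it performs
# no inference and is never called by the demo.
def context_window_sketch(center_xy_wh, img_w=OUT_RGB888P_WIDTH, img_h=OUT_RGB888P_HEIGHT):
    cx, cy, w, h = center_xy_wh
    ext = CONTEXT_AMOUNT * (w + h)
    s_z = round(np.sqrt((w + ext) * (h + ext)))      # square context window size
    half = (s_z + 1) / 2
    xmin = np.floor(cx - half + 0.5)                 # same rounding as crop_ai2d_run()
    ymin = np.floor(cy - half + 0.5)
    xmax = int(xmin + s_z - 1)
    ymax = int(ymin + s_z - 1)
    # padding needed on each side when the window leaves the image
    left, top = int(max(0, -xmin)), int(max(0, -ymin))
    right = int(max(0, xmax - img_w + 1))
    bottom = int(max(0, ymax - img_h + 1))
    return s_z, (left, top, right, bottom)

# With the default 100x100 start box (track_x1 = track_y1 = 300) the window is
# s_z = round(sqrt(200 * 200)) = 200 pixels and no padding is required.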
-#**********for nanotracker.py********** -def nanotracker_inference(): - print("nanotracker start") - kpu_crop = crop_kpu_init(crop_kmodel_file) # 创建单目标跟踪 crop kpu 对象 - kpu_src = src_kpu_init(src_kmodel_file) # 创建单目标跟踪 src kpu 对象 - kpu_track = track_kpu_init(track_kmodel_file) # 创建单目标跟踪 track kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - run_bool = True - if (track_x1 < 50 or track_y1 < 50 or track_x1+track_w >= OUT_RGB888P_WIDTH-50 or track_y1+track_h >= OUT_RGB888P_HEIGHT-50): - print("**剪切范围超出图像范围**") - run_bool = False - - track_mean_x = track_x1 + track_w / 2.0 - track_mean_y = track_y1 + track_h / 2.0 - draw_mean_w = int(track_w / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - draw_mean_h = int(track_h / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - draw_mean_x = int(track_mean_x / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - draw_mean_w / 2.0) - draw_mean_y = int(track_mean_y / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT - draw_mean_h / 2.0) - track_w_src = track_w - track_h_src = track_h - - center_xy_wh = [track_mean_x,track_mean_y,track_w_src,track_h_src] - center_xy_wh_tmp = [track_mean_x,track_mean_y,track_w_src,track_h_src] - - seconds = 8 - endtime = time.time() + seconds - enter_init = True - - track_boxes = [track_x1,track_y1,track_w,track_h,1] - track_boxes_tmp = np.array([track_x1,track_y1,track_w,track_h,1]) - global draw_img,osd_img - - count = 0 - while run_bool: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - nowtime = time.time() - draw_img.clear() - if (enter_init and nowtime <= endtime): - print("倒计时: " + str(endtime - nowtime) + " 秒") - draw_img.draw_rectangle(draw_mean_x , draw_mean_y , draw_mean_w , draw_mean_h , color=(255, 0, 255, 0),thickness = 4) - print(" >>>>>> get trackWindow <<<<<<<<") - global track_kpu_input_0 - track_kpu_input_0 = nn.from_numpy(crop_kpu_run(kpu_crop,rgb888p_img,center_xy_wh)) - - time.sleep(1) - if (nowtime > endtime): - print(">>>>>>> Play <<<<<<<") - enter_init = False - else: - global track_kpu_input_1 - track_kpu_input_1 = nn.from_numpy(src_kpu_run(kpu_src,rgb888p_img,center_xy_wh)) - det = track_kpu_run(kpu_track,center_xy_wh) - track_boxes = det[0] - center_xy_wh = det[1] - track_bool = True - if (len(track_boxes) != 0): - track_bool = track_boxes[0] > 10 and track_boxes[1] > 10 and track_boxes[0] + track_boxes[2] < OUT_RGB888P_WIDTH - 10 and track_boxes[1] + track_boxes[3] < OUT_RGB888P_HEIGHT - 10 - else: - track_bool = False - - if (len(center_xy_wh) != 0): - track_bool = track_bool and center_xy_wh[2] * center_xy_wh[3] < 40000 - else: - track_bool = False - - if (track_bool): - center_xy_wh_tmp = center_xy_wh - track_boxes_tmp = track_boxes - x1 = int(float(track_boxes[0]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - y1 = int(float(track_boxes[1]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - w = int(float(track_boxes[2]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - h = int(float(track_boxes[3]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - draw_img.draw_rectangle(x1, y1, w, h, color=(255, 255, 0, 0),thickness = 4) - else: - center_xy_wh = center_xy_wh_tmp - track_boxes = track_boxes_tmp - x1 = int(float(track_boxes[0]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - y1 = int(float(track_boxes[1]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - w = int(float(track_boxes[2]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - h = int(float(track_boxes[3]) * 
DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - draw_img.draw_rectangle(x1, y1, w, h, color=(255, 255, 0, 0),thickness = 4) - draw_img.draw_string( x1 , y1-50, "Step away from the camera, please !" , color=(255, 255 ,0 , 0), scale=4, thickness = 1) - draw_img.draw_string( x1 , y1-100, "Near the center, please !" , color=(255, 255 ,0 , 0), scale=4, thickness = 1) - - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - crop_kpu_deinit() # 释放 单目标跟踪 crop kpu - src_kpu_deinit() # 释放 单目标跟踪 src kpu - track_kpu_deinit() # 释放 单目标跟踪 track kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_crop - del kpu_src - del kpu_track - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("nanotracker end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - nanotracker_inference() -``` - -### 14.隔空放大 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + "kmodel/hand_det.kmodel" # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global 
hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global space_ai2d,space_ai2d_input_tensor,space_ai2d_output_tensor,space_ai2d_builder,space_draw_ai2d_release # 定义缩放剪切图像全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -space_draw_ai2d_release = False - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - # init kpu and load kmodel - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, 
anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): - global hd_ai2d_builder - del hd_ai2d_builder - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor,hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)返回手掌关键点检测结果 - return results - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 隔空缩放剪切 ai2d 初始化 -def space_ai2d_init(): - with ScopedTiming("space_ai2d_init",debug_mode > 0): - global space_ai2d - space_ai2d = nn.ai2d() - space_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.RGB_packed, - np.uint8, np.uint8) - -# 隔空缩放剪切 ai2d 运行 -def 
space_ai2d_run(rgb888p_img, x, y, w, h, out_w, out_h): - with ScopedTiming("space_ai2d_run",debug_mode > 0): - global space_ai2d,space_ai2d_input_tensor,space_ai2d_output_tensor,space_draw_ai2d_release - space_draw_ai2d_release = True - space_ai2d_input = rgb888p_img.to_numpy_ref() - space_ai2d_input_tensor = nn.from_numpy(space_ai2d_input) - - space_ai2d.set_crop_param(True, x, y, w, h) - space_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - data = np.ones((1,out_h, out_w,3), dtype=np.uint8) - space_ai2d_output_tensor = nn.from_numpy(data) - - global space_ai2d_builder - space_ai2d_builder = space_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,out_h, out_w,3]) - space_ai2d_builder.run(space_ai2d_input_tensor, space_ai2d_output_tensor) - - space_np_out = space_ai2d_output_tensor.to_numpy() - return space_np_out - -# 隔空缩放剪切 ai2d 释放内存 -def space_ai2d_release(re_ai2d): - with ScopedTiming("space_ai2d_release",debug_mode > 0): - global space_ai2d_input_tensor,space_ai2d_output_tensor,space_ai2d_builder,space_draw_ai2d_release,space_ai2d - if (space_draw_ai2d_release): - del space_ai2d_input_tensor - del space_ai2d_output_tensor - del space_ai2d_builder - space_draw_ai2d_release = False - if (re_ai2d): - del space_ai2d - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 
图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for space_resize.py********** -def space_resize_inference(): - print("space_resize start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - space_ai2d_init() # 初始化 隔空缩放剪切 ai2d 对象 - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - global draw_img,osd_img - first_start = True # 首次手掌入镜参数 - two_point_left_x = 0 # 中指食指包括范围 x - two_point_top_y = 0 # 中指食指包括范围 y - two_point_mean_w = 0 # 中指食指首次入镜包括范围 w - two_point_mean_h = 0 # 中指食指首次入镜包括范围 h - two_point_crop_w = 0 # 中指食指包括范围 w - two_point_crop_h = 0 # 中指食指包括范围 h - osd_plot_x = 0 # osd 画缩放图起始点 x - osd_plot_y = 0 # osd 画缩放图起始点 y - ori_new_ratio = 0 # 缩放比例 - new_resize_w = 0 # 缩放后 w - new_resize_h = 0 # 缩放后 h - crop_area = 0 # 剪切区域 - rect_frame_x = 0 # osd绘画起始点 x - rect_frame_y = 0 # osd绘画起始点 y - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - two_point = np.zeros((4),dtype=np.int16) - dets_no_pro = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - dets = [] - for det_box in dets_no_pro: - if det_box[4] < OUT_RGB888P_WIDTH - 10 : - dets.append(det_box) - - if (len(dets)==1): - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - - results_show = np.zeros(hk_results[0].shape,dtype=np.int16) - results_show[0::2] = hk_results[0][0::2] * w_kp + x1_kp - results_show[1::2] = hk_results[0][1::2] * h_kp + y1_kp - - two_point[0] = results_show[8] - two_point[1] = results_show[9] - two_point[2] = results_show[16+8] - two_point[3] = results_show[16+9] - - if (first_start): - if (two_point[0] > 0 and two_point[0] < OUT_RGB888P_WIDTH and two_point[2] > 0 and two_point[2] < OUT_RGB888P_WIDTH and two_point[1] > 0 and two_point[1] < OUT_RGB888P_HEIGHT and two_point[3] > 0 and two_point[3] < OUT_RGB888P_HEIGHT): - two_point_mean_w = np.sqrt(pow(two_point[0] - two_point[2],2) + pow(two_point[1] - two_point[3],2))*0.8 - 
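                                # Zoom logic (as implemented below): the first valid frame stores
                                # 0.8x the distance between the two tracked fingertips as the
                                # reference window size; on later frames ori_new_ratio compares the
                                # current fingertip distance against that reference, and the crop
                                # around the fingertips is resized by that ratio (capped at 600 px)
                                # before being written into the masks overlay.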
two_point_mean_h = np.sqrt(pow(two_point[0] - two_point[2],2) + pow(two_point[1] - two_point[3],2))*0.8 - first_start = False - else: - two_point_left_x = int(max((two_point[0] + two_point[2]) / 2 - two_point_mean_w / 2, 0)) - two_point_top_y = int(max((two_point[1] + two_point[3]) / 2 - two_point_mean_h / 2, 0)) - two_point_crop_w = int(min(min((two_point[0] + two_point[2]) / 2 - two_point_mean_w / 2 + two_point_mean_w , two_point_mean_w), OUT_RGB888P_WIDTH - ((two_point[0] + two_point[2]) / 2 - two_point_mean_w / 2))) - two_point_crop_h = int(min(min((two_point[1] + two_point[3]) / 2 - two_point_mean_h / 2 + two_point_mean_h , two_point_mean_h), OUT_RGB888P_HEIGHT - ((two_point[1] + two_point[3]) / 2 - two_point_mean_h / 2))) - - ori_new_ratio = np.sqrt(pow((two_point[0] - two_point[2]),2) + pow((two_point[1] - two_point[3]),2))*0.8 / two_point_mean_w - - new_resize_w = min(int(two_point_crop_w * ori_new_ratio / OUT_RGB888P_WIDTH * DISPLAY_WIDTH),600) - new_resize_h = min(int(two_point_crop_h * ori_new_ratio / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT),600) - - rect_frame_x = int(two_point_left_x * 1.0 / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - rect_frame_y = int(two_point_top_y * 1.0 / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - - draw_w = min(new_resize_w,DISPLAY_WIDTH-rect_frame_x-1) - draw_h = min(new_resize_h,DISPLAY_HEIGHT-rect_frame_y-1) - - space_np_out = space_ai2d_run(rgb888p_img, two_point_left_x, two_point_top_y, two_point_crop_w, two_point_crop_h, new_resize_w, new_resize_h) # 运行 隔空缩放检测 ai2d - global masks - masks[rect_frame_y:rect_frame_y + draw_h,rect_frame_x:rect_frame_x + draw_w,0] = 255 - masks[rect_frame_y:rect_frame_y + draw_h,rect_frame_x:rect_frame_x + draw_w,1:4] = space_np_out[0][0:draw_h,0:draw_w,:] - space_ai2d_release(False) # 释放 隔空缩放检测 ai2d 相关对象 - - - draw_img.draw_rectangle(rect_frame_x, rect_frame_y, new_resize_w, new_resize_h, color=(255, 0, 255, 0),thickness = 4) - else: - draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) - first_start = True - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - space_ai2d_release(True) # 释放 隔空缩放检测 ai2d 相关对象 - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("space_resize end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - space_resize_inference() -``` - -### 15.拼图游戏 - -```python -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import random -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = 
ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -level = 3 # 游戏级别 目前只支持设置为 3 - - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + "kmodel/hand_det.kmodel" # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - # init kpu and load kmodel - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with 
ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): - global hd_ai2d_builder - del hd_ai2d_builder - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor,hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): 
- hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)返回手掌关键点检测结果 - return results - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, 
image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for puzzle_game.py********** -def puzzle_game_inference(): - print("puzzle_game_inference start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - global draw_img,osd_img - puzzle_width = DISPLAY_HEIGHT # 设定 拼图宽 - puzzle_height = DISPLAY_HEIGHT # 设定 拼图高 - puzzle_ori_width = DISPLAY_WIDTH - puzzle_width - 50 # 设定 原始拼图宽 - puzzle_ori_height = DISPLAY_WIDTH - puzzle_height - 50 # 设定 原始拼图高 - - every_block_width = int(puzzle_width/level) # 设定 拼图块宽 - every_block_height = int(puzzle_height/level) # 设定 拼图块高 - ori_every_block_width = int(puzzle_ori_width/level) # 设定 原始拼图宽 - ori_every_block_height = int(puzzle_ori_height/level) # 设定 原始拼图高 - ratio_num = every_block_width/360.0 # 字体比例 - blank_x = 0 # 空白块 角点x - blank_y = 0 # 空白块 角点y - direction_vec = [-1,1,-1,1] # 空白块四种移动方向 - - exact_division_x = 0 # 交换块 角点x - exact_division_y = 0 # 交换块 角点y - distance_tow_points = DISPLAY_WIDTH # 两手指距离 - distance_thred = every_block_width*0.4 # 两手指距离阈值 - - move_mat = np.zeros((every_block_height,every_block_width,4),dtype=np.uint8) - - osd_frame_tmp = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - osd_frame_tmp_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=osd_frame_tmp) - osd_frame_tmp[0:puzzle_height,0:puzzle_width,3] = 100 - osd_frame_tmp[0:puzzle_height,0:puzzle_width,2] = 150 - osd_frame_tmp[0:puzzle_height,0:puzzle_width,1] = 130 - osd_frame_tmp[0:puzzle_height,0:puzzle_width,0] = 127 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,3] = 100 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,2] = 150 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,1] = 130 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,0] = 127 - for i in range(level*level): - osd_frame_tmp_img.draw_rectangle((i%level)*every_block_width,(i//level)*every_block_height,every_block_width,every_block_height,(255,0,0,0),5) - osd_frame_tmp_img.draw_string((i%level)*every_block_width + 55,(i//level)*every_block_height + 45,str(i),(255,0,0,255),30*ratio_num) - osd_frame_tmp_img.draw_rectangle(puzzle_width+25 + (i%level)*ori_every_block_width,(1080-puzzle_ori_height)//2 + (i//level)*ori_every_block_height,ori_every_block_width,ori_every_block_height,(255,0,0,0),5) - osd_frame_tmp_img.draw_string(puzzle_width+25 + (i%level)*ori_every_block_width + 50,(1080-puzzle_ori_height)//2 + (i//level)*ori_every_block_height + 
25,str(i),(255,0,0,255),20*ratio_num) - osd_frame_tmp[0:every_block_height,0:every_block_width,3] = 114 - osd_frame_tmp[0:every_block_height,0:every_block_width,2] = 114 - osd_frame_tmp[0:every_block_height,0:every_block_width,1] = 114 - osd_frame_tmp[0:every_block_height,0:every_block_width,0] = 220 - - for i in range(level*10): - k230_random = int(random.random() * 100) % 4 - blank_x_tmp = blank_x - blank_y_tmp = blank_y - if (k230_random < 2): - blank_x_tmp = blank_x + direction_vec[k230_random] - else: - blank_y_tmp = blank_y + direction_vec[k230_random] - - if ((blank_x_tmp >= 0 and blank_x_tmp < level) and (blank_y_tmp >= 0 and blank_y_tmp < level) and (abs(blank_x - blank_x_tmp) <= 1 and abs(blank_y - blank_y_tmp) <= 1)): - move_rect = [blank_x_tmp*every_block_width,blank_y_tmp*every_block_height,every_block_width,every_block_height] - blank_rect = [blank_x*every_block_width,blank_y*every_block_height,every_block_width,every_block_height] - - move_mat[:] = osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] - osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] - osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = move_mat[:] - - blank_x = blank_x_tmp - blank_y = blank_y_tmp - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - two_point = np.zeros((4),dtype=np.int16) - dets_no_pro = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - osd_frame_tmp_img.copy_to(draw_img) - - dets = [] - for det_box in dets_no_pro: - if det_box[4] < OUT_RGB888P_WIDTH - 10 : - dets.append(det_box) - - if (len(dets)==1): - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - - results_show = np.zeros(hk_results[0].shape,dtype=np.int16) - results_show[0::2] = (hk_results[0][0::2] * w_kp + x1_kp) #* DISPLAY_WIDTH // OUT_RGB888P_WIDTH - results_show[1::2] = (hk_results[0][1::2] * h_kp + y1_kp) #* DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - - two_point[0] = results_show[8+8] - two_point[1] = results_show[8+9] - two_point[2] = results_show[16+8] - two_point[3] = results_show[16+9] - - if (two_point[1] <= OUT_RGB888P_WIDTH): - distance_tow_points = np.sqrt(pow((two_point[0]-two_point[2]),2) + pow((two_point[1] - two_point[3]),2))* 1.0 / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - exact_division_x = int((two_point[0] * 1.0 / OUT_RGB888P_WIDTH * 
DISPLAY_WIDTH)//every_block_width) - exact_division_y = int((two_point[1] * 1.0 / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT)//every_block_height) - - - if (distance_tow_points < distance_thred and exact_division_x >= 0 and exact_division_x < level and exact_division_y >= 0 and exact_division_y < level): - if (abs(blank_x - exact_division_x) == 1 and abs(blank_y - exact_division_y) == 0): - move_rect = [exact_division_x*every_block_width,exact_division_y*every_block_height,every_block_width,every_block_height] - blank_rect = [blank_x*every_block_width,blank_y*every_block_height,every_block_width,every_block_height] - - move_mat[:] = osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] - osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] - osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = move_mat[:] - - blank_x = exact_division_x - elif (abs(blank_y - exact_division_y) == 1 and abs(blank_x - exact_division_x) == 0): - move_rect = [exact_division_x*every_block_width,exact_division_y*every_block_height,every_block_width,every_block_height] - blank_rect = [blank_x*every_block_width,blank_y*every_block_height,every_block_width,every_block_height] - - move_mat[:] = osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] - osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] - osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = move_mat[:] - - blank_y = exact_division_y - - osd_frame_tmp_img.copy_to(draw_img) - x1 = int(two_point[0] * 1.0 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(two_point[1] * 1.0 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - draw_img.draw_circle(x1, y1, 1, color=(255, 0, 255, 255),thickness=4,fill=False) - else: - osd_frame_tmp_img.copy_to(draw_img) - x1 = int(two_point[0] * 1.0 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(two_point[1] * 1.0 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - draw_img.draw_circle(x1, y1, 1, color=(255, 255, 255, 0),thickness=4,fill=False) - else: - draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) - first_start = True - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("puzzle_game_inference end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - puzzle_game_inference() -``` - -### 16.基于关键点的手势识别 - -```python -import aicube 
#aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -root_dir = '/sdcard/app/tests/' - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def 
hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) # kpu结果后处理 - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 
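# Note: unlike the palm-detection pipeline above, hk_ai2d_builder is rebuilt inside
# hk_ai2d_run() on every call, because set_crop_param() takes the per-hand ROI
# (x, y, w, h) and the crop region changes with each detected palm. The release
# helper below therefore drops both the input tensor and the builder after each run.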
-def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor, hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 输出后处理 -def hk_kpu_post_process(results, x, y, w, h): - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = results[0::2] * w + x - results_show[1::2] = results[1::2] * h + y - return results_show - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)手掌关键点检测 kpu 结果后处理 - result = hk_kpu_post_process(results[0],x,y,w,h) - # (6)返回手掌关键点检测结果 - return result - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): #删除hk_ai2d变量,释放对它所引用对象的内存引用 - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): #删除hk_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 求两个vector之间的夹角 -def hk_vector_2d_angle(v1,v2): - with ScopedTiming("hk_vector_2d_angle",debug_mode > 0): - v1_x = v1[0] - v1_y = v1[1] - v2_x = v2[0] - v2_y = v2[1] - v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) - v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) - dot_product = v1_x * v2_x + v1_y * v2_y - cos_angle = dot_product/(v1_norm*v2_norm) - angle = np.acos(cos_angle)*180/np.pi - return angle - -# 根据手掌关键点检测结果判断手势类别 -def hk_gesture(results): - with ScopedTiming("hk_gesture",debug_mode > 0): - angle_list = [] - for i in range(5): - angle = hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) - angle_list.append(angle) - - thr_angle = 65. - thr_angle_thumb = 53. - thr_angle_s = 49. - gesture_str = None - if 65535. 
not in angle_list: - if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "fist" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]<thr_angle_s): - gesture_str = "five" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "gun" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "love" - elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "one" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "six" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle): - gesture_str = "three" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "thumbUp" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "yeah" - - return gesture_str - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 标出检测到的21个关键点并用不同颜色的线段连接 -def display_draw(results, x, y, w, h): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if results: - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = results[0::2] * (DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - results_show[1::2] = results[1::2] * (DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - - for i in range(len(results_show)//2): - draw_img.draw_circle(results_show[i*2], results_show[i*2+1], 1, color=(255, 0, 255, 0),fill=False) - for i in range(5): - j = i*8 - if i==0: - R = 255; G = 0; B = 0 - if i==1: - R = 255; G = 0; B = 255 - if i==2: - R = 255; G = 255; B = 0 - if i==3: - R = 0; G = 255; B = 0 - if i==4: - R = 0; G = 0; B = 255 - draw_img.draw_line(results_show[0], results_show[1], results_show[j+2], results_show[j+3], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+2], results_show[j+3], results_show[j+4], results_show[j+5], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+4], results_show[j+5], results_show[j+6], results_show[j+7], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+6], results_show[j+7], results_show[j+8], results_show[j+9], color=(255,R,G,B), thickness = 3) - - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2,
rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_keypoint_class.py********** -def hand_keypoint_class_inference(): - print("hand_keypoint_class_test start") - - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total", 1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - draw_img.clear() - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - w_det = int(float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - h_det = int(float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x_det = int(x1*DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y_det = int(y1*DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - length = max(w, h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - gesture = hk_gesture(hk_results) # 根据关键点检测结果判断手势类别 - - draw_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) # 将得到的手掌检测结果 绘制到 display - display_draw(hk_results, x1_kp, y1_kp, w_kp, h_kp) # 将得到的手掌关键点检测结果 绘制到 display - draw_img.draw_string( x_det , y_det-50, " " + str(gesture), color=(255,0, 255, 0), scale=4) # 
将根据关键点检测结果判断的手势类别 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_keypoint_class_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_keypoint_class_inference() -``` - -### 17.自学习 - -```python -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os #基本的操作系统交互功能 -import os, sys #操作系统接口模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGHT = 1080 - -# kmodel参数设置 -# kmodel输入shape -kmodel_input_shape = (1,3,224,224) -# kmodel其它参数设置 -crop_w = 400 #图像剪切范围w -crop_h = 400 #图像剪切范围h -crop_x = OUT_RGB888P_WIDTH / 2.0 - crop_w / 2.0 #图像剪切范围x -crop_y = OUT_RGB888P_HEIGHT / 2.0 - crop_h / 2.0 #图像剪切范围y -thres = 0.5 #特征判别阈值 -top_k = 3 #识别范围 -categories = ['apple','banana'] #识别类别 -features = [2,2] #对应类别注册特征数量 -time_one = 100 #注册单个特征中途间隔帧数 - -# 文件配置 -# kmodel文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/recognition.kmodel' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -# 当前kmodel -global current_kmodel_obj -# ai2d: ai2d实例 -# ai2d_input_tensor: ai2d输入 -# ai2d_output_tensor:ai2d输出 -# ai2d_builder: 根据ai2d参数,构建的ai2d_builder对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder #for ai2d - -# 获取两个特征向量的相似度 -def getSimilarity(output_vec,save_vec): - tmp = sum(output_vec * save_vec) - mold_out = np.sqrt(sum(output_vec * output_vec)) - mold_save = np.sqrt(sum(save_vec * save_vec)) - return tmp / (mold_out * mold_save) - -# 自学习 ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - global ai2d_output_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# 自学习 ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global 
ai2d,ai2d_input_tensor,ai2d_output_tensor - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - ai2d.set_crop_param(True,int(crop_x),int(crop_y),int(crop_w),int(crop_h)) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# 自学习 ai2d 释放 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor,ai2d_builder - del ai2d_input_tensor - del ai2d_builder - -# 自学习 kpu 初始化 -def kpu_init(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # 初始化kpu对象 - kpu_obj = nn.kpu() - # 加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化ai2d - ai2d_init() - return kpu_obj - -# 自学习 kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - # 使用ai2d对原图进行预处理(crop,resize) - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # 将ai2d输出设置为kpu输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# 自学习 kpu 获取输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape(-1) - del data - results.append(result) - return results - -# 自学习 kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放ai2d资源 - ai2d_release() - # (4)获取kpu输出 - results = kpu_get_output() - - # (5)返回输出 - return results - -# 自学习 kpu 释放 -def kpu_deinit(): - # kpu释放 - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_output_tensor' in globals(): - global ai2d_output_tensor - del ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -#for camera,已经封装好,无需自己再实现,直接调用即可 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - 
camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for self_learning.py******************** -def self_learning_inference(): - print("self_learning_test start") - # kpu初始化 - kpu_self_learning = kpu_init(kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - - crop_x_osd = int(crop_x / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - crop_y_osd = int(crop_y / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - crop_w_osd = int(crop_w / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - crop_h_osd = int(crop_h / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - -# stat_info = os.stat(root_dir + 'utils/features') -# if (not (stat_info[0] & 0x4000)): -# os.mkdir(root_dir + 'utils/features') - - time_all = 0 - time_now = 0 - category_index = 0 - for i in range(len(categories)): - for j in range(features[i]): - time_all += time_one - - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取检测结果 - results = kpu_run(kpu_self_learning,rgb888p_img) - global draw_img, osd_img - draw_img.clear() - draw_img.draw_rectangle(crop_x_osd,crop_y_osd, crop_w_osd, crop_h_osd, color=(255, 255, 0, 255), thickness = 4) - - if (category_index < len(categories)): - time_now += 1 - draw_img.draw_string( 50 , 200, categories[category_index] + "_" + str(int(time_now-1) // time_one) + ".bin", color=(255,255,0,0), scale=7) - with open(root_dir + 'utils/features/' + categories[category_index] + "_" + str(int(time_now-1) // time_one) + ".bin", 'wb') as f: - f.write(results[0].tobytes()) - if (time_now // time_one == features[category_index]): - category_index += 1 - time_all -= time_now - time_now = 0 - else: - results_learn = [] - list_features = os.listdir(root_dir + 'utils/features/') - for feature in list_features: - with open(root_dir + 'utils/features/' + feature, 'rb') as f: - data = f.read() - 
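# Each stored feature file is read back as a float vector and compared against the
# current kmodel output with getSimilarity() (cosine similarity); only scores above
# `thres` are kept, and at most `top_k` best-scoring categories are drawn on screen.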
save_vec = np.frombuffer(data, dtype=np.float) - score = getSimilarity(results[0], save_vec) - - if (score > thres): - res = feature.split("_") - is_same = False - for r in results_learn: - if (r["category"] == res[0]): - if (r["score"] < score): - r["bin_file"] = feature - r["score"] = score - is_same = True - - if (not is_same): - if(len(results_learn) < top_k): - evec = {} - evec["category"] = res[0] - evec["score"] = score - evec["bin_file"] = feature - results_learn.append( evec ) - results_learn = sorted(results_learn, key=lambda x: -x["score"]) - else: - if( score <= results_learn[top_k-1]["score"] ): - continue - else: - evec = {} - evec["category"] = res[0] - evec["score"] = score - evec["bin_file"] = feature - results_learn.append( evec ) - results_learn = sorted(results_learn, key=lambda x: -x["score"]) - - results_learn.pop() - draw_y = 200 - for r in results_learn: - draw_img.draw_string( 50 , draw_y, r["category"] + " : " + str(r["score"]), color=(255,255,0,0), scale=7) - draw_y += 50 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_self_learning - # 删除features文件夹 - stat_info = os.stat(root_dir + 'utils/features') - if (stat_info[0] & 0x4000): - list_files = os.listdir(root_dir + 'utils/features') - for l in list_files: - os.remove(root_dir + 'utils/features/' + l) - # 垃圾回收 - gc.collect() - # 释放媒体资源 - nn.shrink_memory_pool() - media_deinit() - - print("self_learning_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - self_learning_inference() -``` - -### 18.TTS中文 - -```python -from media.pyaudio import * # 音频模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import media.wave as wave # wav音频处理模块 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import time # 时间统计 -import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 -import struct # 字节字符转换模块 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 - -# 有关音频流的宏变量 -SAMPLE_RATE = 24000 # 采样率24000Hz,即每秒采样24000次 -CHANNELS = 1 # 通道数 1为单声道,2为立体声 -FORMAT = paInt16 # 音频输入输出格式 paInt16 -CHUNK = int(0.3 * 24000) # 每次读取音频数据的帧数,设置为0.3s的帧数24000*0.3=7200 - -# tts_zh(文字转语音中文)任务 -root_dir='/sdcard/app/tests/' -# 拼音字典 -dict_path=root_dir+"utils/pinyin.txt" -# 汉字转拼音字典文件 -phase_path=root_dir+"utils/small_pinyin.txt" -# 拼音转音素映射文件 -mapfile=root_dir+"utils/phone_map.txt" -# 输入中文语句 -text="嘉楠科技研发了最新款的芯片" -# 中文tts encoder模型 -fastspeech1_path=root_dir+"kmodel/zh_fastspeech_1_f32.kmodel" -# 中文tts decoder模型 -fastspeech2_path=root_dir+"kmodel/zh_fastspeech_2.kmodel" -# 中文tts 声码器模型 -hifigan_path=root_dir+"kmodel/hifigan.kmodel" -# 生成音频存储路径 -save_wav_file = root_dir+"test.wav" -debug_mode=1 - - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if 
self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -global ttszh - -def play_audio(): - # 初始化音频流 - p = PyAudio() - p.initialize(CHUNK) - ret = media.buffer_init() - if ret: - print("record_audio, buffer_init failed") - # 用于播放音频 - output_stream = p.open( - format=FORMAT, - channels=CHANNELS, - rate=SAMPLE_RATE, - output=True, - frames_per_buffer=CHUNK - ) - wf = wave.open(save_wav_file, "rb") - wav_data = wf.read_frames(CHUNK) - while wav_data: - output_stream.write(wav_data) - wav_data = wf.read_frames(CHUNK) - time.sleep(2) # 时间缓冲,用于播放声音 - wf.close() - output_stream.stop_stream() - output_stream.close() - p.terminate() - media.buffer_deinit() - -def ttszh_init(): - with ScopedTiming("init_ttszh",debug_mode > 0): - global ttszh - ttszh=aidemo.tts_zh_create(dict_path,phase_path,mapfile) - -# ttszh推理过程 -def ttszh_inference(): - global ttszh - try: - # ttszh初始化 - ttszh_init() - with ScopedTiming("preprocess",debug_mode > 0): - # ttszh数据预处理,获取data和data_len - preprocess_data=aidemo.tts_zh_preprocess(ttszh,text) - data=preprocess_data[0] - data_len=preprocess_data[1] - with ScopedTiming("encode run",debug_mode > 0): - # 创建编码器kpu,加载kmodel - kpu_enc = nn.kpu() - kpu_enc.load_kmodel(fastspeech1_path) - # 创建编码器模型输入并和模型绑定,编码器包含两个输入,一个是文字预处理的序列数据,一个是speaker数据 - # 编码器序列数据 - enc_seq_input_array=np.array(data) - enc_seq_input_tensor = nn.from_numpy(enc_seq_input_array) - # 编码器speaker数据 - enc_speaker_input_array=np.array([0.0]) - enc_speaker_input_tensor=nn.from_numpy(enc_speaker_input_array) - # 和模型绑定 - kpu_enc.set_input_tensor(1, enc_seq_input_tensor) - kpu_enc.set_input_tensor(0, enc_speaker_input_tensor) - # kpu运行 - kpu_enc.run() - # 获取kpu的输入 - enc_output_0=kpu_enc.get_output_tensor(0) - enc_output_1=kpu_enc.get_output_tensor(1) - enc_output_0_np=enc_output_0.to_numpy() - enc_output_1_np=enc_output_1.to_numpy() - with ScopedTiming("encode postprocess",debug_mode > 0): - # 给编码结果添加持续时间属性,每个音素编码向量按照持续时间重复 - duritions=enc_output_1_np[0][:int(data_len[0])] - durition_sum=int(np.sum(duritions)) - # 解码器输入维度为(1,600,256),不足部分需要padding - max_value=13 - while durition_sum>600: - for i in range(len(duritions)): - if duritions[i]>max_value: - duritions[i]=max_value - max_value=max_value-1 - durition_sum=np.sum(duritions) - dec_input=np.zeros((1,600,256),dtype=np.float) - m_pad=600-durition_sum - k=0 - for i in range(len(duritions)): - for j in range(int(duritions[i])): - dec_input[0][k]=enc_output_0_np[0][i] - k+=1 - with ScopedTiming("decode run",debug_mode > 0): - # 定义解码器kpu对象,并加载kmodel - kpu_dec = nn.kpu() - kpu_dec.load_kmodel(fastspeech2_path) - #设置解码器模型输入 - dec_input_tensor=nn.from_numpy(dec_input) - kpu_dec.set_input_tensor(0, dec_input_tensor) - # 运行 - kpu_dec.run() - # 获取解码器模型输出,维度为(1,80,600) - dec_output=kpu_dec.get_output_tensor(0) - dec_output_np=dec_output.to_numpy() - with ScopedTiming("decode postprocess",debug_mode > 0): - # 将有效信息拆分成一个个(1,80,100)的子向量,输入声码器生成音频 - subvector_num=durition_sum//100; - remaining=durition_sum%100; - if remaining>0: - subvector_num+=1 - hifi_input=np.zeros((1,80,subvector_num*100),dtype=np.float) - for i in range(durition_sum): - hifi_input[:,:,i]=dec_output_np[:,:,i] - - with ScopedTiming("hifigan run",debug_mode > 0): - # 定义hifigan声码器模型kpu对象,加载kmodel - kpu_hifi = nn.kpu() - kpu_hifi.load_kmodel(hifigan_path) - - - # 保存生成的所有梅尔声谱数据,后续保存成wav文件 - mel_data=[] - # 依次对每一个子向量进行声码器推理 - for i in range(subvector_num): - hifi_input_tmp=np.zeros((1,80,100),dtype=np.float) - - for j in range(80): - for 
k in range(i*100,(i+1)*100): - hifi_input_tmp[0][j][k-i*100]=hifi_input[0][j][k] - # 设置模型输入 - hifigan_input_tensor=nn.from_numpy(hifi_input_tmp) - kpu_hifi.set_input_tensor(0, hifigan_input_tensor) - # kpu运行 - kpu_hifi.run() - # 获取模型输出 - hifigan_output=kpu_hifi.get_output_tensor(0) - hifigan_output_np=hifigan_output.to_numpy() - # 汇总输出数据 - for j in range(25600): - mel_data.append(hifigan_output_np[0][0][j]) - del hifigan_input_tensor - with ScopedTiming("save wav file",debug_mode > 0): - # 将生成的音频数据保存为wav文件 - save_data=mel_data[:durition_sum*256] - save_len=len(save_data) - aidemo.save_wav(save_data,save_len,save_wav_file,SAMPLE_RATE) - aidemo.tts_zh_destroy(ttszh) - with ScopedTiming("play audio",debug_mode > 0): - play_audio() - del kpu_enc - del kpu_dec - del kpu_hifi - del enc_seq_input_tensor - del enc_speaker_input_tensor - del enc_output_0 - del enc_output_1 - del dec_input_tensor - del dec_output - del ttszh - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - gc.collect() - - -if __name__=="__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ttszh_inference() -``` diff --git "a/zh/example/K230_Canmv\347\244\272\344\276\213\350\256\262\350\247\243-\344\272\272\350\204\270\346\243\200\346\265\213.md" "b/zh/example/K230_Canmv\347\244\272\344\276\213\350\256\262\350\247\243-\344\272\272\350\204\270\346\243\200\346\265\213.md" deleted file mode 100755 index 11103da..0000000 --- "a/zh/example/K230_Canmv\347\244\272\344\276\213\350\256\262\350\247\243-\344\272\272\350\204\270\346\243\200\346\265\213.md" +++ /dev/null @@ -1,81 +0,0 @@ -# K230_CanMV示例讲解—人脸检测 - -![cover](../images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](../images/logo.png)“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍 K230 CanMV 人脸检测示例的基本功能及使用。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| ---- | ---- | -| | | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ---------- | ---------- | -| V1.0 | 初版 | 软件部 | 2023-10-16 | - -## 1. 概述 - -K230 CanMV 人脸检测是通过python语言开发实现的一个简单的具备摄像头数据采集预览,人脸检测画框功能的应用。该示例程序应用到了K230 CanMV 平台的多个硬件模块:AI2D,KPU,Camera,Display等。 - -## 2. 硬件环境 - -运行该示例程序需要如下硬件环境: - -- K230 CanMV开发板及配套的Sensor模组 -- 支持HDMI接口的显示器 - -## 3. 源码位置 - -源码路径位于K230 CanMV SDK的`tests/demo/`,如下: - -```shell -tests/demo/ -└── face_detection.py -``` - -## 4. 运行示例代码 - -通过K230 CanMV IDE 打开示例程序代码,并点击运行按钮开始运行人脸检测示例程序。有关IDE的使用请参考《K230_CanMV_IDE使用说明》,有关镜像烧录及开发环境搭建请参考《K230_CanMV使用说明》 - -通过IDE打开示例程序并运行如下图所示: -![fd-run](images/canmv-face-detect-run.png) - -运行结果如下图所示: -![fd-result](images/canmv-face-detect-result.jpg) diff --git a/zh/example/demo/face_detection.md b/zh/example/demo/face_detection.md index 0a092c5..a939590 100755 --- a/zh/example/demo/face_detection.md +++ b/zh/example/demo/face_detection.md @@ -3,7 +3,7 @@ ```python import os -from media.camera import * #导入camera模块,使用camera相关接口 +from media.sensor import * #导入camera模块,使用camera相关接口 from media.display import * #导入display模块,使用display相关接口 from media.media import * #导入media模块,使用meida相关接口 from time import * @@ -228,86 +228,51 @@ def face_detect_test(): ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], [1,3,320,320]) - - #初始化HDMI显示 - display.init(LT9611_1920X1080_30FPS) - - #创建osd图层使用的vb - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - #配置媒体缓冲区 - ret = media.buffer_config(config) - #初始化默认sensor配置(OV5647) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - - #设置通道0输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - #设置通道0输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - #创建媒体数据源设备 - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - #创建媒体数据接收设备 - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - #创建媒体链路,数据从源设备流到接收设备 - media.create_link(meida_source, meida_sink) - #设置视频层 - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - # set chn1 output rgb888 - #camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - #camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - - #设置通道2输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - #设置通道2输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) + # 初始化并配置sensor + sensor = Sensor() + sensor.reset() + sensor.set_hmirror(False) + sensor.set_vflip(False) + # 通道0直接给到显示VO,格式为YUV420 + sensor.set_framesize(width = DISPLAY_WIDTH, height = DISPLAY_HEIGHT) + sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # 通道2给到AI做算法处理,格式为RGB888 + sensor.set_framesize(width = OUT_RGB888P_WIDTH , height = OUT_RGB888P_HEIGH, chn=CAM_CHN_ID_2) + # set chn2 output format + sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2) + + # OSD图像初始化 + osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + + sensor_bind_info = sensor.bind_info(x = 0, y = 0, chn 
= CAM_CHN_ID_0) + Display.bind_layer(**sensor_bind_info, layer = Display.LAYER_VIDEO1) + # 设置为LT9611显示,默认1920x1080 + Display.init(Display.LT9611, to_ide = True) try: - #初始化媒体缓冲区 - ret = media.buffer_init() - if ret: - print("face_detect_test, buffer init failed") - return ret - #请求媒体缓冲区用于OSD - buffer = media.request_buffer(4*DISPLAY_WIDTH*DISPLAY_HEIGHT) - #创建用于绘制人脸框信息的image对象 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_MPGC) - #创建用于OSD显示的image对象 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - #启动摄像头 - camera.start_stream(CAM_DEV_ID_0) - time.sleep(5) - + # media初始化 + MediaManager.init() + # 启动sensor + sensor.run() rgb888p_img = None while True: #捕获摄像头数据 - rgb888p_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_2) + rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2) if rgb888p_img == -1: print("face_detect_test, capture_image failed") - #释放捕获的数据 - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_2, rgb888p_img) continue # for rgb888planar if rgb888p_img.format() == image.RGBP888: ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - data = np.ones((1,3,320,320),dtype=np.uint8) ai2d_out = nn.from_numpy(data) - ai2d_builder.run(ai2d_input_tensor, ai2d_out) - #设置kpu输入 kpu.set_input_tensor(0, ai2d_out) #运行kmodel kpu.run() - del ai2d_input_tensor del ai2d_out # get output @@ -315,7 +280,6 @@ def face_detect_test(): for i in range(kpu.outputs_size()): data = kpu.get_output_tensor(i) result = data.to_numpy() - tmp = (result.shape[0],result.shape[1],result.shape[2],result.shape[3]) result = result.reshape((result.shape[0]*result.shape[1],result.shape[2]*result.shape[3])) result = result.transpose() @@ -327,58 +291,35 @@ def face_detect_test(): #获取人脸检测结果 dets = get_result(results) + osd_img.clear() if dets: - draw_img.clear() for det in dets: x1, y1, x2, y2 = map(lambda x: int(round(x, 0)), det[:4]) w = (x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH h = (y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH #绘制人脸框 - draw_img.draw_rectangle(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH, y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH, w, h, color=(255,255,0,255)) - #拷贝人脸框信息到OSD对象 - draw_img.copy_to(osd_img) - #显示人脸框 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.draw_rectangle(0, 0, 128, 128, color=(0,0,0,0)) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - #释放采集的图像 - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_2, rgb888p_img) + osd_img.draw_rectangle(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH, y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH, w, h, color=(255,255,0,255)) + Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3) rgb888p_img = None except Exception as e: - print(f"An error occurred during buffer used: {e}") + print(f"An error occurred during running: {e}") finally: - if rgb888p_img is not None: - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_2, rgb888p_img) + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) #停止摄像头输出 - camera.stop_stream(CAM_DEV_ID_0) + sensor.stop() #去初始化显示设备 - display.deinit() + Display.deinit() #释放媒体缓冲区 - media.release_buffer(buffer) - #销毁媒体链路 - media.destroy_link(meida_source, meida_sink) - + MediaManager.deinit() del kpu #释放kpu资源 del ai2d #释放ai2d资源 gc.collect() - time.sleep(1) - #去初始化媒体缓冲区资源 - ret = media.buffer_deinit() - if ret: - print("face_detect_test, buffer_deinit failed") - return ret - print("face_detect_test end") 
return 0 - face_detect_test() - ``` 具体接口使用请参考相关文档说明 diff --git a/zh/example/example.rst b/zh/example/example.rst index 5a9b2a2..c592779 100755 --- a/zh/example/example.rst +++ b/zh/example/example.rst @@ -1,11 +1,11 @@ -CanMV 例程讲解 +例程讲解 =========== .. toctree:: :maxdepth: 1 - K230_CanMV_AI_Demo示例说明.md - K230_Canmv示例讲解-人脸检测.md - peripheral.rst - media.rst - omv/omv.rst - socket_network/network.rst + AI_Demo说明文档.md + 人脸检测.md + peripheral.md + media.md + .. omv/omv.rst + network.md diff --git a/zh/example/images/framework.png b/zh/example/images/framework.png new file mode 100644 index 0000000000000000000000000000000000000000..eca36625e7309e56c95f642df34378c25d0f7ec0 GIT binary patch literal 25220 zcmeFY1yEdFv@Y0$1QG}#5L`nOJV5XO!GgO(@C0}F#u9>CfZ!V3HCRJK65QQs8h3}r z8kv*-zxURAQ&X?rOwF5`sT%4~yZ4cO_FnQW+3SRmwz2rCs(8!k#nvK&HlL) zqBb#CGFO+cQI|M3H$i0+`jAFd@0H~H(1)|BihlkJrkp7XWwP`sEk-e$u}vWssjmkHi zzrE0Z8^#`tzdeb68v;}Gzdi834OR`>-yYt-&7;}Boq)&xHedd`*L}LgkJ&ye_RpL~ zhpxl2V=GF5TTh5X9IqLMahKmZ{C!e%ZGDKGG4UyTdHouP{Qt!37}?ZTa}K42<;L82usC3S7{$kB(O*5cOR<5~H^ofM34(@kU8 zxJzZ4um?Nlnez4<>q3pw8`C}zLqu}+V5wFm8E4b=k1WW{4%8YLa0JZCsv|Myhg^Mb zfrsbyyk@nVP|$RIbb9UvD|teE?dQOIPc$_Y#um9$;RWfp=NE4brq?@3 zbfGbNO*4{lO1`Z-8XL!I*7_5-DBN0VBeOHjp2gD3HfbIC?}SvsMwP3PV>zp8HHiRM zY+mEx#pX%JdQDyZP#FucRaxx&En~V{!aD8ZdmCSIc-ImU;bP(e{9SYnu^9*>%Ii-C z4QS!;?t><0tNxC2xvU`Ig0EI4%)XaAe(O`^Ot zu2vtyZM);kc@$Z5GoK4-4eLAQ3xFvB!E4_cBspm`4moNp6LC0kF6M!ZOHia~G^hs4fT3LWXaS;8@UG=E(QsdyUN>!w%S zp#0jm;dFQX`&eqLVa2D0M|Y}BVDJwSTvsPc0i@rbe?YIlZe3 z#YP%M3hu+mdA_#!WcV|aqHK?W086upNc-w__Yn!K6b4l1|d7>ea z#zv2}@Q@aW#&kVXTmGRx`P$WSWdxM3_~Xr1*Fhz{Viw0G7b?&OqC4pW`MO#7qM=;) zOsB9S#ei~UXm18kk?Z=&RjP78WjaPGrpXTb`ssas?%b4A8)&6W`D~t6GPry6$vjrY zY|HSPD?`NW3cInhfQ_yA9hXn;**uJXdLupz8%}a!%O(bM-F-N<$1%aP8l5<#>Q+WE>dtK1TOan#(Sp%{T2oC!<>I1awJQyjavCl z$H(#)VN;AkFFE_}tuJC=dru{G33?2@IJU1e9d+m8FFS6SDL?_nbOvkdeh%vRU)ENM z{S0f0)r>l{QNFm~_i>$OJw4}cr4cfnQ-Sx>0`s?{9s}L`Zv}=X-%I3amR_$5xFtKg zA`xOjQUX>?gUC~^q>Tg~E&C6e6eq}sS#~cjGM^-5Y8j8`K`{AS9FiO)rVn_*V0?)A z=C$ducVWJwq0sny8TOjbp(NT?{C9i^w4@mObfJBQwtaQT-YsP4TvKT8DIKzp>Cn_l zl?l0Wyjn6jK4vQ!ZPZMg(}!_#Kte|n8cH%{HzN1y%vs`VjZ_eGorK@5w^+Y}G+(KG zN&WZ=8`YT-Q4$AwYEi}$#@$7a0v`sNI!}z7zN?psnmB_v< zG?aw2)%=qo=P))PYb0{A5d7P>k`hm!uJHuww>6KCjq|(@(Amb%NY%CHr_gz&v@X5a zl)mdrXiE`g+TXxcKan{-**W)i6AMSmRsGB7P#IalVKK$di-{jD%d{CkZ)P0y*--fu zya^PN&oCFRwNtf-OV6p#IOiJ7+FR4$nA$I5i(q>7VX6q4^4CV!DTHW2M<0>vq}nCp zem5AyVhkG>BsF)Jkq)@^clEhu5*=oE{CE4ItEr{r2|fplL_F8!cN{ybPNl(aA8f2A zjep$>_qRAQ3G5_>PE=~KqOR+o4egBPyipgXJs)rxJdG3d-C+m2m81=kYYP6mXsj?9 z64W3a-uBcVYt!kHrg9=5TKAQ3mOg|WGs%gTCf=-bqoqCrHL;7dgkfdQ{*zR<cC1% z=%v-_Ci}#k?~Q|>#f9DF4&|;(X_vc|i1YMd8AM%;2U;o;-pT@AS5!1SZURuYg`#$h z&f81ZE@f8b35H~M6gH$XjJs4%5#@$ymVv=8K;`6fSZPAub&ztmk!Dwz6*dmd2@TCF zu#V==qdz!18yZZDPN}}`VJ)BiVVcWEoshI-^z2cXMEXLF9<$FxZT=OuPzqQYw?@p` zr{(-OtS^a)6+F48A1UF(9%5;*8AOt*<+00yY-krbXQB4LnI7kp?C2_p4!y)M8YFtQ z@_s1uMYV_1#IB1H2$Zc-^YaZ`YO$!q+Gurt#89Yew`%#%bE+Yg5;b^5uAe}&g;MRB zKk-3Zw!5-iiMPf6#4nRYuoMo(PdB_q57)szX}yE=H>B=&H=Quy&5P@FqY=J&vfv6| zsp;4e!e+Jwq59pb?}r!WzA)&96t++g2A)5T&6LG)^?V8%4N`36T2oK54AoG=0v@Y^ zkDTCN?~lPfCZZAX_SEyvYXym8Kl`Xe6cqcyWqPOOLPI~H4`)i@L(g`RVz_Et5sM-o zb5o{Hq;r|_R)9bHxwhsFb9k|6+=2e$u~W?N(%dLr#jLSGIh25t^{GU{Yddo`W7`k! 
[GIT binary patch literal data for zh/example/images/framework.png omitted (25220 bytes, not human-readable)]

literal 0
HcmV?d00001

diff --git a/zh/example/images/task_diff.png b/zh/example/images/task_diff.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad64f8a58027ca79f9f8354f9a5a5f3c0dfc6037
GIT binary patch
literal 86833

[GIT binary patch literal data for zh/example/images/task_diff.png omitted (86833 bytes, not human-readable)]

m%BPS^chWkwB1RmYtb#9^^Ws}9Sk{r; zV3~OJU(TRYIx*&wiAqRK;>xJ7!UOEA_J%Ht_jGolqz;4YJ`=%vvNE$kXRdlZE^gMw zP)kc{h&wW`3AXFbB6T?QJ=uw%tM7GDj1e$sbH;(---W)~n|zGMQT0jl!p%_?T54tzzi?n~3! zASS0-pqp~gir0=YlNx49G*Bqg)>>U}Z8}IBpG)p_y!K>lXixDpg8mcE!kYQ4+k|8M zW6f&rUZcOxkQCK}H_3R_W=(ci4dn|wRven6rMZkK5hXXL(lyzqEn88tUcABd>&0&#l zR|t+;5u!-PqkCNrgjDpijlReBgC=PGCFQA$^+pH>i^RV?t|$IUKREVeCTAmfQK17b z8!I_&WZ0oQT-)tK?Bo)%ubqITTlXXqtxb$9F(btbSmU5gXMcul_8NnQf0!>W$GXz7 zm9EffXE+~7jah-8^D+$Y5U{l6-f^4E9f`&Dv?nrf>Hy`N@y zPJnc~@u;>9ZVb1+%k5oJ_Fuo{j%^;UZs__f&6r^~^3p2zN~gi%kIUhkcnUF7ABZa} zgwSQWvZOqW$QPC3F18FGkXQXrmFU>ieVgA~JBqx0kW_9BV+oGMVIQccYx@upzVYXSYqlU3hKMwBj-VVf*r3H?2@)C-Yh)@^x4DQOv#CG!0tWeM91Ze@!C?nwh5RwNJn{p zf!N9oIbRcReC4mcis@Y7J^Mi`@2Q&5V)b>rRS?=Vl} z{4ea02dt%)N=ZThAduDD8WfGsI~iNVqv91*bMgv$(JC7#(b~e`9;k72QJ$uo)NSIC z30u)bt%pIZm7e)wV~Jw39`_y7LcXe0;gbXfM8f;)4doYQX+1dE)3#+X(Ft;yX1loh zlJcH%{j+Med4pQ#osfItU)lF_ZJn9pSocNTwo<>$I2j_Ov@}7)E@Rm=Su51P8V=Q- znxuE6>`$(oXivT~lgG!>lOhQ`qJM&jD2kjteGH(6Rv!2f-DomK?V3j9CTv@uE6NYH zp+3whG=I|ajn;$u(80=NE?;JBA4G_S^O>8+y(SpeGr2~j{D!|mX-(MD?%0;?udjlw z+KW7rxTOjw{72x3qw6&Wjig_{71By1r&NSZnLR9X9UaC;)F1L&*M&lrx7};w%#(T@ z{OKgS`Bu^z_<$caOPh^)P~#a!K)w}D^uMdfUB6rn=q?VJh-7l#S;@64zK}lJlX7qd z>>jvOlomjyeZRV3hl#TPya^6{_R*T*ylYVGUt14|y;lDF&0Qf z@IRpoRmPuLuN=;3xyH=)ifuR_%0{kel=sq~aqqkgpN~AvMHsceW9(aV^J~g|F?Ln< zjIN{b#AB3t$lfm0UaLQ)kdb}K-j13WYIQo_Api$4gx?jhrGtE^J@MFWUgB7AFl~vO zj0h%%BCawIR&Hf~iQo_As&TxoCa8+=9?7?Ki%E}uu7qNL*0wKgTMz-g$6IQOsET>< zn0)C?>)sv~{zAI~DIvX6$?|}`mo_d~l7(mWiNrnD4jt`C9P-B1ie7fT#eN60GJS&g?(8zF3ayXpHJee-g(g;0n zd4CmLH2tMq2AxH$(DBIBYHDD*2sUc4Cp0#4)OI5kp4jslvGJ5m$IARSs{lxB zS*!67#ZwDe#LYIg!ZSFG{fon5#TOmu%4q2IGCf{?jvwOW^KxZsT(9;7qKRn>#q;ee zpT9A}7XNLYaWC+5QG@zgH>qzn_cmw#<7jtmLuS=eLzl_YWH5x^RsQ+K>9^NWU6A6f zHf{ex-I{Ok{x58h>D_v?oU&vY&5@`)G9q46SAjZoWlnJ+xS$ZGuBrLKVJ)y34-Fys zIKqvpxlzjoN1zf`tqko-$L-FwiEs(tOkby}PE$?GN_krM#=P@_gRTa#(_ewxvBMoE z6(&Eu{QmRqz}fLugL1f|;pgg(6m^*#*v4I%ovB|`%eFvpFuGT^@=-_M#Q$%)8sgOY zn-stx0p8XeZYCxQeb4ph`zC0%&ozl^V=8;?lTdh{5oGwz%=pfdOc@&_ z964~n1w_)7*#QD^^&DlGL#dMM!W~zSl|S1ldkzAL!#eo0B8gx6`zF1?Mk)@M9Xo+ zNx6>Oc|YauVB~SEDtC(-*Am;EBp?6&@u8Oqf=1H!wV#^{zpU&y1=PxiHL%*2+O-Zl zt>o_3WCb!^3Nv~at@aO*2#sUj`7ra>xUhc^Q4L8Bf%!F(GVp`z8eNvp&3Euq_IaRr zX!ejXCRa6`e*v^Sc{_eIRqx~`zo>W@d1oePKmm{LsNOY+V{_(f`%<~xwSW!llqEb`FV2E=1AU3Zy<5#b$JJ6P>?gbN@t>0`CGRqx+FO3LtI;~^p- zKF!s#Sn-Mt7V00lbaW{w8<>fF`)Y+r{jZ%13HZR*+zYl?sVyuz|Bwd**lQ9HZRlQ4 z=A??vZwxf;3Jq79avCmkI+qFjoUte-@EE472jNbv-~1~jGYUGEe_YM|)>yJ`S4TO? 
z7EHy2a4~753ub=!aUW)(;91Otz+s>MI%66ib9mjadQ1Z`X~1)jC6)YHP3qJ;F9LS< z|Fi%)USrrQ_myeS10saq`^)&-9Su3!OWxPRMQz$X<8A7^Z6Ta9g{@=g+#QhZFHo+& zR2DROQPZo8MWP^V%WRY2`*5+7u8!38oqiezKXnIx!ZV6fL-XA+VD3I9YM8#thTsb~dmmYQF z(a4j1lOy8;)#N~9!Qu5!epIM6JQ@V-HLgY;gbH6Pvaj=1xF;dLtrKf%hGJ%%e@~J; zxuf|n2I)?P8_0ZaDNjd@y%W~FgPF{u;yPj^HME{56q)FFGjD17v99Iw<6(}38n0y9 zc!;ozwU--mc6qQ!2>R$D9Vu{%g<-8y385S}OQv)5Z&JyeBPY!4Cy` z2oT0G(ZfRc+ZOCz0Da-E;#GyWR$fB;_e>0ktCG(Cd1#OmuPtsL&q?u^W&{!qb;wGT zFAF6)F`kb5^y;R5lZk9-II;;1T1z}*sIM(&Q=i@e#Sy$xYXa80m)YBg`WpgDomKJC z_UNSJc@3}riv~d!m%uxc5TY~f_E-U8(+UIHt>~u=TIZk7vkhGia{hpyYp(LiTL8iR z%5?F@YpwG&W3H4T9U()L<3krd&{SP=FBr%smdF$QjxVXXzgMHRY!mcn)eCH78aqGI z*vIXqM{?19$v>Zx0k!1Lo?=8V1*Nuh_MGkwL8CG0x_{C^@Hi;+)_r6*E|~^2dM7o} z{kZ?uZU7}M_&YYBd&D`a`np|LSx1--uiCzSGVGET56muCmAkb%o=@c-W0&9sXmw1M z&X895MLpGO6KXf#32R2n-%@dgvTE4~!T;^8>S|a>?0>qg5NNAzX+N%5qO3@^a=dFN z1CnYd3@d(OF#4$GQ|)oFeMMkW5_qO@F$Pj4;EfduUfM(-~3o0HK zrZFIl+qLEK&OEt{?1l!6c9Ne{!4^w7&Ml+kVp2P04&rQ131IsPs(6Io+;Cg1YB-D# zZm~zMe{Ih`7NR`t2wEtSsSmMDZM~%sZN&YR4Jr#|%2OMU2jgtq?3?B8Hg{`t^CkJM#Y=IRrwsfqjRHyYqH~JF5k-iI^ymjU^#AM zAtU-q33y}CwNn3L=|?)6S`wr_ev*!40BeK-;ANz6-Oaz%+l?e>yLeh2e+jQneICdN zoy2c4ml^2OLooK^q7%~s)gL?ji*Ki8OnL+4v;lR|)9v3yS2F}E3ci$Uxz!(}!vHhv z+yG#VP%ybU@M?E57m;{B76OM1AHy!LY$+~ixhs_6q3oQDUCx`m>e83SFYSBM;>65` zb}rG^`Qn@FlqB=pICE6_w0xkhk&p$B3d=CRkoON?2mVZ3V<8k~(TA(xJjHUpXtikh zOH2)Nu3}BVjiTy)_RzIqZk`3>$VEiB(qS5UQ!}#bw@Jo-Sd@2du5`vwUzO0*(E6Wx zbx^Y~VEMiDHHMX~9!%f*+S#_GH{>AC3=~$}-AP>=8Qis2brVMhF||4~H6B_Z1>J<^ zy7^Q!WLSrVHM<@S*1z0JPXjlM%gVOn-2p&i9TMWwC{++ zmlZ%)926_urh$)Najl^14$8v-Q|Usq*2mIny7?awfH22xC%=2srS6;cN{4ds?qj!S zPwff48#wrznz)1VJo7eEAr5JO{1?`#zPKF4pBPed5|a6Gb0YWZsfM{%8fKEMH1#v}pq9&J26Kn0 z`WE}|B5_8n>8UY=B0`GxBek@K5Zk-A|-CpslUz zlpod(c%E7n-}a!8MQnpCkh}n3(+=Uf830^P=)8ZeNw67d;JrMASrI&QaelKn0_<5~ z2YNJAN2`0EHY&m>lWXq2ik+^QnB*l%MTR?=DXUQP@b*-Gh>ItaUSWKXsF=xN4qc^2 z>;?nu>BN?Mlk-M3y$fh5KF#vt_~*_q7|%htoUao}4Ppjt9%;oHhk+(o`@L-S4j|+D zBfN37ybv#>qi)W=?7)_9rJl7fD&G6$#2MiGew{GBn1u->TEpk`qXRj+hgg?iu4f{O z#^Me@&;y&d)krIx?J6<^d5RpJmWJxjFa@m|ri-c`fOijqMdHWL8jppI7ru*sAIv7w z983#bbYUijWOyL`_DscTwK8d0)0!$ASZ&cdE>G-e+kPhm6j_m2MU>TWRXJLUJy>4d z_Fx78Gp|1K;f7iKHj>At*&$Cq+0w_8FpONV@v-luauM%&sGrCLK+BAvArF0C$*jDR zd;plb6t~4HGkWl-jdra#kTy~X(>9`@qb4pI*46C_o=))rPK?i=zepLZ7j(YR8?Mt* z4L7-86n4Ezq?Ih|yLTlnF1I^+xshh3Gb2)xf&+qcJQ*SPk+EZ2<-!RtuULpv>FmYBk{WkpDu*eO(#AiG#2|Pt zw>qcw7VKGEcF{~LQ;Y2!Eoa(s^!fIi7dm1$>eDno4{5&t6TX7_7By@=Qdo~%pgl)D z*&kgj0Fh}}E;)>D5%xT0uqWIz+N$?zZl@Ackwmjr|qJlie3a5U=Ur zq<}n^y3eW1Aht~z{wunE!QCf7kZu1g!$kon(&dPhz0dN#bfYSOo&BWC)WXWU@s`g} zTh)OyfTc zx($t_<=wwIO#)=`Cu>pQk9Mzmzrsvm>*3;3GwQ3PQ!y!2at-tIImsLTymqUB4_`7# zO@{{{^No#f`1E$cXOXgNN^+`0^JV4b!My*#Gk81y5V00v28yU8jQfdaKZtG>34Lah z$JZ^0ddss|Xo=5s%MoEp1GVIL#ji5w zI7PH|t!c(aO{F#30hdzqo6mCj+xGQ_Q%luep^o9HjZ8g00~hl8NK>hbuuL4b;VyXB zYi_GfGG0jgsp?%OyT*f()|~B+NUtABUR@S4#+#b~R^PPv=@n{SN_0+OH~Qsp@!1;m zV3E71hoQ4@-G{)4Z|X;+nllQy^!+FVl-vg-)mrahE2V}U-e$F&);lY~vvAo|5obOu z6>*@(L}awLEIQT^fHZ+r*-5Ys4&f062?`sg4I ztrdLy`ZVZV0GRxswN?EaF+u>IfHS@p8~KaiE61r)=rD#`F1U&tR_x z7_FO4?T|OvNh6{T{NyBahAqT4zqN0)*HhF5HLsDfo}z$ZwTa{1^h|fg%QD53HqtTy zX-h<91rCtF;rNrzYDXi%%oKqe#0(Z^^mgLglG>H608__(Uu``PNA}?^r|_M$JmrwH z*4_GJ>S;R&o+R8Fu5~t+IcK~~Tsb;fBz*!H@W3->#CVuFPF7gmvbq!3w+m{@F0A4o zsI8#@*d`-1wLM{^=$DV)!)d9VkfSSCAB(P0h_EPjSi8nrG9jzFib$~&#ERN;D`{zC z%j_X#Q=>n17)e{`7OrmsVeB8)@V<{MridO(T7O7FFB3ZFty^u~Yur9G;tQyF;j#?@ z85eAZa%7*$Q#*ltdZtamw_99bU%L%XNzRU95p6No$XT|BluWB~DZWnYVMNHfyjMBpZ#8aBR5WdEf&$tb|ZxwU~d~H9CS1Pm; zYfGuN*)_|D3~)QnR`W9iq_yr&qu5v%h&rz`nd%am)o5J?#NX{|G88jlfI^6Jbfa-; 
z7qN9jT(aG^GGV*DCmYk_-{Z4b{DH$h1N%nNv##b4g=KOCcEGk)UOb#0lw$^uyv%O4ddFD`Pd4r(ic1Zd@n+Oe)ns`s)G zK#{YZ#l(}8gkahUyXuW7?K}Q=ufxd6Ll*vrgNMy0bZd+tnrMo%bU&bkJQj>m(N zZMMZz%j_t9=3|wraiB$~oZV(7i63{B3MH1*EAy8o3lsnt^5*v#ntdb{)ku!wH=|5x zo7vzQZ`Xzb@_yOf3BNLZv0v5zNfOUzel)Usby_Z@Y>rqU?hmAEG;X=eb{O_>RwQnr*l>LTRHbdNswoc4xey8eAVemr6nPu z#4{_L+B8-ilu{d0&SsVp-fB5tnhK+Lzl;?&-^A_jYe@59CptFiq}$xQo&B9qujhPp z>zmAEJOO#l{t^19iwRz=2;%s#*>;`iakP7UTw16M`i7Zp{v~b}9dKabcqiuj?7h`T z9u_~YUJ)_B7a+gwoHKN>i+xT%B-wd+kj|%tplf+>RGoP*HH(z9@0SIoTL2B{&L*zM zV5L&_LX;5?7v*=$_g=05k4I;A}T?&FeJD!pMPh13~+J|E1r97(<#4`QEoJ3fYv zik$zJmN?Vgz$%Ira`)lm7~p+apH2V(9~kCTmSFzlevd!k3O94zR3Eb=!kD_w`->;M z5SR3fi+nGBc~JPVwx7hCnx1jLUAJNu+OW9Qk5R6d8Z^EXrI(53G}yXOsiA~o5$An? zC9C-rhBCbz*FQ)e7JToxDy4140|~^OYtR39`)y;Gi~a~Z2Rhe4vW~9@=pROn3600g z`0#3WknMtb1AVzu#;*v0|2jE!2*P?$|NU!j0Xi!DJ{-Yv54nMcDbAKKG5)(iJ&ok` z#ueC(f&*lBCA{bssOd9Ge&Xv zEM+UFEQ+Ki7{YZjo4(Gx&U|$u!%uWE+;&O&z&oQ30W4n1!Nims2R6y>@-f^{gKizr zN-!2)sVuybpNA6n!Md{j=i!cODlQ_71A?rAf45*9px$ub^h>5@lyivBYBnc&-kR)K zCpD-$i=W+abL$6Q1$ikie_J`lski(0-F*#_jNLmsV<^2e;@#Fv2@Y?SWK>g`WCa68 z3kLdIqQ(I4zen>0;cV++#@OGdijt!Q^d>XCM|f*4-7|cBWDp87O~|^E_|Qi!BG^Qq z=qQMmT@UuqR+yWNOa^@AIKRSXBZEm|dd74Rma4c7nwJfUtF^ZT|4Z3Kdn!L?XhL~6 zk&~7iNoOwnO$a!CH;MSZB7z1nBZ{JV#eEgq8cjkn+a%W(zDtGLDH?pe+%n+( zfod7}@A@Qp7d0{y=@`j$E)CIPanQw7Jp*k`yzC_*w zt76FHzYJO!aag{|#?Z4fTCe4~OhnhY0$S7}oN!HyzQqi7RCMNSL4Dc?u4|>$5qLWO zqf`72R-70fyDc8AtVUl}c=<7lz8Wj@gfeL*3@oTMmEvM{VD$nSI@k8^20PZ}UR+Y6 zyBFrtURJxq*NS2wr-t@EnY>58`aY~yh>Q9P!_DMD0cy$Xeh}v=Q<`L>MLGsQuNk*I40l?Ye%(h3d?3L~|MkE`Gi^B$Bz!lB? ze&K&x5%)C3ki;bBtS|<3a{%*$r5FInQ9e;NiBLc!o0sKkW(?@kQL6iEa}t@ClD6Fj zVXwX8MH&jTC^AkM2w#}PPqI$Sr^alo2>+oh-5AJ#{=t#?3L%|9|L*kV%J1);Npj<0 z8s+whk2Jr93}3e@?%Ds5YKJFKxtx0=?d^L%eTD@i-qiV>L;m-5$J3og_JD6~bwcZ; zzGx7}r}eG-CddHzUOyZNC6RTHc&i_uixd8EQNvJ)Bd|SLBXRspUYTnF80sum`Im#N z8m`%T6~P3%M)Y@M!B|%tnAR3yw93F^+#Q)0B=(v(mWw~wIT^m~@Sza8B`ezv#}5I` zCZ4t$*B=fnDo?dd4T|97T~M6pA1Ev%7c@(EoONgb!KZaKyYcyOEN3p6i8A%LcmzSr zb;4xp0cTs+jAj}D7T%`pBQx{QJL$w7SqtzkU*9ha&IXQo=}A+*APKO`gtiw(gs>w* z+qf=xRIw|Pvfh!?ImCy=lwK%F&FrOWU5p4&AH4?w-cAN%^lWlMAzH;VozsEVP~ycUuY$03c%yo%?@9 zh34kwIQ74a-_K&-Rpypgkh$ahGF(oCl(NX1QNIhjZfzfh1qA1zy=P}S`!9pnDNz_3 zP^EiH@s{b10zVcJ&px88SLm=za#g>SFetWbceUhk~^ zop%A(y7#k(I9Uq|?|X25?*d~ha7Pi+ z_{MOx4~1^!YO#>tF#;MN1N*<-e9??cO)pGF#rDI$C4oV(;Ntl2iYpQfZ!t+z*nnCJ z*ZlD?7i-kotwPLW6V5V{7=lD1E*#G9mHrlJB0TZZockL}Qu@Zp`m035!M(JVJVLTD z^G;lm5Ww_sRqRJs0PqeF>f1IUZFoN~+yGyFgC)LAuYM%%9nq|x!pighFEVu?XC_3; ztEIdvt9N6l1*tGw3k|Ft%rCIxXFAfKGK+^ zCuZ0h-bRyAAt)x8C5iejCU&CZkwo)O> ze(l812LrOsuY`!nttEGy7fJ-!C#vTtJH~?4I!$+}`KLSzE7<@b(Phul>|D-q4+HH> z*GLEtA%yN!t#Qjz5_b@6a3DLu)#-jk%4M5*mV#Dylqh$6_p%$(E5|!$=Z|anM#jup zjZmEn-S?Pt_IxmM`$Ndr6iMYJG3c};wr*XW8*mK|D)IIv&@b>vGTM~fzlZL#rZUdO z>~ywiw_=Q(7X=HhE&UYUE(u<`&=zJ3=nfFM&pI67jI1x0$R^)kykveSe!_{)uEqjl z*XMy`wRefpNeGM%7i7WOyiIqP|4R`T6_HNY*4)I+od2eA6!gDvfh@N%N3PqI1rB_k zt?C=-iA>UO91M0m^@sovG+X>U0l6^lZo%m{kV)s1T5mFg7%0WIfL^R{|MKWI=6BPY z*6CxCUM>?sB42nWz+1^q@O$2g)WbwjKZ=wd)|)V0oyebKz8G;fIk56-FG9nygTboLXRZ`9z0E5jCQ!_Y0<39;b##Fp z=TGB%zLV(DcG~A0Z5!HP8cN8JW1AtHo?4a}ReyKlqhz(oFcEpv8%jdw*mnNZ_yh&$ zED2m)Y4XrjX{PINJst|slr-oRiU#HWv6HyZ=0cX1F86OSq;u6*p)P!}z0xw#%Nnny zEb>JAacnqc5%K5iJ2o6}_+N4~N;h{_mt0P~@H`W1Gyre&Q)%D4(9u~~nCMfNrU`U( zur{%8rSm?^Twy)YIfpsZYFbLgZC4&5yD)4@VTw^X)Vm�Gj=X?Ti=@n?8|H98s` z#OP0rerOeS6JImuqoa%cn;BHP7#lO7c>g)yr}MDNDRDd}8E-e@mr+`2>hvue<5H?e zaz$Z|a$xyo$lbS8XOxac1(8lIKaP+oW^G>-&ewP;3Lb2^nn`!s8n*_ndsx%;$#Us( z+fP4bcVBNf4iH$pM^qEd=%(1*lDnXPI;m*>o1jRdkv^FN*cs;iGA z5@7d0H5W9Q>?tY=k?G8&qmLE|3*P0#liE>Ti!!$2r+4=tZV!$RF8Vz>@U+B;!0F7g 
zHql0bxKEM(94n)J!h+^|5djJTdEPDNC2iD2ql{0J;bC@L;!_w2?tpg%-oqh_ITn(F z8*XiH`MmG7I)&XS_9^&Wjx_Qm+}*0}QFmKB$b)RKz3%$MhJLT2#*6^j~r7N!YPmQIuY!JE_8%Q5zYh7^ce0s6kfT zLMIM0di1nO5z9zna#7$lhoue|SDn4WSJ#5 z-c{W22>Hb+{~^&@CB72^-;Vy-a~bryCmLAiv=?{e%BfTAl#YU$y1E)n7^NW#cR?%R zu8|>Hvve_~guVU!#-j;X2aF`_2#kb#Dxv-%mBLD;=N;++0Fu=K7 zz~5`C@1!MLV*-EEVEhx|jTeh)l(1T@gw6-~W%WP?eWYMo*0tQ%?cw=)-2vjW^tlK^ zFXUwx&&EPE1*I=P6)FntQr=CO4ej>9!gn+h-wBqzhR>N88C(e3gW-RHQecyp0`%tI ziEqYh{V>O-^lx8?e|v-0s>;B=BX($GlOJ~y?$PKT(Sp{$Rn$Ao_*BKXM*w_x_fAWzy%(Af>ay8tzGWebNv(hC58%?f^<02cMo-VZ4lR!*H$Ty+RyD zdr6PZbjyttlJTP&^EF{~nxM34)r%ql>AWkdxuy}t4o(>!C=?VR+d$-d zSks4D+ATtrz6n;)TGY|+*W7?fS&QQ$9MGBfK;>G3|2oEzq5J+?_i&Iu+(+s216lak z^IJNPM?N!>`TqK)z`7)=W{Z~6Fer*KB}8K)C7%fEwBYaoq!Xb^97SR+_MpsZh`1npIHKgp6M?A)3bAW zd_xfOD$RmZEXpe6u=T?M4jahW2pdsIa7Em8Np4Q4-?Do>7BBC^OgWpnqaV_xe%u#v zsq6q*az}vd&jH!L328{X9RG_y`)k*UL9)P7(Au}Y^yX!0W)%R?Vq50^L6mEIqa>?* zRHrXmnT@qfS+U#xbiP)>f=RQ_gOo*$bRac3ca$e`RF=P#?5(yfF8!cHwDD6Q!B$`g z+e3V)NP4W~QFnRxd#_vER#&BG?NdbxWYH^2@bbq}T$e3ldA!AeucUUsN%*Z_bhx(; zHQ?+)qu>9ll_ROSZ5d?MbbEQ7bYi+tXxDR-{Y6An>zIh7sWIWm<>ym$pho}jK|9%x zd=*u^=#JB>IGnbC2?k@UThy(-*Amsswu>hNan0<*H`TrSW&I_uaR*J?72b=TOWVTb z_+&`5B|&%p`T{(l$P5n$r%rpK2&$w+Kv~g~xWvA{+19Tk3l*X@0%!5|CEW*KCS*qE znvk#3^X4PRg7E3o=6Z^O)$Ku36aoO)`^!1wif;Yv#!RM3_{y2SDmb(|xY<${x?@^s zvoH_29$aXHqsBuIdt_XrYL7*=g31tq7WJcB6p3cLm@Qvmu zKpLUHBYY{-R5^1zdeAawf0n0^OHf0fzWo{mzz}{3b`SAd)ze`C-}&rR#+C5k9QMpC z6AXzH?GOn?EdoUSe1CFim^Ik?Dv>t&Ho47=F-u+79#({Y~d5n~EbobMjSDe^(hR z4*sV82YdAzZF9Bbx;3&3n88$SRwfugw?D9!gmU6LamS%%DI)xIx}7YD>`)aqO`%JW zw%hEFbYrzodH~v5qOl2qW#U2bODPARs$->n9U@z?T~Dlc7CVA}`qe>y znV|mjMqhi&0mDF8!La!wWQ05sH&F=?6jAf^?AWROTXo9U{*kkAO_M_g<~??zo&*0u zkay$#nzYQ#4-&?Ys`yK|!v~?xHOsxO76!P~OMq{mG+f9vC&qgdlgV8p{o#Q}Js{X_ ztpjuST5BeF-=^U=c2| zIhgjN(E7`VcDwBQ8+yibF}3GT*~EJBgRxLDU@4SE%`F`oMk0Mvh3#GoB$({tXIcC9 z?H2~X$&~Jx)h)3qTPwxH;sWTd$RKxW{41+x*OzW*q`xzG#&!RuY8x}V!9TQKTRa-= zwG76!_DI~c@OWA|*s0Q(mKh3N`3`s)7;Nc1#ivUM-n<(Z^#u~eAHo!J!Dw#Fy=K)=ErbKz_~Lek1>&Qd&rX}jSWw+(hrTsmd1u|Poo zbdMcNGH7x=sB}V8RQ4&M$^P;y?x+y|q~MI9HnPQIQD}JiJ6-J6E}xf{Tu=E~cIEO& zl~RL~74AP2z22$8gAI(BtJLROS#Z85FRsCm-C^iR^t$JqZIERO$C8vjT$p=a8#}RF zg!`p!R1c2<((v)>zy^|CiJ>(t&>O~?wK*D0Qm^5Lm4bpa`dO65penmEI zk=*F?ycdpLlgIC4O$%Ufb|;7H@)Vp%cz>tm6j-1mJ}#_XNL&fU5=X7NvtHJM#><6RYnGU?HPc@yZDJ?zJEyYGLJmA(SEm9r2s=jvsPOd5<&|#8p{YX_b{d4(xmH4 z-NmgnH#h(eE3WLh(TpXHl?k_TdJHdE2jA|>KdbEeN_-9X#X$KjXSO-bf}M|jKEEUT zDf9I@F_ZMsa)V#(Cr#_lQwf(w?9h7Zz4LvUfMh~hN0QsM>PPS2hCFIEWIgaSd`N>x z>P?ZAeP9zEslOhkM^~@6Xk)z5TG|_i^>>doWFYT^#+Zn0Xy1R9Fsw6zaM+(T;-ED5 zFUa2I7B_V}@EdM)dTM@o)>_yFMeIj4N=r~71B|K}JC)BTiJcUz{m(1@Ovxm->8Zjc zuW0BzCPg;0lP!|pVu;{MofRm~1v*!1^^%5{kNLNs{0`C)$|(svSR|C<0bu;LUj7@J z3LUq~H#lCx!)@Y>@K2ykd?WTb3j0J$V4*l#@OzYNN;IIm#UsDRp9NZt)*N15tL=Fj zADFIyF9TNReg@oK=(fUF6qtosWx5tLR{JBl*m@^)?2$5$2z8Z{Q+>d;ekS)eO6KxE z3dR8d)d73~_xOw-u>Dc=<&dn4=!alNJw$|v^ z73O(&z`XaTc%BC&k>c%G&~;w&A494q)rIQ6$VEFvIzkKY15wf<{y4kiL~Q*vja66T zMo$u*Ltw#zNW35BRbi3A=c_SMLHl0&wtSTMV~Cv^U|0@4l{QPPiQ}tL#)qq^vZ1-) zVn+6AFPO9-8n`pbsCxTx?qPqdwNqgo+#6ILxP*iK!{!Kj8^_leC@|N(y4#aq9P=<4 zG@n1;5MWg}e&CY>ffo~1kA(s0!O4`L;k16bWcz!Khbd)yX51Cz$pqv6qg#R~Ju%u` z`@xcCC7Tvnvo40JF>8Hgsvm`E;(LtuaZHC`<7;abzy@F4zA^Hx7>OU`YF6$@UV@sj z{3m!CK6)GO@anA-cSoNVF6v^selhk5jq-M|l(7AjDISUJ5bu$m71j$@g}wHS4&*^^ zfzNmQd;TD9-lpufeA){y41g9MR~PCe+9I}tqc$fPsI&vRMv7B^MSP_3M{r0`82}Md zvZ1E2uy*|pFy04t@1Qe)&W4JAlQb_dnh=pcT@hr7vaaH)KxZtChq7is@51?hl8*o*Wt^ucS5`r&oe z3$@nGey$JYtkZ9}%>+)2v}az&`1+T)qo%9r$~5X9^mV*>gvj;G-jQ$IHO$UqQv&~h z)FpT9$Lk!Mz`us|0bdEe`yB+~UMqSgPid5YvHP7W88#{K(BmC1o6{#@gl?HYxY6id 
+        #从音频输入流中获取数据->编码->解码->写入到音频输出流中
+        for i in range(0, int(RATE / CHUNK * duration)):
+            frame_data = input_stream.read() #从音频输入流中获取raw音频数据
+            stream_data = enc.encode(frame_data) #编码音频数据为g711
+            frame_data = dec.decode(stream_data) #解码g711数据为raw数据
+            output_stream.write(frame_data) #播放raw数据
+            if exit_check():
+                break
+        input_stream.stop_stream() #停止音频输入流
+        output_stream.stop_stream() #停止音频输出流
+        input_stream.close() #关闭音频输入流
+        output_stream.close() #关闭音频输出流
+        p.terminate() #释放音频对象
+        dec.destroy() #销毁g711解码器
+        enc.destroy() #销毁g711编码器
+    except BaseException as e:
+        print(f"Exception {e}")
+    finally:
+        MediaManager.deinit() #释放vb buffer
+
+if __name__ == "__main__":
+    os.exitpoint(os.EXITPOINT_ENABLE)
+    print("audio codec sample start")
+    #encode_audio('/sdcard/app/test.g711a', 15) #采集并编码g711文件
+    #decode_audio('/sdcard/app/test.g711a') #解码g711文件并输出
+    loop_codec(15) #采集音频数据->编码g711->解码g711->播放音频
+    print("audio codec sample done")
+```
+
+具体接口定义请参考 [acodec](../api/mpp/K230_CanMV_播放器模块API手册.md)
+
+## 2.audio - 音频采集输出例程
+
+本示例程序用于对 CanMV 开发板进行一个音频采集和输出的功能展示。
+
+```python
+# audio input and output example
+#
+# Note: You will need an SD card to run this example.
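+# The record and loopback samples below assume 44.1 kHz, 16-bit, stereo capture (see RATE / FORMAT / CHANNELS);
+# playback re-uses whatever parameters are stored in the wav file.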
+# +# You can play wav files or capture audio to save as wav + +import os +from media.media import * #导入media模块,用于初始化vb buffer +from media.pyaudio import * #导入pyaudio模块,用于采集和播放音频 +import media.wave as wave #导入wav模块,用于保存和加载wav音频文件 + +def exit_check(): + try: + os.exitpoint() + except KeyboardInterrupt as e: + print("user stop: ", e) + return True + return False + +def record_audio(filename, duration): + CHUNK = int(44100/25) #设置音频chunk值 + FORMAT = paInt16 #设置采样精度 + CHANNELS = 2 #设置声道数 + RATE = 44100 #设置采样率 + + try: + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + MediaManager.init() #vb buffer初始化 + + #创建音频输入流 + stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) + + frames = [] + #采集音频数据并存入列表 + for i in range(0, int(RATE / CHUNK * duration)): + data = stream.read() + frames.append(data) + if exit_check(): + break + #将列表中的数据保存到wav文件中 + wf = wave.open(filename, 'wb') #创建wav 文件 + wf.set_channels(CHANNELS) #设置wav 声道数 + wf.set_sampwidth(p.get_sample_size(FORMAT)) #设置wav 采样精度 + wf.set_framerate(RATE) #设置wav 采样率 + wf.write_frames(b''.join(frames)) #存储wav音频数据 + wf.close() #关闭wav文件 + except BaseException as e: + print(f"Exception {e}") + finally: + stream.stop_stream() #停止采集音频数据 + stream.close()#关闭音频输入流 + p.terminate()#释放音频对象 + MediaManager.deinit() #释放vb buffer + +def play_audio(filename): + try: + wf = wave.open(filename, 'rb')#打开wav文件 + CHUNK = int(wf.get_framerate()/25)#设置音频chunk值 + + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + MediaManager.init() #vb buffer初始化 + + #创建音频输出流,设置的音频参数均为wave中获取到的参数 + stream = p.open(format=p.get_format_from_width(wf.get_sampwidth()), + channels=wf.get_channels(), + rate=wf.get_framerate(), + output=True,frames_per_buffer=CHUNK) + + data = wf.read_frames(CHUNK)#从wav文件中读取数一帧数据 + + while data: + stream.write(data) #将帧数据写入到音频输出流中 + data = wf.read_frames(CHUNK) #从wav文件中读取数一帧数据 + if exit_check(): + break + except BaseException as e: + print(f"Exception {e}") + finally: + stream.stop_stream() #停止音频输出流 + stream.close()#关闭音频输出流 + p.terminate()#释放音频对象 + wf.close()#关闭wav文件 + + MediaManager.deinit() #释放vb buffer + + +def loop_audio(duration): + CHUNK = int(44100/25)#设置音频chunck + FORMAT = paInt16 #设置音频采样精度 + CHANNELS = 2 #设置音频声道数 + RATE = 44100 #设置音频采样率 + + try: + p = PyAudio() + p.initialize(CHUNK)#初始化PyAudio对象 + MediaManager.init() #vb buffer初始化 + + #创建音频输入流 + input_stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) + + #创建音频输出流 + output_stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + output=True,frames_per_buffer=CHUNK) + + #从音频输入流中获取数据写入到音频输出流中 + for i in range(0, int(RATE / CHUNK * duration)): + output_stream.write(input_stream.read()) + if exit_check(): + break + except BaseException as e: + print(f"Exception {e}") + finally: + input_stream.stop_stream()#停止音频输入流 + output_stream.stop_stream()#停止音频输出流 + input_stream.close() #关闭音频输入流 + output_stream.close() #关闭音频输出流 + p.terminate() #释放音频对象 + + MediaManager.deinit() #释放vb buffer + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + print("audio sample start") + #play_audio('/sdcard/app/input.wav') #播放wav文件 + #record_audio('/sdcard/app/output.wav', 15) #录制wav文件 + loop_audio(15) #采集音频并输出 + print("audio sample done") +``` + +具体接口定义请参考 [audio](../api/mpp/K230_CanMV_Audio模块API手册.md) + +## 3.Camera - 摄像头预览及图像采集示例 + +```python +# Camera Example +import time, os, sys + +from media.sensor import * +from media.display import * +from media.media import * + +sensor = None + +try: + 
print("camera_test") + + # construct a Sensor object with default configure + sensor = Sensor() + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + + # set chn0 output size, 1920x1080 + sensor.set_framesize(Sensor.FHD) + # set chn0 output format + sensor.set_pixformat(Sensor.YUV420SP) + # bind sensor chn0 to display layer video 1 + bind_info = sensor.bind_info() + Display.bind_layer(**bind_info, layer = Display.LAYER_VIDEO1) + + # set chn1 output format + sensor.set_framesize(width = 640, height = 480, chn = CAM_CHN_ID_1) + sensor.set_pixformat(Sensor.RGB888, chn = CAM_CHN_ID_1) + + # set chn2 output format + sensor.set_framesize(width = 640, height = 480, chn = CAM_CHN_ID_2) + sensor.set_pixformat(Sensor.RGB565, chn = CAM_CHN_ID_2) + + # use hdmi as display output + Display.init(Display.LT9611, to_ide = True, osd_num = 2) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + while True: + os.exitpoint() + + img = sensor.snapshot(chn = CAM_CHN_ID_1) + Display.show_image(img, alpha = 128) + + img = sensor.snapshot(chn = CAM_CHN_ID_2) + Display.show_image(img, x = 1920 - 640, layer = Display.LAYER_OSD1) + +except KeyboardInterrupt as e: + print("user stop: ", e) +except BaseException as e: + print(f"Exception {e}") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + MediaManager.deinit() +``` + +具体接口定义请参考 [camera](../api/mpp/K230_CanMV_Sensor模块API手册.md) + +## 4. Camera - 多摄像头预览及图像采集示例 + +```python +# Camera Example +# +# Note: You will need an SD card to run this example. +# +# You can start 2 camera preview. +import time, os, sys + +from media.sensor import * +from media.display import * +from media.media import * + +sensor0 = None +sensor1 = None + +try: + print("camera_test") + + sensor0 = Sensor(id = 0) + sensor0.reset() + # set chn0 output size, 960x540 + sensor0.set_framesize(width = 960, height = 540) + # set chn0 out format + sensor0.set_pixformat(Sensor.YUV420SP) + # bind sensor chn0 to display layer video 1 + bind_info = sensor0.bind_info(x = 0, y = 0) + Display.bind_layer(**bind_info, layer = Display.LAYER_VIDEO1) + + sensor1 = Sensor(id = 1) + sensor1.reset() + # set chn0 output size, 960x540 + sensor1.set_framesize(width = 960, height = 540) + # set chn0 out format + sensor1.set_pixformat(Sensor.YUV420SP) + + bind_info = sensor1.bind_info(x = 960, y = 540) + Display.bind_layer(**bind_info, layer = Display.LAYER_VIDEO2) + + # use hdmi as display output + Display.init(Display.LT9611, to_ide = True) + # init media manager + MediaManager.init() + + # multiple sensor only need one excute run() + sensor0.run() + + while True: + os.exitpoint() + time.sleep(1) +except KeyboardInterrupt as e: + print("user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # multiple sensor all need excute stop() + if isinstance(sensor0, Sensor): + sensor0.stop() + if isinstance(sensor1, Sensor): + sensor1.stop() + # or call Sensor.deinit() + # Sensor.deinit() + + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # deinit media buffer + MediaManager.deinit() + +``` + +具体接口定义请参考 [camera](../api/mpp/K230_CanMV_Sensor模块API手册.md) + +## 5. 
Display - 图像采集显示实例 + +```python +import time, os, urandom, sys + +from media.display import * +from media.media import * + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def display_test(): + print("display test") + + # create image for drawing + img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + + # use hdmi as display output + Display.init(Display.LT9611, to_ide = True) + # init media manager + MediaManager.init() + + try: + while True: + img.clear() + for i in range(10): + x = (urandom.getrandbits(11) % img.width()) + y = (urandom.getrandbits(11) % img.height()) + r = (urandom.getrandbits(8)) + g = (urandom.getrandbits(8)) + b = (urandom.getrandbits(8)) + size = (urandom.getrandbits(30) % 64) + 32 + # If the first argument is a scaler then this method expects + # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple. + # Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees. + img.draw_string_advanced(x,y,size, "Hello World!,你好世界!!!", color = (r, g, b),) + + # draw result to screen + Display.show_image(img) + + time.sleep(1) + os.exitpoint() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + print(f"Exception {e}") + + # deinit display + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + MediaManager.deinit() + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + display_test() + + +``` + +具体接口定义请参考 [display](../api/mpp/K230_CanMV_Display模块API手册.md) + +## 6. mp4 muxer 例程 + +本示例程序用于在 CanMV 开发板进行 mp4 muxer 的功能展示。 + +```python +# Save MP4 file example +# +# Note: You will need an SD card to run this example. +# +# You can capture audio and video and save them as MP4.The current version only supports MP4 format, video supports 264/265, and audio supports g711a/g711u. + +from media.mp4format import * +import os + +def mp4_muxer_test(): + print("mp4_muxer_test start") + width = 1280 + height = 720 + # 实例化mp4 container + mp4_muxer = Mp4Container() + mp4_cfg = Mp4CfgStr(mp4_muxer.MP4_CONFIG_TYPE_MUXER) + if mp4_cfg.type == mp4_muxer.MP4_CONFIG_TYPE_MUXER: + file_name = "/sdcard/app/tests/test.mp4" + mp4_cfg.SetMuxerCfg(file_name, mp4_muxer.MP4_CODEC_ID_H265, width, height, mp4_muxer.MP4_CODEC_ID_G711U) + # 创建mp4 muxer + mp4_muxer.Create(mp4_cfg) + # 启动mp4 muxer + mp4_muxer.Start() + + frame_count = 0 + try: + while True: + os.exitpoint() + # 处理音视频数据,按MP4格式写入文件 + mp4_muxer.Process() + frame_count += 1 + print("frame_count = ", frame_count) + if frame_count >= 200: + break + except BaseException as e: + print(e) + # 停止mp4 muxer + mp4_muxer.Stop() + # 销毁mp4 muxer + mp4_muxer.Destroy() + print("mp4_muxer_test stop") + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + mp4_muxer_test() +``` + +具体接口定义请参考 [VENC](../api/mpp/K230_CanMV_MP4模块API手册.md) + +## 8. player - mp4 文件播放器例程 + +本示例程序用于对 CanMV 开发板进行一个 mp4 文件播放器的功能展示。 + +```python +# play mp4 file example +# +# Note: You will need an SD card to run this example. +# +# You can load local files to play. The current version only supports MP4 format, video supports 264/265, and audio supports g711a/g711u. 
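+# Playback runs asynchronously: when the file ends the player fires K_PLAYER_EVENT_EOF through the
+# event callback, which the code below uses to leave its wait loop.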
+ +from media.player import * #导入播放器模块,用于播放mp4文件 +import os + +start_play = False #播放结束flag +def player_event(event,data): + global start_play + if(event == K_PLAYER_EVENT_EOF): #播放结束标识 + start_play = False #设置播放结束标识 + +def play_mp4_test(filename): + global start_play + player=Player() #创建播放器对象 + player.load(filename) #加载mp4文件 + player.set_event_callback(player_event) #设置播放器事件回调 + player.start() #开始播放 + start_play = True + + #等待播放结束 + try: + while(start_play): + time.sleep(0.1) + os.exitpoint() + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + + player.stop() #停止播放 + print("play over") + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + play_mp4_test("/sdcard/app/tests/test.mp4")#播放mp4文件 +``` + +具体接口定义请参考 [player](../api/mpp/K230_CanMV_播放器模块API手册.md) + +## 9. venc - venc 例程 + +本示例程序用于在 CanMV 开发板进行 venc 视频编码的功能展示。 + +```python +# Video encode example +# +# Note: You will need an SD card to run this example. +# +# You can capture videos and encode them into 264 files + +from media.vencoder import * +from media.sensor import * +from media.media import * +import time, os + +def venc_test(): + print("venc_test start") + width = 1280 + height = 720 + venc_chn = VENC_CHN_ID_0 + width = ALIGN_UP(width, 16) + # 初始化sensor + + sensor = Sensor() + sensor.reset() + # 设置camera 输出buffer + # set chn0 output size + sensor.set_framesize(width = width, height = height, alignment=12) + # set chn0 output format + sensor.set_pixformat(Sensor.YUV420SP) + + + # 实例化video encoder + encoder = Encoder() + # 设置video encoder 输出buffer + encoder.SetOutBufs(venc_chn, 15, width, height) + + # 绑定camera和venc + link = MediaManager.link(sensor.bind_info()['src'], (VIDEO_ENCODE_MOD_ID, VENC_DEV_ID, venc_chn)) + + # init media manager + MediaManager.init() + + chnAttr = ChnAttrStr(encoder.PAYLOAD_TYPE_H265, encoder.H265_PROFILE_MAIN, width, height) + streamData = StreamData() + + # 创建编码器 + encoder.Create(venc_chn, chnAttr) + + # 开始编码 + encoder.Start(venc_chn) + # 启动camera + sensor.run() + + frame_count = 0 + if chnAttr.payload_type == encoder.PAYLOAD_TYPE_H265: + suffix = "265" + elif chnAttr.payload_type == encoder.PAYLOAD_TYPE_H264: + suffix = "264" + else: + suffix = "unkown" + print("cam_venc_test, venc payload_type unsupport") + + out_file = f"/sdcard/app/tests/venc_chn_{venc_chn:02d}.{suffix}" + print("save stream to file: ", out_file) + + with open(out_file, "wb") as fo: + try: + while True: + os.exitpoint() + encoder.GetStream(venc_chn, streamData) # 获取一帧码流 + + for pack_idx in range(0, streamData.pack_cnt): + stream_data = uctypes.bytearray_at(streamData.data[pack_idx], streamData.data_size[pack_idx]) + fo.write(stream_data) # 码流写文件 + print("stream size: ", streamData.data_size[pack_idx], "stream type: ", streamData.stream_type[pack_idx]) + + encoder.ReleaseStream(venc_chn, streamData) # 释放一帧码流 + + frame_count += 1 + if frame_count >= 100: + break + except KeyboardInterrupt as e: + print("user stop: ", e) + except BaseException as e: + sys.print_exception(e) + + # 停止camera + sensor.stop() + # 销毁camera和venc的绑定 + del link + # 停止编码 + encoder.Stop(venc_chn) + # 销毁编码器 + encoder.Destroy(venc_chn) + # 清理buffer + MediaManager.deinit() + print("venc_test stop") + +if __name__ == "__main__": + os.exitpoint(os.EXITPOINT_ENABLE) + venc_test() +``` + +具体接口定义请参考 [VENC](../api/mpp/K230_CanMV_VENC模块API手册.md) + +## 10. 
lvgl - lvgl 例程 + +本示例程序用于对 CanMV 开发板进行一个 lvgl 的功能展示。 + +```python +from media.display import * +from media.media import * +import time, os, sys, gc +import lvgl as lv + +DISPLAY_WIDTH = ALIGN_UP(1920, 16) +DISPLAY_HEIGHT = 1080 + +def display_init(): + # use hdmi for display + Display.init(Display.LT9611, to_ide = False) + # init media manager + MediaManager.init() + +def display_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(50) + # deinit display + Display.deinit() + # release media buffer + MediaManager.deinit() + +def disp_drv_flush_cb(disp_drv, area, color): + global disp_img1, disp_img2 + + if disp_drv.flush_is_last() == True: + if disp_img1.virtaddr() == uctypes.addressof(color.__dereference__()): + Display.show_image(disp_img1) + else: + Display.show_image(disp_img2) + disp_drv.flush_ready() + +def lvgl_init(): + global disp_img1, disp_img2 + + lv.init() + disp_drv = lv.disp_create(DISPLAY_WIDTH, DISPLAY_HEIGHT) + disp_drv.set_flush_cb(disp_drv_flush_cb) + disp_img1 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + disp_img2 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + disp_drv.set_draw_buffers(disp_img1.bytearray(), disp_img2.bytearray(), disp_img1.size(), lv.DISP_RENDER_MODE.DIRECT) + +def lvgl_deinit(): + global disp_img1, disp_img2 + + lv.deinit() + del disp_img1 + del disp_img2 + +def user_gui_init(): + res_path = "/sdcard/app/tests/lvgl/data/" + + font_montserrat_16 = lv.font_load("A:" + res_path + "font/montserrat-16.fnt") + ltr_label = lv.label(lv.scr_act()) + ltr_label.set_text("In modern terminology, a microcontroller is similar to a system on a chip (SoC).") + ltr_label.set_style_text_font(font_montserrat_16,0) + ltr_label.set_width(310) + ltr_label.align(lv.ALIGN.TOP_MID, 0, 0) + + font_simsun_16_cjk = lv.font_load("A:" + res_path + "font/lv_font_simsun_16_cjk.fnt") + cz_label = lv.label(lv.scr_act()) + cz_label.set_style_text_font(font_simsun_16_cjk, 0) + cz_label.set_text("嵌入式系统(Embedded System),\n是一种嵌入机械或电气系统内部、具有专一功能和实时计算性能的计算机系统。") + cz_label.set_width(310) + cz_label.align(lv.ALIGN.BOTTOM_MID, 0, 0) + + anim_imgs = [None]*4 + with open(res_path + 'img/animimg001.png','rb') as f: + anim001_data = f.read() + + anim_imgs[0] = lv.img_dsc_t({ + 'data_size': len(anim001_data), + 'data': anim001_data + }) + anim_imgs[-1] = anim_imgs[0] + + with open(res_path + 'img/animimg002.png','rb') as f: + anim002_data = f.read() + + anim_imgs[1] = lv.img_dsc_t({ + 'data_size': len(anim002_data), + 'data': anim002_data + }) + + with open(res_path + 'img/animimg003.png','rb') as f: + anim003_data = f.read() + + anim_imgs[2] = lv.img_dsc_t({ + 'data_size': len(anim003_data), + 'data': anim003_data + }) + + animimg0 = lv.animimg(lv.scr_act()) + animimg0.center() + animimg0.set_src(anim_imgs, 4) + animimg0.set_duration(2000) + animimg0.set_repeat_count(lv.ANIM_REPEAT_INFINITE) + animimg0.start() + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + try: + display_init() + lvgl_init() + user_gui_init() + while True: + time.sleep_ms(lv.task_handler()) + except BaseException as e: + print(f"Exception {e}") + lvgl_deinit() + display_deinit() + gc.collect() + +if __name__ == "__main__": + main() +``` + +具体接口定义请参考 [lvgl](../api/lvgl/lvgl.md) diff --git a/zh/example/media.rst b/zh/example/media.rst deleted file mode 100755 index 6bb419d..0000000 --- a/zh/example/media.rst +++ /dev/null @@ -1,15 +0,0 @@ -多媒体 例程讲解 -=========== -.. 
toctree:: - :maxdepth: 1 - - media/acodec.md - media/audio.md - media/camera.md - media/camera_3sensnors.md - media/display.md - media/media.md - media/mp4muxer.md - media/player.md - media/venc.md - lvgl/lvgl.md diff --git a/zh/example/media/acodec.md b/zh/example/media/acodec.md deleted file mode 100755 index 25fc63f..0000000 --- a/zh/example/media/acodec.md +++ /dev/null @@ -1,145 +0,0 @@ -# acodec - g711编解码例程 - -本示例程序用于对 CanMV 开发板进行一个g711编解码的功能展示。 - -```python -from media.pyaudio import * #导入pyaudio模块,用于采集和播放音频 -from media.media import * #导入media模块,用于初始化vb buffer -import media.g711 as g711 #导入g711模块,用于g711编解码 -from mpp.payload_struct import * #导入payload模块,用于获取音视频编解码类型 - -def encode_audio(filename, duration): - CHUNK = int(44100/25) #设置音频chunk值 - FORMAT = paInt16 #设置采样精度 - CHANNELS = 2 #设置声道数 - RATE = 44100 #设置采样率 - - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - enc = g711.Encoder(K_PT_G711A,CHUNK) #创建g711编码器对象 - ret = media.buffer_init() #vb buffer初始化 - if ret: - print("record_audio, buffer_init failed") - - enc.create() #创建编码器 - #创建音频输入流 - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - frames = [] - #采集音频数据编码并存入列表 - for i in range(0, int(RATE / CHUNK * duration)): - frame_data = stream.read() #从音频输入流中读取音频数据 - data = enc.encode(frame_data) #编码音频数据为g711 - frames.append(data) #将g711编码数据保存到列表中 - - stream.stop_stream() #停止音频输入流 - stream.close() #关闭音频输入流 - p.terminate() #释放音频对象 - enc.destroy() #销毁g711音频编码器 - - #将g711编码数据存入文件中 - with open(filename,mode='w') as wf: - wf.write(b''.join(frames)) - - media.buffer_deinit() #释放vb buffer - -def decode_audio(filename): - wf = open(filename,mode='rb') #打开g711文件 - FORMAT = paInt16 #设置音频chunk值 - CHANNELS = 2 #设置声道数 - RATE = 44100 #设置采样率 - CHUNK = int(RATE/25) #设置音频chunk值 - - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - dec = g711.Decoder(K_PT_G711A,CHUNK) #创建g711解码器对象 - ret = media.buffer_init() #vb buffer初始化 - if ret: - print("play_audio, buffer_init failed") - - dec.create() #创建解码器 - - #创建音频输出流 - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - output=True, - frames_per_buffer=CHUNK) - - stream_len = CHUNK*CHANNELS*2//2 #设置每次读取的g711数据流长度 - stream_data = wf.read(stream_len) #从g711文件中读取数据 - - #解码g711文件并播放 - while stream_data: - frame_data = dec.decode(stream_data) #解码g711文件 - stream.write(frame_data) #播放raw数据 - stream_data = wf.read(stream_len) #从g711文件中读取数据 - - stream.stop_stream() #停止音频输入流 - stream.close() #关闭音频输入流 - p.terminate() #释放音频对象 - dec.destroy() #销毁解码器 - wf.close() #关闭g711文件 - - media.buffer_deinit() #释放vb buffer - -def loop_codec(duration): - CHUNK = int(44100/25) #设置音频chunk值 - FORMAT = paInt16 #设置采样精度 - CHANNELS = 2 #设置声道数 - RATE = 44100 #设置采样率 - - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - dec = g711.Decoder(K_PT_G711A,CHUNK) #创建g711解码器对象 - enc = g711.Encoder(K_PT_G711A,CHUNK) #创建g711编码器对象 - ret = media.buffer_init() #vb buffer初始化 - if ret: - print("loop_audio, buffer_init failed") - - dec.create() #创建g711解码器 - enc.create() #创建g711编码器 - - #创建音频输入流 - input_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - #创建音频输出流 - output_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - output=True, - frames_per_buffer=CHUNK) - - #从音频输入流中获取数据->编码->解码->写入到音频输出流中 - for i in range(0, int(RATE / CHUNK * duration)): - frame_data = input_stream.read() #从音频输入流中获取raw音频数据 - stream_data = enc.encode(frame_data) #编码音频数据为g711 - frame_data = dec.decode(stream_data) #解码g711数据为raw数据 - 
output_stream.write(frame_data) #播放raw数据 - - - input_stream.stop_stream() #停止音频输入流 - output_stream.stop_stream() #停止音频输出流 - input_stream.close() #关闭音频输入流 - output_stream.close() #关闭音频输出流 - p.terminate() #释放音频对象 - dec.destroy() #销毁g711解码器 - enc.destroy() #销毁g711编码器 - - media.buffer_deinit() #释放vb buffer - -#encode_audio('/sdcard/app/test.g711a', 15) #采集并编码g711文件 -#decode_audio('/sdcard/app/test.g711a') #解码g711文件并输出 -loop_codec(15) #采集音频数据->编码g711->解码g711->播放音频 -print("audio codec sample done") -``` - -具体接口定义请参考 [acodec](../../api/mpp/K230_CanMV_播放器模块API手册.md) diff --git a/zh/example/media/audio.md b/zh/example/media/audio.md deleted file mode 100755 index 0039365..0000000 --- a/zh/example/media/audio.md +++ /dev/null @@ -1,125 +0,0 @@ -# audio - 音频采集输出例程 - -本示例程序用于对 CanMV 开发板进行一个音频采集和输出的功能展示。 - -```python -from media.pyaudio import * #导入pyaudio模块,用于采集和播放音频 -import media.wave as wave #导入wav模块,用于保存和加载wav音频文件 -from media.media import * #导入media模块,用于初始化vb buffer - -def record_audio(filename, duration): - CHUNK = int(44100/25) #设置音频chunk值 - FORMAT = paInt16 #设置采样精度 - CHANNELS = 2 #设置声道数 - RATE = 44100 #设置采样率 - - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - ret = media.buffer_init() #vb buffer初始化 - if ret: - print("record_audio, buffer_init failed") - - #创建音频输入流 - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - frames = [] - #采集音频数据并存入列表 - for i in range(0, int(RATE / CHUNK * duration)): - data = stream.read() - frames.append(data) - - - stream.stop_stream() #停止采集音频数据 - stream.close()#关闭音频输入流 - p.terminate()#释放音频对象 - - #将列表中的数据保存到wav文件中 - wf = wave.open(filename, 'wb') #创建wav 文件 - wf.set_channels(CHANNELS) #设置wav 声道数 - wf.set_sampwidth(p.get_sample_size(FORMAT)) #设置wav 采样精度 - wf.set_framerate(RATE) #设置wav 采样率 - wf.write_frames(b''.join(frames)) #存储wav音频数据 - wf.close() #关闭wav文件 - - media.buffer_deinit() #释放vb buffer - -def play_audio(filename): - - wf = wave.open(filename, 'rb')#打开wav文件 - CHUNK = int(wf.get_framerate()/25)#设置音频chunk值 - - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - ret = media.buffer_init()#vb buffer初始化 - if ret: - print("play_audio, buffer_init failed") - - #创建音频输出流,设置的音频参数均为wave中获取到的参数 - stream = p.open(format=p.get_format_from_width(wf.get_sampwidth()), - channels=wf.get_channels(), - rate=wf.get_framerate(), - output=True,frames_per_buffer=CHUNK) - - data = wf.read_frames(CHUNK)#从wav文件中读取数一帧数据 - - while data: - stream.write(data) #将帧数据写入到音频输出流中 - data = wf.read_frames(CHUNK) #从wav文件中读取数一帧数据 - - stream.stop_stream() #停止音频输出流 - stream.close()#关闭音频输出流 - p.terminate()#释放音频对象 - wf.close()#关闭wav文件 - - media.buffer_deinit()#释放vb buffer - - -def loop_audio(duration): - CHUNK = int(44100/25)#设置音频chunck - FORMAT = paInt16 #设置音频采样精度 - CHANNELS = 2 #设置音频声道数 - RATE = 44100 #设置音频采样率 - - p = PyAudio() - p.initialize(CHUNK)#初始化PyAudio对象 - ret = media.buffer_init() #初始化vb buffer - if ret: - print("loop_audio, buffer_init failed") - - #创建音频输入流 - input_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - #创建音频输出流 - output_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - output=True,frames_per_buffer=CHUNK) - - #从音频输入流中获取数据写入到音频输出流中 - for i in range(0, int(RATE / CHUNK * duration)): - output_stream.write(input_stream.read()) - - - input_stream.stop_stream()#停止音频输入流 - output_stream.stop_stream()#停止音频输出流 - input_stream.close() #关闭音频输入流 - output_stream.close() #关闭音频输出流 - p.terminate() #释放音频对象 - - media.buffer_deinit() #释放vb buffer - 
-#play_audio('/sdcard/app/input.wav') #播放wav文件 -#record_audio('/sdcard/app/output.wav', 15) #录制wav文件 -loop_audio(15) #采集音频并输出 -print("audio sample done") -``` - -具体接口定义请参考 [audio](../../api/mpp/K230_CanMV_Audio模块API手册.md) diff --git a/zh/example/media/camera.md b/zh/example/media/camera.md deleted file mode 100755 index 7a601f6..0000000 --- a/zh/example/media/camera.md +++ /dev/null @@ -1,132 +0,0 @@ -# Camera - 摄像头预览及图像采集示例 - -```python -from media.camera import * #导入camera模块,使用camera相关接口 -from media.display import * #导入display模块,使用display相关接口 -from media.media import * #导入media模块,使用meida相关接口 -from time import * #导入time模块,使用time相关接口 -import time -import image #导入image模块,使用image相关接口 - - -def canmv_camera_test(): - print("canmv_camera_test") - - #初始化HDMI显示 - display.init(LT9611_1920X1080_30FPS) - - #初始化默认sensor配置(OV5647) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - #camera.sensor_init(CAM_DEV_ID_0, CAM_IMX335_2LANE_1920X1080_30FPS_12BIT_LINEAR) - - out_width = 1920 - out_height = 1080 - # 设置输出宽度16字节对齐 - out_width = ALIGN_UP(out_width, 16) - - #设置通道0输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - #设置通道0输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - #创建媒体数据源设备 - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - #创建媒体数据接收设备 - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - #创建媒体链路,数据从源设备流到接收设备 - media.create_link(meida_source, meida_sink) - #设置显示输出平面的属性 - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - out_width = 640 - out_height = 480 - out_width = ALIGN_UP(out_width, 16) - - #设置通道1输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, out_width, out_height) - #设置通道1输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - - #设置通道2输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_2, out_width, out_height) - #设置通道2输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - - #初始化媒体缓冲区 - ret = media.buffer_init() - if ret: - print("canmv_camera_test, buffer init failed") - return ret - - #启动摄像头数据流 - camera.start_stream(CAM_DEV_ID_0) - time.sleep(15) - - capture_count = 0 - while capture_count < 100: - time.sleep(1) - for dev_num in range(CAM_DEV_ID_MAX): - if not camera.cam_dev[dev_num].dev_attr.dev_enable: - continue - - for chn_num in range(CAM_CHN_ID_MAX): - if not camera.cam_dev[dev_num].chn_attr[chn_num].chn_enable: - continue - - print(f"canmv_camera_test, dev({dev_num}) chn({chn_num}) capture frame.") - #从指定设备和通道捕获图像 - img = camera.capture_image(dev_num, chn_num) - if img == -1: - print("camera.capture_image failed") - continue - - if img.format() == image.YUV420: - suffix = "yuv420sp" - elif img.format() == image.RGB888: - suffix = "rgb888" - elif img.format() == image.RGBP888: - suffix = "rgb888p" - else: - suffix = "unkown" - - filename = f"/sdcard/dev_{dev_num:02d}_chn_{chn_num:02d}_{img.width()}x{img.height()}_{capture_count:04d}.{suffix}" - print("save capture image to file:", filename) - - with open(filename, "wb") as f: - if f: - img_data = uctypes.bytearray_at(img.virtaddr(), img.size()) - # save yuv data to sdcard. 
- #f.write(img_data) - else: - print(f"capture_image, open dump file failed({filename})") - - time.sleep(1) - #释放捕获的图像数据 - camera.release_image(dev_num, chn_num, img) - - capture_count += 1 - - #停止摄像头输出 - camera.stop_stream(CAM_DEV_ID_0) - - #去初始化显示设备 - display.deinit() - - #销毁媒体链路 - media.destroy_link(meida_source, meida_sink) - - time.sleep(1) - #去初始化媒体缓冲区资源 - ret = media.buffer_deinit() - if ret: - print("camera test, media_buffer_deinit failed") - return ret - - print("camera test exit") - return 0 - - -canmv_camera_test() -``` - -具体接口定义请参考 [camera](../../api/mpp/K230_CanMV_Camera模块API手册.md) diff --git a/zh/example/media/camera_3sensors.md b/zh/example/media/camera_3sensors.md deleted file mode 100755 index d36f623..0000000 --- a/zh/example/media/camera_3sensors.md +++ /dev/null @@ -1,103 +0,0 @@ -# Camera - 多摄像头预览及图像采集示例 - -```python -from media.camera import * #导入camera模块,使用camera相关接口 -from media.display import * #导入display模块,使用display相关接口 -from media.media import * #导入media模块,使用meida相关接口 -import time, os #导入time模块,使用time相关接口 -import sys - -def camera_test(): - print("camera_test") - - #初始化HDMI显示 - display.init(LT9611_1920X1080_30FPS) - - #下面配置3个sensor的属性 - - #初始化默认sensor配置(CSI0 OV5647) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - out_width = 640 - out_height = 480 - - # 设置输出宽度16字节对齐 - out_width = ALIGN_UP(out_width, 16) - - #设置通道0输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - - #设置通道0输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - #创建媒体数据源设备 - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - - #创建媒体数据接收设备 - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - - #创建媒体链路,数据从源设备流到接收设备 - media.create_link(meida_source, meida_sink) - - #设置显示输出平面的属性 - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - #初始化默认sensor配置(CSI1 OV5647) - camera.sensor_init(CAM_DEV_ID_1, CAM_OV5647_1920X1080_CSI1_30FPS_10BIT_USEMCLK_LINEAR) - out_width = 640 - out_height = 480 - out_width = ALIGN_UP(out_width, 16) - camera.set_outsize(CAM_DEV_ID_1, CAM_CHN_ID_0, out_width, out_height) - camera.set_outfmt(CAM_DEV_ID_1, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - meida_source1 = media_device(CAMERA_MOD_ID, CAM_DEV_ID_1, CAM_CHN_ID_0) - meida_sink1 = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO2) - media.create_link(meida_source1, meida_sink1) - display.set_plane(640, 320, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO2) - - #初始化默认sensor配置(CSI1 OV5647) - camera.sensor_init(CAM_DEV_ID_2, CAM_OV5647_1920X1080_CSI2_30FPS_10BIT_USEMCLK_LINEAR) - out_width = 640 - out_height = 480 - out_width = ALIGN_UP(out_width, 16) - camera.set_outsize(CAM_DEV_ID_2, CAM_CHN_ID_0, out_width, out_height) - camera.set_outfmt(CAM_DEV_ID_2, CAM_CHN_ID_0, PIXEL_FORMAT_RGB_888) - meida_source2 = media_device(CAMERA_MOD_ID, CAM_DEV_ID_2, CAM_CHN_ID_0) - meida_sink2 = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_OSD0) - media.create_link(meida_source2, meida_sink2) - display.set_plane(1280, 600, out_width, out_height, PIXEL_FORMAT_RGB_888, DISPLAY_MIRROR_NONE, DISPLAY_CHN_OSD0) - - #初始化媒体缓冲区 - media.buffer_init() - - #启动摄像头数据流(多sensor) - camera.start_mcm_stream() - - try: - while True: - os.exitpoint() - time.sleep(5) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - #停止摄像头输出 - 
camera.stop_mcm_stream() - - #去初始化显示设备 - display.deinit() - - #去初始化媒体缓冲区资源 - media.destroy_link(meida_source, meida_sink) - media.destroy_link(meida_source1, meida_sink1) - media.destroy_link(meida_source2, meida_sink2) - - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # deinit media buffer - media.buffer_deinit() - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - camera_test() -``` - -具体接口定义请参考 [camera](../../api/mpp/K230_CanMV_Camera模块API手册.md) diff --git a/zh/example/media/display.md b/zh/example/media/display.md deleted file mode 100755 index 3946619..0000000 --- a/zh/example/media/display.md +++ /dev/null @@ -1,76 +0,0 @@ -# Display - 图像采集显示实例 - -```python -from media.camera import * #导入camera模块,使用camera相关接口 -from media.display import * #导入display模块,使用display相关接口 -from media.media import * #导入media模块,使用meida相关接口 -from time import * #导入time模块,使用time相关接口 -import time #导入time模块,使用time相关接口 - - -def camera_display_test(): - CAM_OUTPUT_BUF_NUM = 6 - CAM_INPUT_BUF_NUM = 4 - - #定义输出窗口的宽度和高度,并进行对齐 - out_width = 1080 - out_height = 720 - out_width = ALIGN_UP(out_width, 16) - - #初始化HDMI显示 - display.init(LT9611_1920X1080_30FPS) - #初始化默认sensor配置(OV5647) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - - #设置通道buffer数量 - camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_0, CAM_OUTPUT_BUF_NUM) - #设置通道0输出尺寸 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - #设置通道0输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - #创建媒体数据源设备 - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - #创建媒体数据接收设备 - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - #创建媒体链路,数据从源设备流到接收设备 - media.create_link(meida_source, meida_sink) - #设置显示输出平面的属性 - display.set_plane(400, 200, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - #初始化媒体缓冲区 - ret = media.buffer_init() - if ret: - print("camera_display_test, buffer init failed") - return ret - - #启动摄像头数据流 - camera.start_stream(CAM_DEV_ID_0) - - #采集显示600s后,停止采集输出 - count = 0 - while count < 600: - time.sleep(1) - count += 1 - - #停止摄像头输出 - camera.stop_stream(CAM_DEV_ID_0) - #销毁媒体链路 - media.destroy_link(meida_source, meida_sink) - time.sleep(1) - #去初始化显示设备 - display.deinit() - #去初始化媒体缓冲区资源 - ret = media.buffer_deinit() - if ret: - print("camera_display_test, media_buffer_deinit failed") - return ret - - print("camera_display_test exit") - return 0 - -camera_display_test() - -``` - -具体接口定义请参考 [display](../../api/mpp/K230_CanMV_Display模块API手册.md) diff --git a/zh/example/media/media.md b/zh/example/media/media.md deleted file mode 100755 index 6cbe01e..0000000 --- a/zh/example/media/media.md +++ /dev/null @@ -1,131 +0,0 @@ -# Meida - meida模块接口应用示例 - -```python -from media.media import * #导入media模块,用于初始化vb buffer - -def media_buf_test(): - print("media_buf_test start") - config = k_vb_config() #初始化vb config对象 - config.max_pool_cnt = 10 - - #配置vb pool - config.comm_pool[0].blk_size = 1024*1024 - config.comm_pool[0].blk_cnt = 10 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 2*1024*1024 - config.comm_pool[1].blk_cnt = 10 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 3*1024*1024 - config.comm_pool[2].blk_cnt = 10 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - print("media_buf_test buffer_config 111") - #配置媒体缓冲区参数 - ret = media.buffer_config(config) - if ret: - print("media_buf_test, buffer_config failed") - return ret - 
- config.max_pool_cnt = 10 - - config.comm_pool[0].blk_size = 1024*1024 - config.comm_pool[0].blk_cnt = 10 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 2*1024*1024 - config.comm_pool[1].blk_cnt = 10 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 3*1024*1024 - config.comm_pool[2].blk_cnt = 10 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - print("media_buf_test buffer_config 222") - #配置媒体缓冲区参数 - ret = media.buffer_config(config) - if ret: - print("media_buf_test, buffer_config failed") - return ret - - config.max_pool_cnt = 20 - - config.comm_pool[0].blk_size = 4*1024*1024 - config.comm_pool[0].blk_cnt = 3 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 5*1024*1024 - config.comm_pool[1].blk_cnt = 3 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 6*1024*1024 - config.comm_pool[2].blk_cnt = 3 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - print("media_buf_test buffer_config 333") - #配置媒体缓冲区参数 - ret = media.buffer_config(config) - if ret: - print("media_buf_test, buffer_config failed") - return ret - - config.max_pool_cnt = 30 - - config.comm_pool[0].blk_size = 4*1024*1024 - config.comm_pool[0].blk_cnt = 5 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 4*1024*1024 - config.comm_pool[1].blk_cnt = 5 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 4*1024*1024 - config.comm_pool[2].blk_cnt = 5 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - print("media_buf_test buffer_config 444") - #配置媒体缓冲区参数 - ret = media.buffer_config(config) - if ret: - print("media_buf_test, buffer_config failed") - return ret - - print("media_buf_test buffer_init") - #初始化媒体缓冲区 - ret = media.buffer_init() - if ret: - print("media_buf_test, buffer_init failed") - - - print("media_buf_test request_buffer") - #请求指定大小的媒体缓冲区 - buffer = media.request_buffer(4*1024*1024) - if buffer == -1: - print("media_buf_test, request_buffer failed") - else: - print(f"buffer handle({buffer.handle})") - print(f"buffer pool_id({buffer.pool_id})") - print(f"buffer phys_addr({buffer.phys_addr})") - print(f"buffer virt_addr({buffer.virt_addr})") - print(f"buffer size({buffer.size})") - #释放媒体缓冲区 - ret = media.release_buffer(buffer) - if ret: - print("media_buf_test, release_buffer failed") - - print("media_buf_test buffer_deinit") - #去初始化媒体缓冲区 - ret = media.buffer_deinit() - if ret: - print("media_buf_test, buffer_deinit failed") - return ret - - print("media_buf_test end") - return 0 - - -media_buf_test() -``` - -具体接口定义请参考 [meida](../../api/mpp/K230_CanMV_Media模块API手册.md) diff --git a/zh/example/media/mp4muxer.md b/zh/example/media/mp4muxer.md deleted file mode 100755 index fe3e86c..0000000 --- a/zh/example/media/mp4muxer.md +++ /dev/null @@ -1,63 +0,0 @@ -# venc - venc例程 - -本示例程序用于在 CanMV 开发板进行mp4 muxer的功能展示。 - -```python -from media.mp4format import * - -def canmv_mp4_muxer_test(): - width = 1280 - height = 720 - - # 实例化mp4 container - mp4_muxer = Mp4Container() - - mp4_cfg = Mp4CfgStr(mp4_muxer.MP4_CONFIG_TYPE_MUXER) - if mp4_cfg.type == mp4_muxer.MP4_CONFIG_TYPE_MUXER: - file_name = "/sdcard/app/tests/test.mp4" - mp4_cfg.SetMuxerCfg(file_name, mp4_muxer.MP4_CODEC_ID_H265, width, height, mp4_muxer.MP4_CODEC_ID_G711U) - - # 创建mp4 muxer - ret = mp4_muxer.Create(mp4_cfg) - if ret: - print("canmv_mp4_muxer_test, mp4 muxer Create failed.") - return -1 - - # 启动mp4 muxer - ret = mp4_muxer.Start() - if ret: - 
print("canmv_mp4_muxer_test, mp4 muxer Start failed.") - return -1 - - frame_count = 0 - - while True: - # 处理音视频数据,按MP4格式写入文件 - ret = mp4_muxer.Process() - if ret: - print("canmv_mp4_muxer_test, mp4 muxer Process failed.") - return -1 - - frame_count += 1 - print("frame_coutn = ", frame_count) - if frame_count >= 100: - break - - # 停止mp4 muxer - ret = mp4_muxer.Stop() - if ret: - print("canmv_mp4_muxer_test, mp4 muxer Stop failed.") - return -1 - - # 销毁mp4 muxer - ret = mp4_muxer.Destroy() - if ret: - print("canmv_mp4_muxer_test, mp4 muxer Destroy failed.") - return -1 - - return 0 - -canmv_mp4_muxer_test() -``` - -具体接口定义请参考 [VENC](../../api/mpp/K230_CanMV_MP4模块API手册.md) diff --git a/zh/example/media/player.md b/zh/example/media/player.md deleted file mode 100755 index 7df08d5..0000000 --- a/zh/example/media/player.md +++ /dev/null @@ -1,32 +0,0 @@ -# player - mp4文件播放器例程 - -本示例程序用于对 CanMV 开发板进行一个mp4文件播放器的功能展示。 - -```python -from media.player import * #导入播放器模块,用于播放mp4文件 - -start_play = False #播放结束flag -def player_event(event,data): - global start_play - if(event == K_PLAYER_EVENT_EOF): #播放结束标识 - start_play = False #设置播放结束标识 - -def play_mp4_test(filename): - global start_play - player=Player() #创建播放器对象 - player.load(filename) #加载mp4文件 - player.set_event_callback(player_event) #设置播放器事件回调 - player.start() #开始播放 - start_play = True - - #等待播放结束 - while(start_play): - time.sleep(0.1) - - player.stop() #停止播放 - print("play over") - -play_mp4_test("/sdcard/app/tests/test.mp4")#播放mp4文件 -``` - -具体接口定义请参考 [player](../../api/mpp/K230_CanMV_播放器模块API手册.md) diff --git a/zh/example/media/venc.md b/zh/example/media/venc.md deleted file mode 100755 index 4fc1b06..0000000 --- a/zh/example/media/venc.md +++ /dev/null @@ -1,136 +0,0 @@ -# venc - venc例程 - -本示例程序用于在 CanMV 开发板进行venc视频编码的功能展示。 - -```python -from media.vencoder import * -from media.camera import * -from media.media import * -from time import * -import time - -def canmv_venc_test(): - width = 1280 - height = 720 - venc_chn = VENC_CHN_ID_0 - - width = ALIGN_UP(width, 16) - # 初始化sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - - # 设置camera 输出buffer - camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_0, 6) - # 设置camera 输出buffer size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, width, height) - # 设置camera 输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # 实例化video encoder - encoder = Encoder() - # 设置video encoder 输出buffer - ret = encoder.SetOutBufs(venc_chn, 15, width, height) - if ret: - print("canmv_venc_test, encoder SetOutBufs failed.") - return -1 - - # 初始化设置的buffer - ret = media.buffer_init() - if ret: - print("canmv_venc_test, buffer_init failed.") - return ret - - chnAttr = ChnAttrStr(encoder.PAYLOAD_TYPE_H265, encoder.H265_PROFILE_MAIN, width, height) - streamData = StreamData() - - # 创建编码器 - ret = encoder.Create(venc_chn, chnAttr) - if ret < 0: - print("canmv_venc_test, vencoder create filed.") - return ret - - # 绑定camera和venc - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(VIDEO_ENCODE_MOD_ID, VENC_DEV_ID, venc_chn) - ret = media.create_link(media_source, media_sink) - if ret: - print("cam_venc_test, create link with camera failed.") - return ret - - # 开始编码 - ret = encoder.Start(venc_chn) - if ret: - print("cam_venc_test, encoder start failed") - return ret - - # 启动camera - ret = camera.start_stream(CAM_DEV_ID_0) - if ret: - print("cam_venc_test, camera start failed") - return ret - - frame_count = 0 - if chnAttr.payload_type == 
encoder.PAYLOAD_TYPE_H265: - suffix = "265" - elif chnAttr.payload_type == encoder.PAYLOAD_TYPE_H264: - suffix = "264" - else: - suffix = "unkown" - print("cam_venc_test, venc payload_type unsupport") - return -1 - - out_file = f"/sdcard/app/tests/venc_chn_{venc_chn:02d}.{suffix}" - print("save stream to file: ", out_file) - - with open(out_file, "wb") as fo: - while True: - ret = encoder.GetStream(venc_chn, streamData) # 获取一帧码流 - if ret < 0: - print("cam_venc_test, venc get stream failed") - return ret - - for pack_idx in range(0, streamData.pack_cnt): - stream_data = uctypes.bytearray_at(streamData.data[pack_idx], streamData.data_size[pack_idx]) - fo.write(stream_data) # 码流写文件 - print("stream size: ", streamData.data_size[pack_idx], "stream type: ", streamData.stream_type[pack_idx]) - - ret = encoder.ReleaseStream(venc_chn, streamData) # 释放一帧码流 - if ret < 0: - print("cam_venc_test, venc release stream failed") - return ret - - frame_count += 1 - if frame_count >= 100: - break - - # 停止camera - camera.stop_stream(CAM_DEV_ID_0) - - # 销毁camera和venc的绑定 - ret = media.destroy_link(media_source, media_sink) - if ret: - print("cam_venc_test, venc destroy link with camera failed.") - return ret - - # 停止编码 - ret = encoder.Stop(venc_chn) - if ret < 0: - print("cam_venc_test, venc stop failed.") - return ret - - # 销毁编码器 - ret = encoder.Destroy(venc_chn) - if ret < 0: - print("cam_venc_test, venc destroy failed.") - return ret - - # 清理buffer - ret = media.buffer_deinit() - if ret: - print("cam_venc_test, media buffer deinit failed.") - return ret - - -canmv_venc_test() -``` - -具体接口定义请参考 [VENC](../../api/mpp/K230_CanMV_VENC模块API手册.md) diff --git a/zh/example/network.md b/zh/example/network.md new file mode 100644 index 0000000..97b204e --- /dev/null +++ b/zh/example/network.md @@ -0,0 +1,488 @@ +# 5.网络 例程讲解 + +## 1. network - wlan 例程 + +本示例程序用于对 CanMV 开发板进行一个 network wlan 的功能展示。 + +```python +import network + +def sta_test(): + sta=network.WLAN(0) + #查看sta是否激活 + print(sta.active()) + #查看sta状态 + print(sta.status()) + #扫描并打印结果 + print(sta.scan()) + #sta连接ap + print(sta.connect("wjx_pc","12345678")) + #状态 + print(sta.status()) + #查看ip配置 + print(sta.ifconfig()) + #查看是否连接 + print(sta.isconnected()) + #断开连接 + print(sta.disconnect()) + #连接ap + print(sta.connect("wjx_pc","12345678")) + #查看状态 + print(sta.status()) + + +def ap_test(): + ap=network.WLAN(network.AP_IF) + #查看ap是否激活 + print(ap.active()) + #配置并创建ap + ap.config(ssid='k230_ap_wjx', channel=11, key='12345678') + #查看ap的ssid号 + print(ap.config('ssid')) + #查看ap的channel + print(ap.config('channel')) + #查看ap的所有配置 + print(ap.config()) + #查看ap的状态 + print(ap.status()) + #sta是否连接ap + print(ap.isconnected()) + +sta_test() +ap_test() +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +## 2. 
tcp - client 例程 + +本示例程序用于对 CanMV 开发板进行一个 tcp client 的功能展示。 + +```python +#配置 tcp/udp socket调试工具 +import socket +import time + +PORT=60000 + +def client(): + #获取地址及端口号 对应地址 + ai = socket.getaddrinfo("10.100.228.5", PORT) + #ai = socket.getaddrinfo("10.10.1.94", PORT) + print("Address infos:", ai) + addr = ai[0][-1] + + print("Connect address:", addr) + #建立socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + #连接地址 + s.connect(addr) + + for i in range(10): + str="K230 tcp client send test {0} \r\n".format(i) + print(str) + #print(s.send(str)) + #发送字符串 + print(s.write(str)) + time.sleep(0.2) + #time.sleep(1) + #print(s.recv(4096)) + #print(s.read()) + #延时1秒 + time.sleep(1) + #关闭socket + s.close() + print("end") + +#main() +client() +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +## 3. udp - server 例程 + +本示例程序用于对 CanMV 开发板进行一个 udp server 的功能展示。 + +```python +#配置 tcp/udp socket调试工具 +import socket +import time +import network +PORT=60000 + + +def udpserver(): + #获取地址及端口号对应地址 + ai = socket.getaddrinfo("0.0.0.0", PORT) + #ai = socket.getaddrinfo("10.10.1.94", 60000) + print("Address infos:", ai) + addr = ai[0][-1] + + print("udp server %s port:%d\n" % ((network.LAN().ifconfig()[0]),PORT)) + #建立socket + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + #设置属性 + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + #绑定 + s.bind(addr) + print("a") + #延时 + time.sleep(1) + + for j in range(10): + try: + #接受内容 + data, addr = s.recvfrom(800) + print("b") + except: + continue + #打印内容 + print("recv %d" % j,data,addr) + #回复内容 + s.sendto(b"%s have recv count=%d " % (data,j), addr) + #关闭 + s.close() + print("udp server exit!!") + + + + +#main() +udpserver() + +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +## 4. http - client 例程 + +本示例程序用于对 CanMV 开发板进行一个 http client 的功能展示。 + +```python +import socket + + +def main(use_stream=True): + #创建socket + s = socket.socket() + #获取地址及端口号 对应地址 + ai = socket.getaddrinfo("www.baidu.com", 80) + #ai = socket.getaddrinfo("10.100.228.5", 8080) + + print("Address infos:", ai) + addr = ai[0][-1] + + print("Connect address:", addr) + #连接 + s.connect(addr) + + if use_stream: + # MicroPython socket objects support stream (aka file) interface + # directly, but the line below is needed for CPython. + s = s.makefile("rwb", 0) + #发送http请求 + s.write(b"GET /index.html HTTP/1.0\r\n\r\n") + #打印请求内容 + print(s.read()) + else: + #发送http请求 + s.send(b"GET /index.html HTTP/1.0\r\n\r\n") + #打印请求内容 + print(s.recv(4096)) + #print(s.read()) + #关闭socket + s.close() + + +#main() +main(use_stream=True) +main(use_stream=False) + + +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +## 5. 
udp - client 例程 + +本示例程序用于对 CanMV 开发板进行一个 udp client 的功能展示。 + +```python +#配置 tcp/udp socket调试工具 +import socket +import time + + +def udpclient(): + #获取地址和端口号 对应地址 + ai = socket.getaddrinfo("10.100.228.5", 60000) + #ai = socket.getaddrinfo("10.10.1.94", 60000) + print("Address infos:", ai) + addr = ai[0][-1] + + print("Connect address:", addr) + #建立socket + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + + + + for i in range(10): + str="K230 udp client send test {0} \r\n".format(i) + print(str) + #发送字符串 + print(s.sendto(str,addr)) + #延时 + time.sleep(0.2) + #time.sleep(1) + #print(s.recv(4096)) + #print(s.read()) + #延时 + time.sleep(1) + #关闭 + s.close() + print("end") + + + +#main() +udpclient() + +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +## 6. http - server 例程 + +本示例程序用于对 CanMV 开发板进行一个 http server 的功能展示。 + +```python +# port from micropython/examples/network/http_server.py +import socket +import network +import time +# print(network.LAN().ifconfig()[0]) +# print("Listening, connect your browser to http://%s:8081/" % (network.LAN().ifconfig()[0])) + +CONTENT = b"""\ +HTTP/1.0 200 OK + +Hello #%d from k230 canmv MicroPython! +""" + + +def main(micropython_optimize=True): + #建立socket + s = socket.socket() + #获取地址及端口号 对应地址 + # Binding to all interfaces - server will be accessible to other hosts! + ai = socket.getaddrinfo("0.0.0.0", 8081) + print("Bind address info:", ai) + addr = ai[0][-1] + #设置属性 + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + #绑定地址 + s.bind(addr) + #开始监听 + s.listen(5) + print("Listening, connect your browser to http://%s:8081/" % (network.LAN().ifconfig()[0])) + + counter = 0 + while True: + #接受连接 + res = s.accept() + client_sock = res[0] + client_addr = res[1] + print("Client address:", client_addr) + print("Client socket:", client_sock) + #非阻塞模式 + client_sock.setblocking(False) + if not micropython_optimize: + # To read line-oriented protocol (like HTTP) from a socket (and + # avoid short read problem), it must be wrapped in a stream (aka + # file-like) object. That's how you do it in CPython: + client_stream = client_sock.makefile("rwb") + else: + # .. but MicroPython socket objects support stream interface + # directly, so calling .makefile() method is not required. If + # you develop application which will run only on MicroPython, + # especially on a resource-constrained embedded device, you + # may take this shortcut to save resources. + client_stream = client_sock + + print("Request:") + #获取内容 + req = client_stream.read() + print(req) + + while True: + ##获取内容 + h = client_stream.read() + if h == b"" or h == b"\r\n": + break + print(h) + #回复内容 + client_stream.write(CONTENT % counter) + #time.sleep(0.5) + #关闭 + client_stream.close() + # if not micropython_optimize: + # client_sock.close() + counter += 1 + #print("wjx", counter) + if counter > 20 : + print("http server exit!") + #关闭 + s.close() + break + + +main() + +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +--- + +## 7. tcp - server 例程 + +本示例程序用于对 CanMV 开发板进行一个 tcp server 的功能展示。 + +```python +#配置 tcp/udp socket调试工具 +import socket +import network +import time +PORT=60000 + + +CONTENT = b""" +Hello #%d from k230 canmv MicroPython! 
+""" + + + +def server(): + counter=1 + #获取地址及端口号 对应地址 + #ai = socket.getaddrinfo("10.100.228.5", 8000) + ai = socket.getaddrinfo("0.0.0.0", PORT) + print("Address infos:", ai,PORT) + addr = ai[0][-1] + + print("Connect address:", addr) + #建立socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + #设置属性 + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + #绑定 + s.bind(addr) + #监听 + s.listen(5) + print("tcp server %s port:%d\n" % ((network.LAN().ifconfig()[0]),PORT)) + + + while True: + #接受连接 + res = s.accept() + client_sock = res[0] + client_addr = res[1] + print("Client address:", client_addr) + print("Client socket:", client_sock) + client_sock.setblocking(False) + + client_stream = client_sock + #发送字符传 + client_stream.write(CONTENT % counter) + + while True: + #读取内容 + h = client_stream.read() + if h != b"" : + print(h) + #回复内容 + client_stream.write("recv :%s" % h) + + if "end" in h : + #关闭socket + client_stream.close() + break + + counter += 1 + if counter > 10 : + print("server exit!") + #关闭 + s.close() + break + +#main() +server() + +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- + +## 8. network - lan 例程 + +本示例程序用于对 CanMV 开发板进行一个 network lan 的功能展示。 + +```python +import network + + +def main(): + #获取lan接口 + a=network.LAN() + #获取网口是否在使用 + print(a.active()) + #关闭网口 + print(a.active(0)) + #使能网口 + print(a.active(1)) + #查看网口 ip,掩码,网关,dns配置 + print(a.ifconfig()) + #设置网口 ip,掩码,网关,dns配置 + print(a.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))) + #查看网口 ip,掩码,网关,dns配置 + print(a.ifconfig()) + #设置网口为dhcp模式 + print(a.ifconfig("dhcp")) + #查看网口 ip,掩码,网关,dns配置 + print(a.ifconfig()) + #查看网口mac地址 + print(a.config("mac")) + #设置网口mac地址 + print(a.config(mac="42:EA:D0:C2:0D:83")) + #查看网口mac地址 + print(a.config("mac")) + #设置网口为dhcp模式 + print(a.ifconfig("dhcp")) + #查看网口 ip,掩码,网关,dns配置 + print(a.ifconfig()) + + + + +main() + + +``` + +具体接口定义请参考 [socket](../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../api/extmod/K230_CanMV_network模块API手册.md) + +--- diff --git a/zh/example/omv/omv.rst b/zh/example/omv/omv.rst index 608fc68..5b796be 100755 --- a/zh/example/omv/omv.rst +++ b/zh/example/omv/omv.rst @@ -1,4 +1,4 @@ -Openmv 例程讲解 +5.Openmv 例程讲解 =========== .. 
toctree:: :maxdepth: 1 diff --git a/zh/example/cipher/cipher.md b/zh/example/peripheral.md old mode 100755 new mode 100644 similarity index 69% rename from zh/example/cipher/cipher.md rename to zh/example/peripheral.md index 4c76aa5..82da2d5 --- a/zh/example/cipher/cipher.md +++ b/zh/example/peripheral.md @@ -1,316 +1,523 @@ -# Cipher - Cipher 例程 - -本示例程序用于对 CanMV 开发板进行一个 Cipher 输出的功能展示。 - -```python -import uhashlib - -print('###################### SHA256 Test ##############################') -print('********************** Test-1: Only Call update() Once ******************') -# 初始化sha256对象 -obj = uhashlib.sha256() -# 输入消息message -msg = b'\x45\x11\x01\x25\x0e\xc6\xf2\x66\x52\x24\x9d\x59\xdc\x97\x4b\x73\x61\xd5\x71\xa8\x10\x1c\xdf\xd3\x6a\xba\x3b\x58\x54\xd3\xae\x08\x6b\x5f\xdd\x45\x97\x72\x1b\x66\xe3\xc0\xdc\x5d\x8c\x60\x6d\x96\x57\xd0\xe3\x23\x28\x3a\x52\x17\xd1\xf5\x3f\x2f\x28\x4f\x57\xb8' -# 标准哈希值 -dgst0 = b'\x1a\xaa\xf9\x28\x5a\xf9\x45\xb8\xa9\x7c\xf1\x4f\x86\x9b\x18\x90\x14\xc3\x84\xf3\xc7\xc2\xb7\xd2\xdf\x8a\x97\x13\xbf\xfe\x0b\xf1' -# 将消息更新到硬件IP中 -obj.update(msg) -# 计算哈希值 -dgst = obj.digest() -print(dgst0 == dgst) -# print(binascii.hexlify(dgst)) -print('********************** Test-2: Call update() Twice ******************') -dgst0 = b'\x93\x6a\x18\x5c\xaa\xa2\x66\xbb\x9c\xbe\x98\x1e\x9e\x05\xcb\x78\xcd\x73\x2b\x0b\x32\x80\xeb\x94\x44\x12\xbb\x6f\x8f\x8f\x07\xaf' -obj = uhashlib.sha256() -# 向硬件多次更新消息 -obj.update(b'hello') -obj.update(b'world') -dgst = obj.digest() -print(dgst0 == dgst) -# print('********************** Test-3: Call digest() Twice ******************') -# dgst0 = b'\x93\x6a\x18\x5c\xaa\xa2\x66\xbb\x9c\xbe\x98\x1e\x9e\x05\xcb\x78\xcd\x73\x2b\x0b\x32\x80\xeb\x94\x44\x12\xbb\x6f\x8f\x8f\x07\xaf' -# obj = uhashlib.sha256() -# obj.update(b'hello') -# obj.update(b'world') -# dgst = obj.digest() -# dgst1 = obj.digest() -# print(dgst0 == dgst) -# print(dgst == dgst1) - - -print('\n###################### AES-GCM Test ##############################') -import ucryptolib -import collections -# 创建一个具名元组并返回具名元组子类 -Aes = collections.namedtuple('Aes', ['key', 'iv', 'aad', 'pt', 'ct', 'tag']) -aes = [ - Aes(b'\xb5\x2c\x50\x5a\x37\xd7\x8e\xda\x5d\xd3\x4f\x20\xc2\x25\x40\xea\x1b\x58\x96\x3c\xf8\xe5\xbf\x8f\xfa\x85\xf9\xf2\x49\x25\x05\xb4', - b'\x51\x6c\x33\x92\x9d\xf5\xa3\x28\x4f\xf4\x63\xd7', - b'', - b'', - b'', - b'\xbd\xc1\xac\x88\x4d\x33\x24\x57\xa1\xd2\x66\x4f\x16\x8c\x76\xf0' - ), - - Aes(b'\x24\x50\x1a\xd3\x84\xe4\x73\x96\x3d\x47\x6e\xdc\xfe\x08\x20\x52\x37\xac\xfd\x49\xb5\xb8\xf3\x38\x57\xf8\x11\x4e\x86\x3f\xec\x7f', - b'\x9f\xf1\x85\x63\xb9\x78\xec\x28\x1b\x3f\x27\x94', - b'\xad\xb5\xec\x72\x0c\xcf\x98\x98\x50\x00\x28\xbf\x34\xaf\xcc\xbc\xac\xa1\x26\xef', - b'\x27', - b'\xeb', - b'\x63\x35\xe1\xd4\x9e\x89\x88\xea\xc4\x8e\x42\x19\x4e\x5f\x56\xdb' - ), - - Aes(b'\x1f\xde\xd3\x2d\x59\x99\xde\x4a\x76\xe0\xf8\x08\x21\x08\x82\x3a\xef\x60\x41\x7e\x18\x96\xcf\x42\x18\xa2\xfa\x90\xf6\x32\xec\x8a', - b'\x1f\x3a\xfa\x47\x11\xe9\x47\x4f\x32\xe7\x04\x62', - b'', - b'\x06\xb2\xc7\x58\x53\xdf\x9a\xeb\x17\xbe\xfd\x33\xce\xa8\x1c\x63\x0b\x0f\xc5\x36\x67\xff\x45\x19\x9c\x62\x9c\x8e\x15\xdc\xe4\x1e\x53\x0a\xa7\x92\xf7\x96\xb8\x13\x8e\xea\xb2\xe8\x6c\x7b\x7b\xee\x1d\x40\xb0', - b'\x91\xfb\xd0\x61\xdd\xc5\xa7\xfc\xc9\x51\x3f\xcd\xfd\xc9\xc3\xa7\xc5\xd4\xd6\x4c\xed\xf6\xa9\xc2\x4a\xb8\xa7\x7c\x36\xee\xfb\xf1\xc5\xdc\x00\xbc\x50\x12\x1b\x96\x45\x6c\x8c\xd8\xb6\xff\x1f\x8b\x3e\x48\x0f', - b'\x30\x09\x6d\x34\x0f\x3d\x5c\x42\xd8\x2a\x6f\x47\x5d\xef\x23\xeb' - ), - - 
Aes(b'\x24\x50\x1a\xd3\x84\xe4\x73\x96\x3d\x47\x6e\xdc\xfe\x08\x20\x52\x37\xac\xfd\x49\xb5\xb8\xf3\x38\x57\xf8\x11\x4e\x86\x3f\xec\x7f', - b'\x9f\xf1\x85\x63\xb9\x78\xec\x28\x1b\x3f\x27\x94', - b'\xad\xb5\xec\x72\x0c\xcf\x98\x98\x50\x00\x28\xbf\x34\xaf\xcc\xbc\xac\xa1\x26\xef', - b'\x27\xf3\x48\xf9\xcd\xc0\xc5\xbd\x5e\x66\xb1\xcc\xb6\x3a\xd9\x20\xff\x22\x19\xd1\x4e\x8d\x63\x1b\x38\x72\x26\x5c\xf1\x17\xee\x86\x75\x7a\xcc\xb1\x58\xbd\x9a\xbb\x38\x68\xfd\xc0\xd0\xb0\x74\xb5\xf0\x1b\x2c', - b'\xeb\x7c\xb7\x54\xc8\x24\xe8\xd9\x6f\x7c\x6d\x9b\x76\xc7\xd2\x6f\xb8\x74\xff\xbf\x1d\x65\xc6\xf6\x4a\x69\x8d\x83\x9b\x0b\x06\x14\x5d\xae\x82\x05\x7a\xd5\x59\x94\xcf\x59\xad\x7f\x67\xc0\xfa\x5e\x85\xfa\xb8', - b'\xbc\x95\xc5\x32\xfe\xcc\x59\x4c\x36\xd1\x55\x02\x86\xa7\xa3\xf0' - ) -] - -print('********************** Test-1: ivlen=12, ptlen=0, aadlen=0 ******************') -print('GCM Encrypt......') -# 初始化aes-gcm密码对象,参数包含密钥key、初始化向量iv以及附加数据aad -crypto = ucryptolib.aes(aes[0].key, 0, aes[0].iv, aes[0].aad) -# 输入明文数据 -inbuf = aes[0].pt -outbuf = bytearray(16) -# 加密,输出格式为:密文 + tag -val = crypto.encrypt(inbuf, outbuf) -# 标准数据 -val0 = aes[0].ct + aes[0].tag -print(val0 == val) -print('GCM Decrypt......') -crypto = ucryptolib.aes(aes[0].key, 0, aes[0].iv, aes[0].aad) -# 输入数据格式:密文 + tag -inbuf = aes[0].ct + aes[0].tag -# 解密,输出数据格式:明文 -val = crypto.decrypt(inbuf) -print(val[:1] == b'\x00') -# val0 = aes[0].pt -# print(val0 == val) - -print('********************** Test-2: ivlen=12, ptlen=1, aadlen=20 ******************') -print('GCM Encrypt......') -# 初始化 -crypto = ucryptolib.aes(aes[1].key, 0, aes[1].iv, aes[1].aad) -# 输入明文 -inbuf = aes[1].pt -outbuf = bytearray(17) -# 输出密文+tag -val = crypto.encrypt(inbuf, outbuf) -val0 = aes[1].ct + aes[1].tag -print(val0 == val) -print('GCM Decrypt......') -crypto = ucryptolib.aes(aes[1].key, 0, aes[1].iv, aes[1].aad) -# 输入密文+tag -inbuf = aes[1].ct + aes[1].tag -outbuf = bytearray(1) -# 输出明文 -val = crypto.decrypt(inbuf, outbuf) -val0 = aes[1].pt -print(val0 == val) - -print('********************** Test-3: ivlen=12, ptlen=51, aadlen=0 ******************') -print('GCM Encrypt......') -# 初始化 -crypto = ucryptolib.aes(aes[2].key, 0, aes[2].iv, aes[2].aad) -# 输入明文 -inbuf = aes[2].pt -outbuf = bytearray(67) -# 输出密文+tag -val = crypto.encrypt(inbuf, outbuf) -val0 = aes[2].ct + aes[2].tag -print(val0 == val) -print('GCM Decrypt......') -crypto = ucryptolib.aes(aes[2].key, 0, aes[2].iv, aes[2].aad) -# 输入密文+tag -inbuf = aes[2].ct + aes[2].tag -outbuf = bytearray(51) -# 输入明文 -val = crypto.decrypt(inbuf, outbuf) -val0 = aes[2].pt -print(val0 == val) - -print('********************** Test-4: ivlen=12, ptlen=51, aadlen=20 ******************') -print('GCM Encrypt......') -# 初始化 -crypto = ucryptolib.aes(aes[3].key, 0, aes[3].iv, aes[3].aad) -# 输入明文 -inbuf = aes[3].pt -outbuf = bytearray(67) -# 输出密文+tag -val = crypto.encrypt(inbuf, outbuf) -val0 = aes[3].ct + aes[3].tag -print(val0 == val) -print('GCM Decrypt......') -crypto = ucryptolib.aes(aes[3].key, 0, aes[3].iv, aes[3].aad) -# 输入密文+tag -inbuf = aes[3].ct + aes[3].tag -val = crypto.decrypt(inbuf) -val0 = aes[3].pt -print(val[:51] == val0) -# outbuf = bytearray(51) -# val = crypto.decrypt(inbuf, outbuf) -# val0 = aes[3].pt -# print(val0 == val) - - -print('\n###################### SM4-ECB/CFB/OFB/CBC/CTR Test ##############################') -Sm4 = collections.namedtuple('Sm4', ['key', 'iv', 'pt', 'ct']) -sm4 = [ - # ecb - Sm4(b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', - None, - 
b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', - b'\x5e\xc8\x14\x3d\xe5\x09\xcf\xf7\xb5\x17\x9f\x8f\x47\x4b\x86\x19\x2f\x1d\x30\x5a\x7f\xb1\x7d\xf9\x85\xf8\x1c\x84\x82\x19\x23\x04' - ), - - # cbc - Sm4( - b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', - b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', - b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', - b'\x78\xeb\xb1\x1c\xc4\x0b\x0a\x48\x31\x2a\xae\xb2\x04\x02\x44\xcb\x4c\xb7\x01\x69\x51\x90\x92\x26\x97\x9b\x0d\x15\xdc\x6a\x8f\x6d' - ), - - # cfb - Sm4( - b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', - b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', - b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', - b'\xac\x32\x36\xcb\x86\x1d\xd3\x16\xe6\x41\x3b\x4e\x3c\x75\x24\xb7\x69\xd4\xc5\x4e\xd4\x33\xb9\xa0\x34\x60\x09\xbe\xb3\x7b\x2b\x3f' - ), - - # ofb - Sm4( - b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', - b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', - b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', - b'\xac\x32\x36\xcb\x86\x1d\xd3\x16\xe6\x41\x3b\x4e\x3c\x75\x24\xb7\x1d\x01\xac\xa2\x48\x7c\xa5\x82\xcb\xf5\x46\x3e\x66\x98\x53\x9b' - ), - - # ctr - Sm4( - b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', - b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', - b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xee\xee\xee\xee\xff\xff\xff\xff\xff\xff\xff\xff\xee\xee\xee\xee\xee\xee\xee\xee\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa', - b'\xac\x32\x36\xcb\x97\x0c\xc2\x07\x91\x36\x4c\x39\x5a\x13\x42\xd1\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd\x07\x4c\xce\x38\x5c\xdd\x70\xc7\xf2\x34\xbc\x0e\x24\xc1\x19\x80\xfd\x12\x86\x31\x0c\xe3\x7b\x92\x2a\x46\xb8\x94\xbe\xe4\xfe\xb7\x9a\x38\x22\x94\x0c\x93\x54\x05' - ) -] - -print('********************** Test-1: keybits=128, ptlen=32 ******************') -print('SM4-ECB Encrypt......') -# 初始化sm4密码对象,参数包含密钥key、加解密模式 -crypto = ucryptolib.sm4(sm4[0].key, 1) -# 输入明文数据 -inbuf = sm4[0].pt -outbuf = bytearray(32) -# 加密 -val = crypto.encrypt(inbuf, outbuf) -# golden数据 -val0 = sm4[0].ct -print(val0 == val) -print('SM4-ECB Decrypt......') -# 解密 -crypto = ucryptolib.sm4(sm4[0].key, 1) -inbuf = sm4[0].ct -outbuf = bytearray(32) -val = crypto.decrypt(inbuf, outbuf) -val0 = sm4[0].pt -print(val0 == val) - -print('********************** Test-2: keybits=128, ivlen=16, ptlen=32 ******************') -print('SM4-CBC Encrypt......') -# 初始化sm4密码对象 -crypto = ucryptolib.sm4(sm4[1].key, 2, sm4[1].iv) -# 输入明文数据 -inbuf = sm4[1].pt -outbuf = bytearray(32) -# 加密 -val = crypto.encrypt(inbuf, outbuf) -val0 = sm4[1].ct -print(val0 == val) -print('SM4-CBC Decrypt......') -# 解密 -crypto = ucryptolib.sm4(sm4[1].key, 2, sm4[1].iv) -inbuf = sm4[1].ct -val = crypto.decrypt(inbuf) -val0 = sm4[1].pt -print(val0 == val) -# outbuf = bytearray(32) -# val = crypto.decrypt(inbuf, outbuf) -# val0 = sm4[1].pt -# print(val0 == val) - -print('********************** Test-3: keybits=128, ivlen=16, ptlen=32 ******************') -print('SM4-CFB Encrypt......') -# 初始化sm4密码对象 -crypto = 
ucryptolib.sm4(sm4[2].key, 3, sm4[2].iv) -# 输入明文数据 -inbuf = sm4[2].pt -outbuf = bytearray(32) -# 加密 -val = crypto.encrypt(inbuf, outbuf) -val0 = sm4[2].ct -print(val0 == val) -print('SM4-CFB Decrypt......') -# 解密 -crypto = ucryptolib.sm4(sm4[2].key, 3, sm4[2].iv) -inbuf = sm4[2].ct -outbuf = bytearray(32) -val = crypto.decrypt(inbuf, outbuf) -val0 = sm4[2].pt -print(val0 == val) - -print('********************** Test-4: keybits=128, ivlen=16, ptlen=32 ******************') -print('SM4-OFB Encrypt......') -# 初始化sm4密码对象 -crypto = ucryptolib.sm4(sm4[3].key, 5, sm4[3].iv) -# 输入明文数据 -inbuf = sm4[3].pt -outbuf = bytearray(32) -# 加密 -val = crypto.encrypt(inbuf, outbuf) -val0 = sm4[3].ct -print(val0 == val) -print('SM4-OFB Decrypt......') -# 解密 -crypto = ucryptolib.sm4(sm4[3].key, 5, sm4[3].iv) -inbuf = sm4[3].ct -outbuf = bytearray(32) -val = crypto.decrypt(inbuf, outbuf) -val0 = sm4[3].pt -print(val0 == val) - -print('********************** Test-5: keybits=128, ivlen=16, ptlen=64 ******************') -print('SM4-CTR Encrypt......') -# 初始化sm4密码对象 -crypto = ucryptolib.sm4(sm4[4].key, 6, sm4[4].iv) -# 输入明文数据 -inbuf = sm4[4].pt -outbuf = bytearray(64) -# 加密 -val = crypto.encrypt(inbuf, outbuf) -val0 = sm4[4].ct -print(val0 == val) -print('SM4-CTR Decrypt......') -# 解密 -crypto = ucryptolib.sm4(sm4[4].key, 6, sm4[4].iv) -inbuf = sm4[4].ct -outbuf = bytearray(64) -val = crypto.decrypt(inbuf, outbuf) -val0 = sm4[4].pt -print(val0 == val) -``` - -具体接口定义请参考 [Cipher](../../api/cipher/K230_CanMV_Ucryptolib模块API手册.md)、[Hash](../../api/cipher/K230_CanMV_Hashlib模块API手册.md) +# 3.外设 例程讲解 + +## 1. Cipher - Cipher 例程 + +本示例程序用于对 CanMV 开发板进行一个 Cipher 输出的功能展示。 + +```python +import uhashlib + +print('###################### SHA256 Test ##############################') +print('********************** Test-1: Only Call update() Once ******************') +# 初始化sha256对象 +obj = uhashlib.sha256() +# 输入消息message +msg = b'\x45\x11\x01\x25\x0e\xc6\xf2\x66\x52\x24\x9d\x59\xdc\x97\x4b\x73\x61\xd5\x71\xa8\x10\x1c\xdf\xd3\x6a\xba\x3b\x58\x54\xd3\xae\x08\x6b\x5f\xdd\x45\x97\x72\x1b\x66\xe3\xc0\xdc\x5d\x8c\x60\x6d\x96\x57\xd0\xe3\x23\x28\x3a\x52\x17\xd1\xf5\x3f\x2f\x28\x4f\x57\xb8' +# 标准哈希值 +dgst0 = b'\x1a\xaa\xf9\x28\x5a\xf9\x45\xb8\xa9\x7c\xf1\x4f\x86\x9b\x18\x90\x14\xc3\x84\xf3\xc7\xc2\xb7\xd2\xdf\x8a\x97\x13\xbf\xfe\x0b\xf1' +# 将消息更新到硬件IP中 +obj.update(msg) +# 计算哈希值 +dgst = obj.digest() +print(dgst0 == dgst) +# print(binascii.hexlify(dgst)) +print('********************** Test-2: Call update() Twice ******************') +dgst0 = b'\x93\x6a\x18\x5c\xaa\xa2\x66\xbb\x9c\xbe\x98\x1e\x9e\x05\xcb\x78\xcd\x73\x2b\x0b\x32\x80\xeb\x94\x44\x12\xbb\x6f\x8f\x8f\x07\xaf' +obj = uhashlib.sha256() +# 向硬件多次更新消息 +obj.update(b'hello') +obj.update(b'world') +dgst = obj.digest() +print(dgst0 == dgst) +# print('********************** Test-3: Call digest() Twice ******************') +# dgst0 = b'\x93\x6a\x18\x5c\xaa\xa2\x66\xbb\x9c\xbe\x98\x1e\x9e\x05\xcb\x78\xcd\x73\x2b\x0b\x32\x80\xeb\x94\x44\x12\xbb\x6f\x8f\x8f\x07\xaf' +# obj = uhashlib.sha256() +# obj.update(b'hello') +# obj.update(b'world') +# dgst = obj.digest() +# dgst1 = obj.digest() +# print(dgst0 == dgst) +# print(dgst == dgst1) + + +print('\n###################### AES-GCM Test ##############################') +import ucryptolib +import collections +# 创建一个具名元组并返回具名元组子类 +Aes = collections.namedtuple('Aes', ['key', 'iv', 'aad', 'pt', 'ct', 'tag']) +aes = [ + Aes(b'\xb5\x2c\x50\x5a\x37\xd7\x8e\xda\x5d\xd3\x4f\x20\xc2\x25\x40\xea\x1b\x58\x96\x3c\xf8\xe5\xbf\x8f\xfa\x85\xf9\xf2\x49\x25\x05\xb4', + 
b'\x51\x6c\x33\x92\x9d\xf5\xa3\x28\x4f\xf4\x63\xd7', + b'', + b'', + b'', + b'\xbd\xc1\xac\x88\x4d\x33\x24\x57\xa1\xd2\x66\x4f\x16\x8c\x76\xf0' + ), + + Aes(b'\x24\x50\x1a\xd3\x84\xe4\x73\x96\x3d\x47\x6e\xdc\xfe\x08\x20\x52\x37\xac\xfd\x49\xb5\xb8\xf3\x38\x57\xf8\x11\x4e\x86\x3f\xec\x7f', + b'\x9f\xf1\x85\x63\xb9\x78\xec\x28\x1b\x3f\x27\x94', + b'\xad\xb5\xec\x72\x0c\xcf\x98\x98\x50\x00\x28\xbf\x34\xaf\xcc\xbc\xac\xa1\x26\xef', + b'\x27', + b'\xeb', + b'\x63\x35\xe1\xd4\x9e\x89\x88\xea\xc4\x8e\x42\x19\x4e\x5f\x56\xdb' + ), + + Aes(b'\x1f\xde\xd3\x2d\x59\x99\xde\x4a\x76\xe0\xf8\x08\x21\x08\x82\x3a\xef\x60\x41\x7e\x18\x96\xcf\x42\x18\xa2\xfa\x90\xf6\x32\xec\x8a', + b'\x1f\x3a\xfa\x47\x11\xe9\x47\x4f\x32\xe7\x04\x62', + b'', + b'\x06\xb2\xc7\x58\x53\xdf\x9a\xeb\x17\xbe\xfd\x33\xce\xa8\x1c\x63\x0b\x0f\xc5\x36\x67\xff\x45\x19\x9c\x62\x9c\x8e\x15\xdc\xe4\x1e\x53\x0a\xa7\x92\xf7\x96\xb8\x13\x8e\xea\xb2\xe8\x6c\x7b\x7b\xee\x1d\x40\xb0', + b'\x91\xfb\xd0\x61\xdd\xc5\xa7\xfc\xc9\x51\x3f\xcd\xfd\xc9\xc3\xa7\xc5\xd4\xd6\x4c\xed\xf6\xa9\xc2\x4a\xb8\xa7\x7c\x36\xee\xfb\xf1\xc5\xdc\x00\xbc\x50\x12\x1b\x96\x45\x6c\x8c\xd8\xb6\xff\x1f\x8b\x3e\x48\x0f', + b'\x30\x09\x6d\x34\x0f\x3d\x5c\x42\xd8\x2a\x6f\x47\x5d\xef\x23\xeb' + ), + + Aes(b'\x24\x50\x1a\xd3\x84\xe4\x73\x96\x3d\x47\x6e\xdc\xfe\x08\x20\x52\x37\xac\xfd\x49\xb5\xb8\xf3\x38\x57\xf8\x11\x4e\x86\x3f\xec\x7f', + b'\x9f\xf1\x85\x63\xb9\x78\xec\x28\x1b\x3f\x27\x94', + b'\xad\xb5\xec\x72\x0c\xcf\x98\x98\x50\x00\x28\xbf\x34\xaf\xcc\xbc\xac\xa1\x26\xef', + b'\x27\xf3\x48\xf9\xcd\xc0\xc5\xbd\x5e\x66\xb1\xcc\xb6\x3a\xd9\x20\xff\x22\x19\xd1\x4e\x8d\x63\x1b\x38\x72\x26\x5c\xf1\x17\xee\x86\x75\x7a\xcc\xb1\x58\xbd\x9a\xbb\x38\x68\xfd\xc0\xd0\xb0\x74\xb5\xf0\x1b\x2c', + b'\xeb\x7c\xb7\x54\xc8\x24\xe8\xd9\x6f\x7c\x6d\x9b\x76\xc7\xd2\x6f\xb8\x74\xff\xbf\x1d\x65\xc6\xf6\x4a\x69\x8d\x83\x9b\x0b\x06\x14\x5d\xae\x82\x05\x7a\xd5\x59\x94\xcf\x59\xad\x7f\x67\xc0\xfa\x5e\x85\xfa\xb8', + b'\xbc\x95\xc5\x32\xfe\xcc\x59\x4c\x36\xd1\x55\x02\x86\xa7\xa3\xf0' + ) +] + +print('********************** Test-1: ivlen=12, ptlen=0, aadlen=0 ******************') +print('GCM Encrypt......') +# 初始化aes-gcm密码对象,参数包含密钥key、初始化向量iv以及附加数据aad +crypto = ucryptolib.aes(aes[0].key, 0, aes[0].iv, aes[0].aad) +# 输入明文数据 +inbuf = aes[0].pt +outbuf = bytearray(16) +# 加密,输出格式为:密文 + tag +val = crypto.encrypt(inbuf, outbuf) +# 标准数据 +val0 = aes[0].ct + aes[0].tag +print(val0 == val) +print('GCM Decrypt......') +crypto = ucryptolib.aes(aes[0].key, 0, aes[0].iv, aes[0].aad) +# 输入数据格式:密文 + tag +inbuf = aes[0].ct + aes[0].tag +# 解密,输出数据格式:明文 +val = crypto.decrypt(inbuf) +print(val[:1] == b'\x00') +# val0 = aes[0].pt +# print(val0 == val) + +print('********************** Test-2: ivlen=12, ptlen=1, aadlen=20 ******************') +print('GCM Encrypt......') +# 初始化 +crypto = ucryptolib.aes(aes[1].key, 0, aes[1].iv, aes[1].aad) +# 输入明文 +inbuf = aes[1].pt +outbuf = bytearray(17) +# 输出密文+tag +val = crypto.encrypt(inbuf, outbuf) +val0 = aes[1].ct + aes[1].tag +print(val0 == val) +print('GCM Decrypt......') +crypto = ucryptolib.aes(aes[1].key, 0, aes[1].iv, aes[1].aad) +# 输入密文+tag +inbuf = aes[1].ct + aes[1].tag +outbuf = bytearray(1) +# 输出明文 +val = crypto.decrypt(inbuf, outbuf) +val0 = aes[1].pt +print(val0 == val) + +print('********************** Test-3: ivlen=12, ptlen=51, aadlen=0 ******************') +print('GCM Encrypt......') +# 初始化 +crypto = ucryptolib.aes(aes[2].key, 0, aes[2].iv, aes[2].aad) +# 输入明文 +inbuf = aes[2].pt +outbuf = bytearray(67) +# 输出密文+tag +val = crypto.encrypt(inbuf, outbuf) +val0 = aes[2].ct + 
aes[2].tag +print(val0 == val) +print('GCM Decrypt......') +crypto = ucryptolib.aes(aes[2].key, 0, aes[2].iv, aes[2].aad) +# 输入密文+tag +inbuf = aes[2].ct + aes[2].tag +outbuf = bytearray(51) +# 输入明文 +val = crypto.decrypt(inbuf, outbuf) +val0 = aes[2].pt +print(val0 == val) + +print('********************** Test-4: ivlen=12, ptlen=51, aadlen=20 ******************') +print('GCM Encrypt......') +# 初始化 +crypto = ucryptolib.aes(aes[3].key, 0, aes[3].iv, aes[3].aad) +# 输入明文 +inbuf = aes[3].pt +outbuf = bytearray(67) +# 输出密文+tag +val = crypto.encrypt(inbuf, outbuf) +val0 = aes[3].ct + aes[3].tag +print(val0 == val) +print('GCM Decrypt......') +crypto = ucryptolib.aes(aes[3].key, 0, aes[3].iv, aes[3].aad) +# 输入密文+tag +inbuf = aes[3].ct + aes[3].tag +val = crypto.decrypt(inbuf) +val0 = aes[3].pt +print(val[:51] == val0) +# outbuf = bytearray(51) +# val = crypto.decrypt(inbuf, outbuf) +# val0 = aes[3].pt +# print(val0 == val) + + +print('\n###################### SM4-ECB/CFB/OFB/CBC/CTR Test ##############################') +Sm4 = collections.namedtuple('Sm4', ['key', 'iv', 'pt', 'ct']) +sm4 = [ + # ecb + Sm4(b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', + None, + b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', + b'\x5e\xc8\x14\x3d\xe5\x09\xcf\xf7\xb5\x17\x9f\x8f\x47\x4b\x86\x19\x2f\x1d\x30\x5a\x7f\xb1\x7d\xf9\x85\xf8\x1c\x84\x82\x19\x23\x04' + ), + + # cbc + Sm4( + b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', + b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', + b'\x78\xeb\xb1\x1c\xc4\x0b\x0a\x48\x31\x2a\xae\xb2\x04\x02\x44\xcb\x4c\xb7\x01\x69\x51\x90\x92\x26\x97\x9b\x0d\x15\xdc\x6a\x8f\x6d' + ), + + # cfb + Sm4( + b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', + b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', + b'\xac\x32\x36\xcb\x86\x1d\xd3\x16\xe6\x41\x3b\x4e\x3c\x75\x24\xb7\x69\xd4\xc5\x4e\xd4\x33\xb9\xa0\x34\x60\x09\xbe\xb3\x7b\x2b\x3f' + ), + + # ofb + Sm4( + b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', + b'\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xff\xff\xff\xff\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb', + b'\xac\x32\x36\xcb\x86\x1d\xd3\x16\xe6\x41\x3b\x4e\x3c\x75\x24\xb7\x1d\x01\xac\xa2\x48\x7c\xa5\x82\xcb\xf5\x46\x3e\x66\x98\x53\x9b' + ), + + # ctr + Sm4( + b'\x01\x23\x45\x67\x89\xab\xcd\xef\xfe\xdc\xba\x98\x76\x54\x32\x10', + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f', + b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xee\xee\xee\xee\xee\xee\xee\xee\xff\xff\xff\xff\xff\xff\xff\xff\xee\xee\xee\xee\xee\xee\xee\xee\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa', + b'\xac\x32\x36\xcb\x97\x0c\xc2\x07\x91\x36\x4c\x39\x5a\x13\x42\xd1\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd\x07\x4c\xce\x38\x5c\xdd\x70\xc7\xf2\x34\xbc\x0e\x24\xc1\x19\x80\xfd\x12\x86\x31\x0c\xe3\x7b\x92\x2a\x46\xb8\x94\xbe\xe4\xfe\xb7\x9a\x38\x22\x94\x0c\x93\x54\x05' + ) +] + +print('********************** Test-1: keybits=128, ptlen=32 ******************') +print('SM4-ECB 
Encrypt......') +# 初始化sm4密码对象,参数包含密钥key、加解密模式 +crypto = ucryptolib.sm4(sm4[0].key, 1) +# 输入明文数据 +inbuf = sm4[0].pt +outbuf = bytearray(32) +# 加密 +val = crypto.encrypt(inbuf, outbuf) +# golden数据 +val0 = sm4[0].ct +print(val0 == val) +print('SM4-ECB Decrypt......') +# 解密 +crypto = ucryptolib.sm4(sm4[0].key, 1) +inbuf = sm4[0].ct +outbuf = bytearray(32) +val = crypto.decrypt(inbuf, outbuf) +val0 = sm4[0].pt +print(val0 == val) + +print('********************** Test-2: keybits=128, ivlen=16, ptlen=32 ******************') +print('SM4-CBC Encrypt......') +# 初始化sm4密码对象 +crypto = ucryptolib.sm4(sm4[1].key, 2, sm4[1].iv) +# 输入明文数据 +inbuf = sm4[1].pt +outbuf = bytearray(32) +# 加密 +val = crypto.encrypt(inbuf, outbuf) +val0 = sm4[1].ct +print(val0 == val) +print('SM4-CBC Decrypt......') +# 解密 +crypto = ucryptolib.sm4(sm4[1].key, 2, sm4[1].iv) +inbuf = sm4[1].ct +val = crypto.decrypt(inbuf) +val0 = sm4[1].pt +print(val0 == val) +# outbuf = bytearray(32) +# val = crypto.decrypt(inbuf, outbuf) +# val0 = sm4[1].pt +# print(val0 == val) + +print('********************** Test-3: keybits=128, ivlen=16, ptlen=32 ******************') +print('SM4-CFB Encrypt......') +# 初始化sm4密码对象 +crypto = ucryptolib.sm4(sm4[2].key, 3, sm4[2].iv) +# 输入明文数据 +inbuf = sm4[2].pt +outbuf = bytearray(32) +# 加密 +val = crypto.encrypt(inbuf, outbuf) +val0 = sm4[2].ct +print(val0 == val) +print('SM4-CFB Decrypt......') +# 解密 +crypto = ucryptolib.sm4(sm4[2].key, 3, sm4[2].iv) +inbuf = sm4[2].ct +outbuf = bytearray(32) +val = crypto.decrypt(inbuf, outbuf) +val0 = sm4[2].pt +print(val0 == val) + +print('********************** Test-4: keybits=128, ivlen=16, ptlen=32 ******************') +print('SM4-OFB Encrypt......') +# 初始化sm4密码对象 +crypto = ucryptolib.sm4(sm4[3].key, 5, sm4[3].iv) +# 输入明文数据 +inbuf = sm4[3].pt +outbuf = bytearray(32) +# 加密 +val = crypto.encrypt(inbuf, outbuf) +val0 = sm4[3].ct +print(val0 == val) +print('SM4-OFB Decrypt......') +# 解密 +crypto = ucryptolib.sm4(sm4[3].key, 5, sm4[3].iv) +inbuf = sm4[3].ct +outbuf = bytearray(32) +val = crypto.decrypt(inbuf, outbuf) +val0 = sm4[3].pt +print(val0 == val) + +print('********************** Test-5: keybits=128, ivlen=16, ptlen=64 ******************') +print('SM4-CTR Encrypt......') +# 初始化sm4密码对象 +crypto = ucryptolib.sm4(sm4[4].key, 6, sm4[4].iv) +# 输入明文数据 +inbuf = sm4[4].pt +outbuf = bytearray(64) +# 加密 +val = crypto.encrypt(inbuf, outbuf) +val0 = sm4[4].ct +print(val0 == val) +print('SM4-CTR Decrypt......') +# 解密 +crypto = ucryptolib.sm4(sm4[4].key, 6, sm4[4].iv) +inbuf = sm4[4].ct +outbuf = bytearray(64) +val = crypto.decrypt(inbuf, outbuf) +val0 = sm4[4].pt +print(val0 == val) +``` + +具体接口定义请参考 [Cipher](../api/cipher/K230_CanMV_Ucryptolib模块API手册.md)、[Hash](../api/cipher/K230_CanMV_Hashlib模块API手册.md) + +## 2. ADC - ADC例程 + +本示例程序用于对 CanMV 开发板进行一个ADC的功能展示。 + +```python +from machine import ADC + +# 实例化ADC通道0 +adc = ADC(0) +# 获取ADC通道0采样值 +print(adc.read_u16()) +# 获取ADC通道0电压值 +print(adc.read_uv(), "uV") +``` + +具体接口定义请参考 [ADC](../api/machine/K230_CanMV_ADC模块API手册.md) + +## 3. 
FFT - FFT例程 + +本示例程序用于对 CanMV 开发板进行一个FFT的功能展示。 + +```python +from machine import FFT +import array +import math +from ulab import numpy as np +PI = 3.14159265358979323846264338327950288419716939937510 + +rx = [] +def input_data(): + for i in range(64): + data0 = 10 *math.cos(2* PI *i / 64) + data1 = 20 * math.cos(2 * 2* PI *i / 64) + data2 = 30* math.cos(3 *2* PI *i / 64) + data3 = 0.2* math.cos(4 *2 * PI * i / 64) + data4 = 1000* math.cos(5 *2* PI * i / 64) + rx.append((int(data0 + data1 + data2 + data3 + data4))) +input_data() #初始化需要进行FFT的数据,列表类型 +print(rx) +data = np.array(rx,dtype=np.uint16) #把列表数据转换成数组 +print(data) +fft1 = FFT(data, 64, 0x555) #创建一个FFT对象,运算点数为64,偏移是0x555 +res = fft1.run() #获取FFT转换后的数据 +print(res) +res = fft1.amplitude(res) #获取各个频率点的幅值 +print(res) +res = fft1.freq(64,38400) #获取所有频率点的频率值 +print(res) +``` + +具体接口定义请参考 [FFT](../api/machine/K230_CanMV_FFT模块API手册.md) + +## 4. FPIOA - FPIOA例程 + +本示例程序用于对 CanMV 开发板进行一个FPIOA的功能展示。 + +```python +from machine import FPIOA + +# 实例化FPIOA +fpioa = FPIOA() +# 打印所有引脚配置 +fpioa.help() +# 打印指定引脚详细配置 +fpioa.help(0) +# 打印指定功能所有可用的配置引脚 +fpioa.help(FPIOA.IIC0_SDA, func=True) +# 设置Pin0为GPIO0 +fpioa.set_function(0, FPIOA.GPIO0) +# 设置Pin2为GPIO2, 同时配置其它项 +fpioa.set_function(2, FPIOA.GPIO2, ie=1, oe=1, pu=0, pd=0, st=1, sl=0, ds=7) +# 获取指定功能当前所在的引脚 +fpioa.get_pin_num(FPIOA.UART0_TXD) +# 获取指定引脚当前功能 +fpioa.get_pin_func(0) + +``` + +具体接口定义请参考 [FPIOA](../api/machine/K230_CanMV_FPIOA模块API手册.md) + +## 5. PWM - PWM例程 + +本示例程序用于对 CanMV 开发板进行一个PWM输出的功能展示。 + +```python +from machine import PWM +# 实例化PWM通道0,频率为1000Hz,占空比为50%,默认使能输出 +pwm0 = PWM(0, 1000, 50, enable = True) +# 关闭通道0输出 +pwm0.enable(0) +# 调整通道0频率为2000Hz +pwm0.freq(2000) +# 调整通道0占空比为40% +pwm0.duty(40) +# 打开通道0输出 +pwm0.enable(1) +``` + +具体接口定义请参考 [PWM](../api/machine/K230_CanMV_PWM模块API手册.md) + +## 6. SPI - SPI例程 + +本示例程序用于对 CanMV 开发板进行spi读取flash id的功能展示。 + +```python +from machine import SPI +from machine import FPIOA + +# 实例化SPI的gpio +a = FPIOA() + +# 打印gpio14的属性 +a.help(14) +# 设置gpio14为QSPI0_CS0功能 +a.set_function(14,a.QSPI0_CS0) +a.help(14) + +# 打印gpio15的属性 +a.help(15) +# 设置gpio15为QSPI0_CLK功能 +a.set_function(15,a.QSPI0_CLK) +a.help(15) + +# 打印gpio16的属性 +a.help(16) +# 设置gpio16为QSPI0_D0功能 +a.set_function(16,a.QSPI0_D0) +a.help(16) + +# 打印gpio17的属性 +a.help(17) +# 设置gpio17为QSPI0_D1功能 +a.set_function(17,a.QSPI0_D1) +a.help(17) + +# 实例化SPI,使用5MHz时钟,极性为0,数据位宽为8bit +spi=SPI(1,baudrate=5000000, polarity=0, phase=0, bits=8) + +# 使能 gd25lq128 复位 +spi.write(bytes([0x66])) +# gd25lq128 复位 +spi.write(bytes([0x99])) + +# 读id命令(0x9f) +a=bytes([0x9f]) +# 创建长度为3的接收buff +b=bytearray(3) +# 读id +spi.write_readinto(a,b) +# 打印为:bytearray(b'\xc8`\x18') +print(b) + +# 读id命令(0x90,0,0,0) +a=bytes([0x90,0,0,0]) +# 创建长度为2的接收buff +b=bytearray(2) +# 读id +spi.write_readinto(a,b) +# 打印为:bytearray(b'\xc8\x17') +print(b) +``` + +具体接口定义请参考 [SPI](../api/machine/K230_CanMV_SPI模块API手册.md) + +## 7. Timer - Timer例程 + +本示例程序用于对 CanMV 开发板进行一个Timer的功能展示。 + +```python +from machine import Timer +import time + +# 实例化一个软定时器 +tim = Timer(-1) +# 初始化定时器为单次模式,周期100ms +tim.init(period=100, mode=Timer.ONE_SHOT, callback=lambda t:print(1)) +time.sleep(0.2) +# 初始化定时器为周期模式,频率为1Hz +tim.init(freq=1, mode=Timer.PERIODIC, callback=lambda t:print(2)) +time.sleep(2) +# 释放定时器资源 +tim.deinit() + +``` + +具体接口定义请参考 [Timer](../api/machine/K230_CanMV_Timer模块API手册.md) + +## 8. 
WDT - WDT例程 + +本示例程序用于对 CanMV 开发板进行一个WDT的功能展示。 + +```python +import time +from machine import WDT + +# 实例化wdt1,timeout为3s +wdt1 = WDT(1,3) +time.sleep(2) +# 喂狗操作 +wdt1.feed() +time.sleep(2) +``` + +具体接口定义请参考 [WDT](../api/machine/K230_CanMV_WDT模块API手册.md) diff --git a/zh/example/peripheral.rst b/zh/example/peripheral.rst deleted file mode 100755 index b604b6c..0000000 --- a/zh/example/peripheral.rst +++ /dev/null @@ -1,14 +0,0 @@ -外设 例程讲解 -=========== -.. toctree:: - :maxdepth: 1 - - cipher/cipher.md - machine/adc/adc.md - machine/fft/fft.md - machine/fpioa/fpioa.md - machine/gpio/gpio.md - machine/pwm/pwm.md - machine/spi/spi.md - machine/timer/timer.md - machine/wdt/wdt.md diff --git a/zh/example/socket_network/http_client.md b/zh/example/socket_network/http_client.md deleted file mode 100755 index d941b3d..0000000 --- a/zh/example/socket_network/http_client.md +++ /dev/null @@ -1,48 +0,0 @@ -# http - client 例程 - -本示例程序用于对 CanMV 开发板进行一个 http client的功能展示。 - -```python -import socket - - -def main(use_stream=True): - #创建socket - s = socket.socket() - #获取地址及端口号 对应地址 - ai = socket.getaddrinfo("www.baidu.com", 80) - #ai = socket.getaddrinfo("10.100.228.5", 8080) - - print("Address infos:", ai) - addr = ai[0][-1] - - print("Connect address:", addr) - #连接 - s.connect(addr) - - if use_stream: - # MicroPython socket objects support stream (aka file) interface - # directly, but the line below is needed for CPython. - s = s.makefile("rwb", 0) - #发送http请求 - s.write(b"GET /index.html HTTP/1.0\r\n\r\n") - #打印请求内容 - print(s.read()) - else: - #发送http请求 - s.send(b"GET /index.html HTTP/1.0\r\n\r\n") - #打印请求内容 - print(s.recv(4096)) - #print(s.read()) - #关闭socket - s.close() - - -#main() -main(use_stream=True) -main(use_stream=False) - - -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/http_server.md b/zh/example/socket_network/http_server.md deleted file mode 100755 index f3ced1c..0000000 --- a/zh/example/socket_network/http_server.md +++ /dev/null @@ -1,90 +0,0 @@ -# http - server 例程 - -本示例程序用于对 CanMV 开发板进行一个 http server 的功能展示。 - -```python -# port from micropython/examples/network/http_server.py -import socket -import network -import time -# print(network.LAN().ifconfig()[0]) -# print("Listening, connect your browser to http://%s:8081/" % (network.LAN().ifconfig()[0])) - -CONTENT = b"""\ -HTTP/1.0 200 OK - -Hello #%d from k230 canmv MicroPython! -""" - - -def main(micropython_optimize=True): - #建立socket - s = socket.socket() - #获取地址及端口号 对应地址 - # Binding to all interfaces - server will be accessible to other hosts! - ai = socket.getaddrinfo("0.0.0.0", 8081) - print("Bind address info:", ai) - addr = ai[0][-1] - #设置属性 - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - #绑定地址 - s.bind(addr) - #开始监听 - s.listen(5) - print("Listening, connect your browser to http://%s:8081/" % (network.LAN().ifconfig()[0])) - - counter = 0 - while True: - #接受连接 - res = s.accept() - client_sock = res[0] - client_addr = res[1] - print("Client address:", client_addr) - print("Client socket:", client_sock) - #非阻塞模式 - client_sock.setblocking(False) - if not micropython_optimize: - # To read line-oriented protocol (like HTTP) from a socket (and - # avoid short read problem), it must be wrapped in a stream (aka - # file-like) object. That's how you do it in CPython: - client_stream = client_sock.makefile("rwb") - else: - # .. 
but MicroPython socket objects support stream interface - # directly, so calling .makefile() method is not required. If - # you develop application which will run only on MicroPython, - # especially on a resource-constrained embedded device, you - # may take this shortcut to save resources. - client_stream = client_sock - - print("Request:") - #获取内容 - req = client_stream.read() - print(req) - - while True: - ##获取内容 - h = client_stream.read() - if h == b"" or h == b"\r\n": - break - print(h) - #回复内容 - client_stream.write(CONTENT % counter) - #time.sleep(0.5) - #关闭 - client_stream.close() - # if not micropython_optimize: - # client_sock.close() - counter += 1 - #print("wjx", counter) - if counter > 20 : - print("http server exit!") - #关闭 - s.close() - break - - -main() - -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/network.rst b/zh/example/socket_network/network.rst deleted file mode 100644 index 007f3cc..0000000 --- a/zh/example/socket_network/network.rst +++ /dev/null @@ -1,13 +0,0 @@ -网络 例程讲解 -=========== -.. toctree:: - :maxdepth: 1 - - http_client.md - http_server.md - network_lan.md - network_wlan.md - tcp_client.md - tcp_server.md - udp_server.md - upd_client.md diff --git a/zh/example/socket_network/network_lan.md b/zh/example/socket_network/network_lan.md deleted file mode 100755 index 8c53069..0000000 --- a/zh/example/socket_network/network_lan.md +++ /dev/null @@ -1,47 +0,0 @@ -# network - lan 例程 - -本示例程序用于对 CanMV 开发板进行一个 network lan的功能展示。 - -```python -import network - - -def main(): - #获取lan接口 - a=network.LAN() - #获取网口是否在使用 - print(a.active()) - #关闭网口 - print(a.active(0)) - #使能网口 - print(a.active(1)) - #查看网口 ip,掩码,网关,dns配置 - print(a.ifconfig()) - #设置网口 ip,掩码,网关,dns配置 - print(a.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))) - #查看网口 ip,掩码,网关,dns配置 - print(a.ifconfig()) - #设置网口为dhcp模式 - print(a.ifconfig("dhcp")) - #查看网口 ip,掩码,网关,dns配置 - print(a.ifconfig()) - #查看网口mac地址 - print(a.config("mac")) - #设置网口mac地址 - print(a.config(mac="42:EA:D0:C2:0D:83")) - #查看网口mac地址 - print(a.config("mac")) - #设置网口为dhcp模式 - print(a.ifconfig("dhcp")) - #查看网口 ip,掩码,网关,dns配置 - print(a.ifconfig()) - - - - -main() - - -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/network_wlan.md b/zh/example/socket_network/network_wlan.md deleted file mode 100755 index fc19ae9..0000000 --- a/zh/example/socket_network/network_wlan.md +++ /dev/null @@ -1,53 +0,0 @@ -# network - wlan 例程 - -本示例程序用于对 CanMV 开发板进行一个 network wlan的功能展示。 - -```python -import network - -def sta_test(): - sta=network.WLAN(0) - #查看sta是否激活 - print(sta.active()) - #查看sta状态 - print(sta.status()) - #扫描并打印结果 - print(sta.scan()) - #sta连接ap - print(sta.connect("wjx_pc","12345678")) - #状态 - print(sta.status()) - #查看ip配置 - print(sta.ifconfig()) - #查看是否连接 - print(sta.isconnected()) - #断开连接 - print(sta.disconnect()) - #连接ap - print(sta.connect("wjx_pc","12345678")) - #查看状态 - print(sta.status()) - - -def ap_test(): - ap=network.WLAN(network.AP_IF) - #查看ap是否激活 - print(ap.active()) - #配置并创建ap - ap.config(ssid='k230_ap_wjx', channel=11, key='12345678') - #查看ap的ssid号 - print(ap.config('ssid')) - #查看ap的channel - print(ap.config('channel')) - #查看ap的所有配置 - print(ap.config()) - #查看ap的状态 - print(ap.status()) - #sta是否连接ap - print(ap.isconnected()) - -sta_test() -ap_test() -``` - -具体接口定义请参考 
[socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/tcp_client.md b/zh/example/socket_network/tcp_client.md deleted file mode 100755 index 6a74dff..0000000 --- a/zh/example/socket_network/tcp_client.md +++ /dev/null @@ -1,45 +0,0 @@ -# tcp - client 例程 - -本示例程序用于对 CanMV 开发板进行一个 tcp client的功能展示。 - -```python -#配置 tcp/udp socket调试工具 -import socket -import time - -PORT=60000 - -def client(): - #获取地址及端口号 对应地址 - ai = socket.getaddrinfo("10.100.228.5", PORT) - #ai = socket.getaddrinfo("10.10.1.94", PORT) - print("Address infos:", ai) - addr = ai[0][-1] - - print("Connect address:", addr) - #建立socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) - #连接地址 - s.connect(addr) - - for i in range(10): - str="K230 tcp client send test {0} \r\n".format(i) - print(str) - #print(s.send(str)) - #发送字符串 - print(s.write(str)) - time.sleep(0.2) - #time.sleep(1) - #print(s.recv(4096)) - #print(s.read()) - #延时1秒 - time.sleep(1) - #关闭socket - s.close() - print("end") - -#main() -client() -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/tcp_server.md b/zh/example/socket_network/tcp_server.md deleted file mode 100755 index 2216f9d..0000000 --- a/zh/example/socket_network/tcp_server.md +++ /dev/null @@ -1,77 +0,0 @@ -# tcp - server 例程 - -本示例程序用于对 CanMV 开发板进行一个 tcp server的功能展示。 - -```python -#配置 tcp/udp socket调试工具 -import socket -import network -import time -PORT=60000 - - -CONTENT = b""" -Hello #%d from k230 canmv MicroPython! -""" - - - -def server(): - counter=1 - #获取地址及端口号 对应地址 - #ai = socket.getaddrinfo("10.100.228.5", 8000) - ai = socket.getaddrinfo("0.0.0.0", PORT) - print("Address infos:", ai,PORT) - addr = ai[0][-1] - - print("Connect address:", addr) - #建立socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) - #设置属性 - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - #绑定 - s.bind(addr) - #监听 - s.listen(5) - print("tcp server %s port:%d\n" % ((network.LAN().ifconfig()[0]),PORT)) - - - while True: - #接受连接 - res = s.accept() - client_sock = res[0] - client_addr = res[1] - print("Client address:", client_addr) - print("Client socket:", client_sock) - client_sock.setblocking(False) - - client_stream = client_sock - #发送字符传 - client_stream.write(CONTENT % counter) - - while True: - #读取内容 - h = client_stream.read() - if h != b"" : - print(h) - #回复内容 - client_stream.write("recv :%s" % h) - - if "end" in h : - #关闭socket - client_stream.close() - break - - counter += 1 - if counter > 10 : - print("server exit!") - #关闭 - s.close() - break - -#main() -server() - -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/udp_server.md b/zh/example/socket_network/udp_server.md deleted file mode 100755 index d86fa71..0000000 --- a/zh/example/socket_network/udp_server.md +++ /dev/null @@ -1,54 +0,0 @@ -# udp - server 例程 - -本示例程序用于对 CanMV 开发板进行一个 udp server的功能展示。 - -```python -#配置 tcp/udp socket调试工具 -import socket -import time -import network -PORT=60000 - - -def udpserver(): - #获取地址及端口号对应地址 - ai = socket.getaddrinfo("0.0.0.0", PORT) - #ai = socket.getaddrinfo("10.10.1.94", 60000) - print("Address infos:", ai) - addr = ai[0][-1] - - print("udp server %s port:%d\n" % ((network.LAN().ifconfig()[0]),PORT)) - #建立socket - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - 
#设置属性 - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - #绑定 - s.bind(addr) - print("a") - #延时 - time.sleep(1) - - for j in range(10): - try: - #接受内容 - data, addr = s.recvfrom(800) - print("b") - except: - continue - #打印内容 - print("recv %d" % j,data,addr) - #回复内容 - s.sendto(b"%s have recv count=%d " % (data,j), addr) - #关闭 - s.close() - print("udp server exit!!") - - - - -#main() -udpserver() - -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git a/zh/example/socket_network/upd_client.md b/zh/example/socket_network/upd_client.md deleted file mode 100755 index 26a5111..0000000 --- a/zh/example/socket_network/upd_client.md +++ /dev/null @@ -1,47 +0,0 @@ -# udp - client 例程 - -本示例程序用于对 CanMV 开发板进行一个 udp client的功能展示。 - -```python -#配置 tcp/udp socket调试工具 -import socket -import time - - -def udpclient(): - #获取地址和端口号 对应地址 - ai = socket.getaddrinfo("10.100.228.5", 60000) - #ai = socket.getaddrinfo("10.10.1.94", 60000) - print("Address infos:", ai) - addr = ai[0][-1] - - print("Connect address:", addr) - #建立socket - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - - - - for i in range(10): - str="K230 udp client send test {0} \r\n".format(i) - print(str) - #发送字符串 - print(s.sendto(str,addr)) - #延时 - time.sleep(0.2) - #time.sleep(1) - #print(s.recv(4096)) - #print(s.read()) - #延时 - time.sleep(1) - #关闭 - s.close() - print("end") - - - -#main() -udpclient() - -``` - -具体接口定义请参考 [socket](../../api/extmod/K230_CanMV_socket模块API手册.md)、[network](../../api/extmod/K230_CanMV_network模块API手册.md) diff --git "a/zh/example/\344\272\272\350\204\270\346\243\200\346\265\213.md" "b/zh/example/\344\272\272\350\204\270\346\243\200\346\265\213.md" new file mode 100755 index 0000000..f361f9f --- /dev/null +++ "b/zh/example/\344\272\272\350\204\270\346\243\200\346\265\213.md" @@ -0,0 +1,31 @@ +# 2.人脸检测 + +## 1. 概述 + +K230 CanMV 人脸检测是通过python语言开发实现的一个简单的具备摄像头数据采集预览,人脸检测画框功能的应用。该示例程序应用到了K230 CanMV 平台的多个硬件模块:AI2D,KPU,Camera,Display等。 + +## 2. 硬件环境 + +运行该示例程序需要如下硬件环境: + +- K230 CanMV开发板及配套的Sensor模组 +- 支持HDMI接口的显示器 + +## 3. 源码位置 + +源码路径位于K230 CanMV SDK的`tests/demo/`,如下: + +```shell +tests/demo/ +└── face_detection.py +``` + +## 4. 运行示例代码 + +通过K230 CanMV IDE 打开示例程序代码,并点击运行按钮开始运行人脸检测示例程序。有关IDE的使用请参考《K230_CanMV_IDE使用说明》,有关镜像烧录及开发环境搭建请参考《K230_CanMV使用说明》 + +通过IDE打开示例程序并运行如下图所示: +![fd-run](images/canmv-face-detect-run.png) + +运行结果如下图所示: +![fd-result](images/canmv-face-detect-result.jpg) diff --git a/zh/images/canaan-cover.png b/zh/images/canaan-cover.png index 031976562d7003b700195222bee17e7a04d0feb0..199a29e600fd86461812c35b554b323b2f945424 100644 GIT binary patch literal 41201 zcmeFZcT`i|voIV$!~$3l6%%D)!f4x~t*J7WuXV0FQJu`dCiGHl7!EucD7zhO7 z(9(Qh2m&1{1A$nM9XSk)z=s=w0T7dqp~gL?kxq#);E$tTnqVIgNT!+bk4ePAVhjZO zh|qd)*Ek@3ae^(~%4CARDx3D`j>R)WW|7Zln2@i-zcQb{_r4Z#R^ZJm>yr{6pSDEQ z9J%U46#6705+)z>Cf?4>Ev)=!tHALihB?a2kCGqX(`I_V7xuTu=Z|)Z1T@ym@qI@>IV~(Vs1-!-NKa zF7T`PWHsE+=RHCFR56BZyy8BQP^CVqhP1x6Bm#Ar`ii2k@4puAcLVE*vGS)2XdEe4 zckLx!(uQui!=b}W`;V{p%+?`15{b)ncaG_ywk0j+yh0<2kdrmr{$o%ZW0m>ZWg0Q3 z_$`kehmFNH-fFAA5mWN-3o|+;_4M&WEkoa{?u|y)E+YppCC3lu&O87f`HLPlf4L;x zPX-S&Xb%E~oBF#{cu@)!51H3BUR@&AE*JTA)ae{dsmKQ~Jq0~hddnkTTS*s+=}7Go#x$_(k9`yc=5LxgV}|ycj)LP)9lR(r z_>~Fa(W_0-D{-CI1IKgyJKX=9iC||bwUk|3LURVkpF0@lMLlGen6QES-7g8Ae*uo? 
zx%nZIoGJ>9+sp*p^Q6xbQ-g zsquU)nA$XFoROgrxc6{xY4E%q&pE%2=jqAV)f2ZIx)qhI&PF;quKTZB0glc8@7c=r zFr)4wRsigTRbz9#jullS zdvrnN3IDAZac#bl4K%s_6WX)XZu_Cn(kyc0DiBM$~IHrp8^X51Kq)m3(=6;}CcdeE$@ZnqBy{-7fsNxwZ zkHWdD{TK)3J3mnE;2aMB+SjD>q2cB4`!vram8&(psv|*ng>J{pZq3+c{%|?gx4kG- z8D>3-w z#3!_8|6bkLkitx}`*ZOmDpvP!jjF3j z7hkBd;J(+$na;_!`&rwK!{3&NE_vG8!ZLbRFJUIMvS5p365W8J~>ro#e@Qt}rN?H@C8~ z@B@1Gtaag^z#&$p>>I6|^E0lqHOdCNJFxk+mj+Y#$@uKX6T`y=($RabDz?O01$NA( z{>gd`i=9t%+%xlpLj9x^RcPhol*jA-s=DgW^R}tcsmcEl==@5qPB=Q0BGVZl(L!e$ z27GP&)xV8o(ll4OspHwa&Ic%?h5G8f_sw%|%rLE=t0wO4G|wZxN95(mD0H-#o; zux&9Kp9QDFrg!ZJeWl)$U^e{v???idZL9xWYwiO->EZ0Bw<0qnbg_S>e#|wRC++sY zOY5^|CKBue^%AJTCxH-3F%Z7$W|3$F)o@?TTU_xkkj$~J%O0$T^Ic4b21chmgq`g^ zWr*Lx6PUy!^-dJtD{VSHUM1&_d7OUH`k3QRHK#(1wyo(l{#m!U_%zQ!eR!&;eEkwOz92c1J?yoIox=-A4;`FjJnivfXfb%2hi z?dCC=iz^ojN}=2o^p_Pipe5ijB5TQdOUKg(5}B3Uq--5pYxmKULc0$+Tc#e0h(|uF)7QAwAi#hHx_NxRx;f5YxbR+p(Vyv|75p($LVIA}#K{Yt>%!`#mPb4< zOnbM4i74GxpKa9QSQ=>aeGQ57FI98x_SNe_N$-Pk00ai;S?cD*PM&eH<8b%;I0o(K zG%+o2`t+{RE&?5KKZojK?ge4n(o!<&Udtm?o`9dH{b6y+Ype?mVgJjQvboRBaKJuS z8iYe?>P3|rDt*=Uxel2@B#$LzzVibQ_6C(H7pR$kM*XbaxN9R?V<9zz)}%GMTH91x zw0+^6$GgqrOCbV{=*vIXNTxeyYhMcPBe!v9pZbn37|XCsgq=*ecM!PKGM2Wt%B|{LF^+k)@S18BR}J1tYlvaD@fn3P;(^fq~;G zy~ch4J|jVoy9XcL?TLraFM=)IX_mhOu?c@m`Mv#y2MmdSb2i;bp&zi1OTR%Q)>kt{ z9*>I8KuIaii1%Fii}?_Clt?dhmr= ztabdXfMBJY`Ta*(BMv*gxUCXn_2-l}_63oyUk?gqH0SXfeZLkjWO3)7dneyGko5dX zgKQ52Ht2jQ<33K114*g{;X*#Y+{}`jSLv3nKp%mv6dNrQZ<(p9i z)?UB%)-$Qe`U(sP-=7N9mbMW%3!h$02h;ZfK67Nm93bT7Ly#Rd5vA=<6> z^`M)gi-DxNLjWJpJb_$XInh_A17bvsixk7rGONVMhg zsEX+TioyV=2xyc5vIV9`l#=H8^A~@LITxkfY#!C)4$8W?ZQW?TYs0~F!DrglL8r&m z)Qg^gXSEfTyh3WU&ZzhY&$_Q;_8Z8G91%(nM_SyRt3|{+t~Bos=EHzk0iM8SuAaey z+Kc4F`Nk!d*W9R^nQo(2vj(S{B*IM@cTVoKaxXNxRW^rOw747u1#*GV z!tcS8b1<>vWtnELw%T?Fmwh#DC&eo16>b?t{XjE`sQ7l$UK+H70R*JU!U(-z!2~7A zH1fe`r49Yc&_|h38A*In0!PX+jbC9c7v$1tt|oU&eP-2ctN1_6VkM^PAMMt}EG%kK zAqNW6k|9b3H_~C+M3o(JV%7Ix(dOO$IhdD!iDft>U<~LV(jIA%h4?!RZk+uxCDN!0 zRAn(%-w*s3lD}!;$aLThU2;t>a8EM7xUtcV1`&@!w(EsTI)OVrCMOr@uNr+DAr7nk zIh6yu!?T@?%ADOQ^>23hdP-!@vxOmV<5N1HG0XmimA;N+~cY8{uUAzoVHSGuCz4HJo@CMx)^$O*KtlqL5p^X`b8c0PN?9-7^bj^#Q zN)9*r5>Z=yHP5Y{12x8a5VmS@d`z{fDL~XW3p}$@VXx|w(R(H=OoWU+WbDxOH;HiW zi^9EB?K7tCM@no=`~r&5R4!wu6D{2PI|Boyp_q+={vHuycj><_%O8EHnTdYgQ)yzapmHU0?}$8w}flVD1)(9pi{UJGpRO% z0Qx^9Y^4)IqbrhK*~>~EJ6S7}M;=!mGB-Vn4JtjJCu~O1nGjG5TxR zA*?UZFHN!!n@3Jc$Cn=_c#y}>9q!40uR;1aM*)HeO6x!X(8RCyBOmG=$@a4wiGu@m zW*^>Q1klh+@g-0WeOcKwj-1RAQ8KqaYX^uw&X@DTMqqAzXY;5TnbJ36Q7wj7GvgQF zL-!2fam!SA2w)4J}Duk9mo2{^;~DpE@!LqpZh1PY7b?ULID;K|U!>ur3j z@}}n34Mv`10V0H4!CkOYA{sc`8t2Lu_x#(R?(DJ#Vl?P!jj0i-;YlMsLWL(YKQ6Aw zKz4@3lva|am(rls`|I3c2U_2#_eaymMYU^V-IT@uQGiH($Whb)uk-D{a1(ory(|;4 zsUU~-d_=I{Z-4}ai&YqeSE`Tp0`2UjW@{8Q|Gz!+#zh>=sLFniPUuFj$r{BQ9525Q z1>|zG2Phh;{mEKbFbajsuGtS|kt`ro@Oh4#<4xP21h@%15K604APYK@tbY@G+Ytzuko7Qqn;ezQzF# zK=b-xb7^hMB%)275n<*)`GhOFmV<~U&S70p=?Qt_^L^yZ8Tw04_RZxFE=R)op}ZrEJ} zdgeXTfKNnh+}TdL+E8ay1&v6ZlD|w$J(zvHhq0S(=obBQ%WTBm5=!9`rsRMTpc!-E zVD|vf>s{GPJrew0FDw%~>JB_0&@(n`ZGWEIT*TUAPF7Tt)Pg#S5M%;!0JS0w*sr&> z_QthH;YXCFiaex>=u$n))Y#j9P3OUD|L_0*M}hyV6!?zP@l2J;s1_MevE^BYlD8`= zp!ZX0lN;nU@mc?Pqd@;6R6PeMpQUx=9<4_}GHeYixuohW{%UW_?rnwAES7C536Wgo zPO#=Tn#>T9Yt(a5)ZL}pJD<=D4%E~YM^X(&;fB4R#7OR$(CVW1{_^i=P1?J#+6v2@ zVqU{!+LkR6Cb-+!AS+3fVw|!{*$L-XL5God^kALg5^Eu4Ni z-d1w5exu_BWNIeU?y0bj5M0$mIkuX&?SC&1{5&OkV|?rC`Iu^_{=e_e_aIhXyf_Q> zqX_f%Qah^%+ZrvgiB6?NhhW9ufi-P({ymL0zXF+t;k^F%n>@>>`ms|5)0fB1-Itor z*BB0Pf|IX*4*V8vm^MkLUz`I1Pdv-ie!Yah`rUPmR`HKA11XmT9Gvh`KZ0G-b7Bcn z@7qzM2WkQM+%)yq*6#SPSt2Au3|r-1|Zao}MV9LdZJMAm*0wY|Q7S 
z76%{KK;X-U<%JJa>ubH9nRfN_%o1BuN8>TVaMcUKCHB$;rx}{gXc2Uq)FJ~3ipsrh zF?1t4e7jR|Fnf#z=QY*0l6sh@ryTiQw=(zd?`>643`3!ZKVf$|xAPNbpP@SFIZnVu zP2Yh!hRJa7UIkE5>qlF&kNTd=F~-jU-<5#cylGqlx;$7k9!Q^t-HCW!W83Xh|_f9_NX4hE04rB$Cvp9#6*cNqIV$2+~5+VBp) zRQb^GQ_OhDkWTFN&Vc`}dmL5%cag!j>*5}G#6xlzCWjm&d)p6Ke))oDdbjIqtF<5M z#_bpK5#VFR>6bzrH@4fZ{B3j&{s5-sp9Sm=r}UQJam~v;@v6&>1CQ0XQ{LAlv0hCi zmz$yMj=eQl&8V^lKNWX}B@P-wsVe)UzD+J{tMn+=B>?@DGq#gE-0KJ__z7LXXu5hr zy|o^qyF2xzFQ8_Dk!#YX?_i6sC1n1C)$A1iA!A{lPS#c2;B7tOH@@-(m_LD4a=Kn8 zJ8hHZ`$%H^maejds8}d0+LnAdUjV-^LJzzx8wu4DY^99_(sE{oQmd8~BdT^Xjbrvp z!RDB{O)nBrjJwbZOa|IFX5TD5K6DY&8M0XFYEdes^||>@)W5$MWfTd^V;}Sz0JLYr z8rx{A=(wxeKE|aBRD%MzV@y;Hrm^a^MCtfzno%|GbKAM%n4bc|((T&H{y_R!WNuF% z(Q0J&o#xyo{P(nR2P3(EY;g9QO(3R-8j8v~r;g)B4%8I(V-I$2Mn8*i+*jlKX|?s| zaJs^WRtSk)YcRf}I6S*(IZWHsAk_uc^wC6ZK=0_C&l2PH40p z;A{Fh_9FiE^-=3rjE{y3co+R3Y5z|8#lMf4jJ_}(e-8);CAQH)OZ5SaBXbc7XVkLY zB!c!gnSDm?fA5~0;Z0KKSikFuBbKa*_WOfzx|9gv8dEd8CFs`D7DnROOYQgTp~X4s=Ot*Q&0=^yy;I%lFjZ$Dm?hqbZqdAT+ct$14};Ibq8zq zN0lE*tAd?vtnQ&|+rC~DSO0}x?F}F~JTW}+xHf-!cjJCMv+rD4?1$E+!t79+>~i;= z?DhQnlPNmX`d>IpZVCrjzw)HWZ2#y3Pek~Ulb0G*fHM1u=<#m4x$|>$}+kOG?ENEFB@mZ?Ck5^g`>vvj}YNlxeKhl-h&+~QNXgn5fy4T8?(pPeU8)!n3uhNfhGtL+lcAI- znZk(DM}H@LDElcWM*gqc`{lZU|MT)t;^F7Y*A)Ib`E!74JAcq^vg}miE8Q&FQw_Hj zcm4JIpB3*O!DCj-R73tYVDv*jyIVpQ^54YzooriO&$sYe8r&?p7`{5Q$?ks>U|OAP zl5PG2Epx21n%%*=ib|Q+gA>-egn!E|5tO6ornze29r0DYz%ANxCM#a+YJ*~?Cw*!1 zf!_Ao>V6l2^fk52M z@k?+@Bs7?m4OX*^G~O`=9DaDDBalcz39`TQu2nCZ!NZx9DT7n}%f(Kh!cwz$4a<>{ z`enuh5PH<8uWbBS8Z^4q ze`E3f*7>KfljBi0@pn6TrQfvLa}T&I{gWf?{e~i*IPTcS$v+cU)=MzS&%cyyZd5Ix zxgZ$UDx-sQu`H>c+E(p`f7dLLP<9@{wF?R4z~$ z{<&J*%DJ&vN>TBgEl{k|c7jPeA6kHb>>?^uB@YMRXV8_vqukdR*~O$7(LZShm8N#I zV}qj_1cE^K@fH9ozG;n{k|KUHfG_&yEws3;WNq7MwqLm)z+0TfD^`&F6L_!ZO*PvJ zJr02w-dpK;58xk!vNfD~SJ)ojhpgUFi#q$KBwn+ur8J=xUt#NxRaEW>+n@WDlEqAQi5)?9*kS`MbI$(^7C$2M8Cn4 zrK$foCtBFO-3I3TOpl6A7a7kYk>)8hU$8r*(sUPAv7JD7E2q zX8f2k#@{3mdJq470C8sKa})P_&b*$qRJ(1D%Zn{+u`Yu|$5$D;&`~Jfo~D4=Dko9E zINOJugiJZKrm5(KstyQs@hj;gFWSD{_LX)9rF~+RXP5HUh7|KMLQik^D1lp(?67!) 
zGGNRWm}2(?-eNGTTe)d+IvzW#q!#B(D#@+y%nF{SBS8wdZH|5w1B`qse>dg({6Qf$ z9-m32!L7}aldM&i)Cc&tDBmpI63@FHi6q2je6I^TtYrm5_Jva^eVyYaYk-n5_}O=t zD$-awJqn}S5{}_#BrQJ-xqxp&UCdMa$D!}3;TtJT0Q0582eYD2xfg*J<2vY+=y{i^ z>E+tXXZp16!-Hd0EB*JnBAf1xZzqQhS+ZJW!3`?{EYj%h;;1C_nS*elRrRsCwUIV%zD42>`mv%#u zueIp%B8(e_BdG41sD;^2EJo2x9Vw-9n`1URc~?dAs26iK*rAfdWP57ipGc3dFKZRd zyq@TN;}$w{Z~T>3pQ_(d;96XqhJM6dmW$V8aHC1uAZBJS!Fokhuq4jHeKr`tscnpL zxfduTv`fkw>awc2ho^I*dc(J6&&gphn^92|x8&Td6li~HD~L?S_|4HJUq$53Gj7DD z?FlY-^0SZ1J2ba<^0ymqPus?K~o8T2(qtvF<2BztjVo8bcQockzvn9;Ip`LQDI zs_v@sOB&FBr*WL|RUAwD+;0{AAf;B)eItO?aY+W-+G-{8IbGse>i*ggF}zy;RV0tF zMHgio`j1(l%zfR}BTjE*5BmV&s{$&>YtVb_a#N7KePg6aoSBWzoft}azZ0B|-C{;= zbCdi0Ds}6r4=89TX!N}#yi!+a$a6;ZUWh!^dDE2-q>UUptZ7$AaXK2;cO>o0d|- zseuO5(K~0&9f+#cFB?%8f}n-DCWG%yE{EnL2GVSszAWS@@^MhfTvzBevR?0T(y3rc z_nHh%Qz7clDYSb3&3DKjO?n>hV+L9j`PA4~Jx+;e>tmyab>72AN2R1rs+jee@OFP_ zVU#T}Juz(07m;Cy3Wwd)Hhf-5JKN8Adq=3ZG{LgAcenH00eI9TsmN+&@xWx+CTFoM z{?-HctLg*X$660zw=K$)yfWd+2s1M+rGu6rze6)ce541cV9KxX$K@AE+4{#vL-!+HPX&$wVjLS-ZtAjZz|Q_IhWDnB;lTow_bGe2_U8csOCzEtZIhc3-0&w z0?NxIVOFBx)Ht_nko8tQ<3LSXf$KD~iS^?~xJF~|Pp%T+EbPUEdf@5FZ-(5Tiw;aa z*$gHxjcio6U#X|8Wh4>6>7k<^c)RA$*d@39N>-#^$1-P=ljl&VVz%-uNfD0<=0*qP z^~LBo1hX+kb+PvV+d@Vg#7EQqRB<70ydDl$6t=?e08Hv_)Enzoo0D-k;n%!vx;Qg% zlicO5^R3}v8REUPv(eqKi0m`qR2xW80&l7-o&|mp8S8X|7~Q5cXIeYz-fRGLJ;SFz&F~u*#l30tFv#4an64oPws!g%u^sg>t6ON&H$6X zaR38x_qrFXtV=j1Tcf*|)8uOmo@kW{C~NR6^UW_^?ksTLblZ;KmZY?Fmm8lG>D-CU z6lb4veGTP2CKKh(Gj=yQZw1k1$qe*lZmct%s(KvB*@cH=;ydCD&?F_{5B zA}HHhs*InI1Id&NXL|E%VERc>pLFtX@rZqsG-881Y!tb*M)!TU;m zSiL`w=y0tpy}D&OHrSRP2{Isn96-Si@a1y3IpJIn>c}kmaL+jBOous=l&FUy{@3@< zx_vTiUvTAga-py7tgs(eTC?KTz6Z4MRPNRi8n)iYOj}isqh2^A#!SBy5;pTRXxdbe zU&M&p>(znWq&E!g!h(V^$YqNnV_(&3Q_ka5h`_*EIn^$_?F}oByrPTzVePF}w9=4f znFDkHW@mq9VUlTT%erv0p6~gIMD%SQDtejNOOk!c7C9^j3vvc{M~UjywDkAxP}BFk zoSV!87&yu-#sk@em@{?W?(U4ClcF%86qlz*XR}|53r8#Mq@?)v$cfzOR~uAC>{4Vh z7_QF2K6&4IgV((^gkf)2PAAWg$aP$=AMW^jnxa&l+0Ed8ph!b;T9UDhJ&+{$0~ z^Q{MS7{g88GT5fx?!{TRg0YWNdMVk@uqPr>ghVM!oXywTm>9qHS1zkX8gcryY3LPf zn&q2&2~M!kQO?E1t;!B&%L}_rba8I*WgBff zjckIjvJBn#Y+!H19hiGUqew-k{$1f>@4Y#Q%xG_RQn~ik{cpuP*6TX5Tc4sOPx$5> z4Ta)QxjwjaAE^Dw@-^HNeScP@ebRbZ!MV#@+ARtyEe4ZPTD5i0-P`)pg71%YJ{N=&EaWBw-{`G^G@q*^5 z_L%j3^f%<@rjDT2g*&41o^ISnz!YoeXhxMicR(e55tl$6CGyO0{QzPi;(#MJ-mvAs?|&hITx9 z(15<(amcdQO8MFMrEl>TDHahU07oWz(U4IRlUfidzZ(amCMmLn{t@2D#(7f<=0M$R z{aascZ`xvn{!zwv-4l34#_-~JZy$n>=no3*aqgP5bK9?6(v2;VygP*uHh@d}MxyyK zR;PsHFIyPs4Xn97Pkp!z28kJ<|mF;mIQ2GEP zt&%LiiO^S;V(;9Pr&a`PId6&HwL*&7y|4rBg!(JLKn?+&JOUr%B~_^eR98jot*@3N z%U3?$57IW@>`83a!4g*(Mztg%{_h_=%de2O@i9yGf+oOwf2pely(*pRtBa@8!aQoF zol&R$f%UYN1rcV*9R1#Nj@z3-ab(~AV{G7Zngq{^rIFpu@L|VQVXF+Qj7;ZL?I7fxF;fGGbPH_YLyrK`x|8Ls@aPc_zUCiw6)^}##nQ&D}D?eRIg zORM>&hL+OfepzkX>R^NFROUALd`v#xi+1My|uuxU5RabdP?k8@%j0bm#tP&e9K!)VDPonRQ`YtCtw6P0G6 zZeua#%y5iZ(c#NIV^Yg+{6)AeSBvoV$6a&fd5|xfRm)O=$B&inZXy$=r?nr}TiLzb z8cMuU_{CCU8E#J`W)Q)prwx7A>|(3brAAh=cbP{AcWhtzm{H3-VQQk)k#gnVGjZdP zlA>LfflVvk#6;%<-X!Sx%Azf0IewDUrT4+pC_V=EpGaxvKBuF4Y-KBgPg$zgY)QmY zm)Sz(CxIpf|H{PV5_cl*cb;F+gM1~RgxO9Ot;$dKWYDk5zB7QK4cyp+mSs8bArXJUU*Eqqf}Bwhv}~tW2yN76O+*s$=*w` zr1BIOG_~9NI`XiYEG z@}}iqiX1LHYPNG`-?)iYSC^hI6%YHUq-*K7KXY?o0kh~x=66tJcn%JCI~n<N}6h>{;LfL8c{0TvNy?s*vhFD%omI`e35~f3li-ok#bJjQQq3=|N zqKX%PRS>XSHuR6q>H~h=gunDBcn(Hgz_&hc({B!5UHz|D{q0(>gr80Sh@*OaRu2AU z+Se{1a+irJ-|9G;%FcEpUgl&~y|dP z3Vs=0ZNay3%wO(=)1LLs%o0>~k_90@?=?M{6&Vwmmp#zHigj<^U-OI$(qMX?BRxJs(X#pr+P`f3v>NV$G}1_s+r17? 
z$xxoX|FW@cuj|^~Nk0FTbGSpI%Spb7?<}@;$Ocdh=rmW%ZwC2#g>hdT{zg$KYx;vx ziaV=t#^ngiQO$1v_ryAu`wf^y%#cwO&KMJ|0GM@8y}S#K_x&oWg7*R+N7`g0#(Rpg7xtOk{iryA-Zl=l*oYb)ISQGhc2_?dUD$_> z1#?=`?-h5If1Y0NlBv_UE6r)95dOnC)4Z?d+4r|utM%zF>ydxj)t~>xd~+}n`NM;8 zA6F^Mi+RYfJvQoBw-Ti%E}1$kQJ#KY+iG`>vD5IN(nl_?UxO>x!mbFmEMUra9mp9y z90+LJ=>7K96YWCVi05jckFdkl?KGvWqjyhyjUZJoh??yi+0ej#McE;D_ltpB%~XrD zTAPH3dj|1!uoa@{e&gy$aIZ?H;UKF#P_u1sU*IX;WD*C!@cntFWb#7bN%om%A(2}q ziD-7qic={rl>e+L{?#t5+3LOLNF*+ixW^R`t-j9waANSxj_;;M=gKI#Mc9>Jq{sJA zlIcP7@yQA`+>lf_4)t(85OLIr&S{L-dyl(VH|fBq^P7QJFsxiu%5>&uw#G{fg!_xn z@$O2iYTzc89h9ZVS4KK!Nbog~2z;mW3MGR z5iD+SvJ<5O>2h(PU^PMW_l62u7QLwkeGi6kHRu7jErA=hs9_VAEaGfiqLgOuCIj8q zA4tT9hkyuPlC_F+;Tg$fytMGN{J3>BatH`>_8@3}6JjWhgQ05GD6Qsx5iCuXT|M3K zlhj)a^BgM^`<&I54d!tdBoL5Zw?3U;}B=P}|0m>__I)iWXd_BEze}LG( zdImVwo0nQ%1I36%FnzD#Cl-XELUk@P`>3spP;r~<%t{T7ksfa%q5q_#lL=)iJrKK;c1F2~a-s!*~ejDA9EYaG@45+8J;mdZbc@Q{sDBFWZ7T0|#WAxepC zO?JswG9`OMtExrZ8W+S(wgMH-1ns{M#0&Aso^s#K4yQ<3ycn{kaXh2@>Lj9+51XuL zb?hQqeti6$Plif{4&koiLOolkm(^Y7I!=(G#!M;9)Qi%p`XeM$n?pLDjS^cE=j~sS z&ruH)z0}0--<;d*Yf3TG0*wleJghz~(F`?JD!&5*f!ugJ0j2>x#h=+ucNRrVnL40? zp&HG^tGeyNFa!4{Yp_^GswP{j?&d$~&To$vw zQd1wLOl$_l?ir6boSrFPEIQl4tK`=w5(hu@L>KRP-6kPZch46Fe2pml!}%PD23Q}^ z5S<+0v1*;0-WvDCF#wBew_*6{%^@AVQL!}bMA)D~;c1{D`|V#ECc108gNDQdXpl}O zg`nM)F3O%P3nMLvU!$|4x!moWbcDKBfeX7=WU!2B_2#Xj^C#?3v0q=NE^06F>%%N$ zvy*)4EA;*Pz?Z7wy>Vk;nw~T>FshYb5;I*)078~$f_@=&=oFhY+~HI#{7lX}1p=#A z@~4^Ot6smm?{S@dr#;gW5vcTV)soA_PyU!aRvW&S$7*EHsN2|p+l5Obe%9`&oiNJ3 z!-FfIfz_;sRj-a@q}pAX>&!99KTH|w332`UnY^e9C&3e(bgPML^9y$9KJtbl$7HG` z)XaYwtX4nAvy$#}I|N3^rv|n}>vTla=ajoHFza>V*(=x7;@gbF;(($12%9N%s9mI1 z)_BCNr9~cqAC3n+b)agtS3Y8m2^>~p1MKV*7f%j$lkV5&DdgWt9hI&5S~%4l{(lB) z3|NBqGard`uDU^$-1-ok>oq5uaT;+K@5Yl}AI+n}#_*mAyp$lvYEl-}b{#*)ASkA1 z8=LpL^=T*eLbrs%GiedB6I;C1OS*RGb15x!xdvZO(KF*nEB_F=^)GWQbQ**E##U*x zURq1xi-kM(1>Z8RJ&jOdxEP&!hf{Y%Rgsi#w*X5F`&4`M(Ug3{%t-m!GEe+k z^;2rm%Ynk^-^V=WXFt5u=NLuaoaIs^l0WB{=>VxL-yu-qnxN9&!7x0l~SnX?&R{^9EQey4tM4dZq*AJotfLZy^oWow3_q&$S z!2GTzdhN6Cx`*hJn-Rx7=Dt$Xvc?rf_vViQCKeC4*iJ>Mr{w3^o!nT%1f+3jfR%p| zAJ;s}$`-IQUK+ z=M>JQV9@UZLQ~w*Ma_QKV9K0_OFV|un9>t(CN{6D=Dc!y6$_ef)JfD zZi_rp?JOqXZjMu(5{||$drz~#?>*aw`4%ZjWokg{TvhLQ5>y8Jy)%I1@p1Lcw;Len z7jICoA;lfMbFq>zg2AK4l{-VV+Mxlk{XqI~g91{Tf^{5h_|0blh-=g15q+ioR_uqcquCWz9C*(fv{B7AdKD+sjQs=5>dmp(=OGnjo zty&hH-yqglxtp#&RmPCQ%;+K+4qx0stn#q!OQ8qLNI58=n6aq;TEeL|-ANH^s5E66 ztbbo;YJC$X0$+4{|HXC{f$Dl9i48aP76F-u`tymN7EEOFO#trn(ZI(-d8_F&rPa&# ztd@h#Q)@9=HQN{1Vi0FKM1+3TB=*C$uuVMlF^HoD56QocUtczBkBOagPpbUW9UKyC z{4Ob`VJnuu0~T!fr?vLUX%+rnU`8FmYaK%J`vxi;;KMH88eqPpppz$ z4l%e>=&kfr5)*MJDzg9zkTjoq>{L^Jfmr}d7^J+7(fL_1Cyz;&6#vv%js<6PCk+^J zrm+WstaN#$VpFU_HNK>3YkD4|e)xp}fBk(&Pqnr00W2$A<6-@263~X9(3T|?SjXjl z4Qc9SzC2nz3+TW=f&TH!%~`x zAZl~VW~fN9aA(NC3Q*r>WP-e|Mo+^9%3h`U?eH|f2^yRhUF)KxJd5DPcgy2=AWVKz z>~dj4U6A*j0q&w2pB8+=!^8+fJ@AZyR)<8<1y}tgL0b)j6L2)ip1li8p%rdk>R6+> zNV1F;d|v5LQsn=?2Y~>6@J4u~11~+E_ccBHsl{|TK=U;m#fJk~ z&^d3_AqW94%qq>{VYu8kT4R}YHE2WW@jDV`lA_S8{O_r;^@6%%0%H}5cDs8O3asi> zbTKwpW2(42$1Fq5!2nWUEouh;;zeQi_Yf}PJc|Bxx#J;dUi2$Ndel;EV%z#Y?CUS< ztH0rk!uE`FQZ6s%%~i|xbsAG~Rp%dmHxjFRisGrTHQ=X6#XPUj>j8K z0y$_owyDabRm)p-u^IwFVZ5z91#TwMg0)Q4URMFIR(fA7wtl)H;f%sS^}-RaDhf0M zPJ5&juG;~Jq((VH?~83UokjC9*ekN2OoA2mzaMxd00{8+#ID5y8ngN)zj(F`VCLu(ri(TYsW|i z6a2Xax8)$g>qPdrOOV=dO|bMhi@j1A5gunv`!+AGL~W`siOLuCNo9(Fk2BneKcn+X z{`Il*A!ybQW{kLeo(+G-&7ypEQ2Uc&=Z<+)ybg}$YP3HK#ChC@sA>@EMcQb3s;$ZC z6k{Yx^o+hFQtA+ype0>C+|9sXLj|1O#DW5b>iN-rdKWf=b+Fg4y2TFZwzp#ORNDIc zfc&VX#%-XoPvD9H-{>_TC#w*lU`7Y2F#IPpJ^+gUBqIGx+^(VMN^ZXO+B03@BQB`J&_;%O7M+nRg`11 zevxv_u5V<#npFw(%GKa-a!p#;ka5K 
zu=@dic%K`A>nT7HE>H+BQFnphXKz^=HS# z&8O@*lRQt8dztL1Q+MM$VShywC8`Wo(b}A+pdil6$oy;p+@l^?U9=kHljH9pxx}39 zaib}^5eOt!tgIy$c-E0W)No9&h(X18&296$q%b=}-M-v7RQL+;!wS)Ab?V!^m2Oq( zn8fnI^lsNsc6eYALdV-!S;zBoEjJRXT3bNH#h;>nRR=%E(E$YO#g@5Tgh@!|N2L2m zsOec8$i7}GP|U9nD9jyNYB;VBgS~XbA9HD&kQ@+#Zq~K7x;sh0hkLilmD3+WVX@Ez zUN_e017qjK$cgJmW`~num5Tr>SF9SLp zmjaJ`qrVh~4QvvQ8kRNg!Y=#j*}do|k4EDB*TP=%OuSOp;MPW<<^wf1%g6p==K;3x z&icgIb(qGY9S0umujvEO)FS7(IA7i(QX9WEQ-LgaqG3|r9UXsXV7*ERx(0qWUvv>c z=61`d@N0oU?u0Wo>Ze1crH@9#oqu72h5-Va1UQE^Nez%THGMn{Wiw`*z$GN-LZl>D&G)@}=L`?NADN7|W6 z_?c+XGUSbFEgbe%RlZlu9kCDpm;5Pe+>9IOG!tA+9GqvqbfdlJn7KATt4eOrsww>P z`Ntg@pmcdq@-(K7#*GYMltyXbY?an8M8IEJ8b~{5ga0s}I9yv*9~p}RZD0DzgY~C* zdZ28wHB8Rkjmwa&**^C!4dECbnp%Lrcoyd-79hYu%P;MD9I^B`B70m@HJxX!P2kN} zd@Lx@*n+2o=DVP70;I`_hrU~hbe9sf?01t+4|d>)!imp6xZN$Mp4$MC`1GMXoaA!oc5?t7HOhI>q0PUtJ%RijYlW(5Leo+rxfc4Y=R!>SZr zdd;207M!|*Sid>&kseSIN!@961_We(jI30itu{eW1bF5Q0E;;5wilYF8Eycc0TA!x z-ri2tU^C`EY;fUsSF_SfM8tmqpLaxk7F7;*g)st@&PP)K3UtrC^3XfS8M|FQ3wJY5 z=u`}lV^{Bt`u!Gw9KO=n_1J&TAX#cTh7w6qMQ^>1n!ayxX8D`AImUA;n$MBK&`7gs z$_N5f@w5UgxVEjnk`}C@Ps(_aR8Bb7`El~}R5Rcm9oSY|KoA2MFT?$qiZ6;ZMi{!R zFsU6@zAUsZ@bcdmXUE3%-(w4W@6GfZFHqs z`yA!lyiEO-Kdo>Q8-OB-UHm#@{^LPM{uW)R3Z+GEX|ExpZ-VFgI>m}TI|J0-zVn{F zLs9;udn#^eD~?f0a88sI^vQ*~D|&S|>iYF2K60L3C=1g}X=(H&cjE@gsxA^qM-NUd z(-0CH$GmvB$MA#HGFz_E#1EJ~W)QE+3@i>D=zK>ZMt|?3SI`)#fh^#yHgXI0{0zj; zpAdyWAcnY)Wr`O+Y9TjYJBMF5ikr?~KPc!}zmDjy zN+If%!Mm{O)lKZ-j#3OM7M;Q_EK2RmvqRp*Q$W?OJV~ZfBdIaFx%`yws1hFWUKU{+ zZ8sEkq56H?osOn$LIzP>cdYJ177n^<<^2U{MvYY3Gep(wNGgxEV>d}3Cr(!ptda}KzpVAw0J6L@`& zdq}QvD{Ru0<#V1Xmq_>)cMkOp80&0-2_~@@9igW2&+2Qh6F27n9>eqw;%L*cA7b6P zto5t+gMG$SqQy3=B;nCUwaC`1_ z9Hm6i9&}7yWRAS2r0%#?7X$NXS+CuAhf3g?IYCg`H6~NqRKIX-w{Z!`Qw`RnmXn-}9}chwpO7tr*H6${{_&6PL4|Fc zA1w2Zxn4lt7-&NL0r6PpI?vT6n%z1^R<0ZVu-E$DxzTZ*_R1oYrpPZ)U2jw3)ys|P zF)&ZWt$ID@M1w$BfdeIW!*Q8yH9`yux$l0cFt3K~V65WM=SL_1&q7-EU~`pvT|;Iz z?iL1~YVnEsH$=$R?!7sVj@E1!P6MM%)m=oqnxE)tA9I;wD+aoG$=JEb0|CbX?i(%F z`=yo+Z9(9WqkticZJ(X;-@R}kf{g=!L_kAVJ&%9CIc`v>iufRlv%|7A1A{KYJ>Rs>`t)Zjdcgw~ z6>8$413Z0|AQ4FezgZSgo%Za9VDVBj?UcGlLgx1)e}7Me`&`!B{-ITL;brK?I{toU&r zIQ~sMr7O@1QncK`>f-ijAwo>`65Il!QmSDIja4=#?N?eAm0nHHM=T?)^@Hck%j-&oms3E?F$B3=x;#ohRttx6RfKV>K z@$vgOyl}meXi9xQXlLEpbK)1qH5Cyt5MT!staC&~4=);_tyVfGaK@?2rjx>~k{`GF2IzaJ|EvUNfv~f)UHF8J|6l02>g?Mxs=vL4~ zzb){J5A;Tbb^Or$#cTS?>@ndMzIIDajdHo!YTosI-~7v)9;cO5Gl4vEvXJjOUF1LR9< ze~c08?UB61(5+y9&)PQgumq=VQbl}^U97c2(zEZAas%zIpz0io%k5ICU@geUDCE7q zj^Xx7tK~h2M2Xp?qG1ZbH)D2 zUdDM^Tz6N-yU}_ZjLDnWIE_~q29r*v0!_?2J6vhUarp>;SPoauD)rd>5(LfD{$7;i zaxw@F>@Mo){X@jVZbL{)t@xkB(0sj@dK>rMfv0UBKdKYrI}LglAFK-r6J?b}WfPpX z`n%qZRUWIVimbTnz4|5n4)sHv61Xnj*fi}b^Ph7}XR0VGJCLFaXiwg1v}&&12nw@n zI@^~7F8mo4M}aTQ4$9GuTdvr*g^wKD?MkQpz6)zpK5lpPIqBBh$<}K09#{ASptvea zgrGYt<>n?#YV|gD#GO^LXgc>;LI0F;o&pvx?p9jO8?q^>)(_Ni03GC-O^?601OwhZ zN`%CAsyC|IxkM3<%Y|6OJAZxaC$O!hsE*m|E+%pG7dZ$HcDALBZ&}iQw>`In$NZ zCMBNJw41b1c#Ax3yUFBqC=n3+7uiiHz|lHWU<`tpwk)uy67B{VcBe#$X8gPD+1-IL z2yYv7Gs|F|s|R@EH{*rG21T=-;8`KHJe=H1q4oaha2;TtzbDA`f4)^U?F71haX@OB zD9@ujIUX8CQs^ZzVazUMFhk^0vsZSd`(%(@so0Os$b6!~U?r=P5E&WFkZ*UpJO zuFPRez!-S#+K(s4fV?nLF(>0Fe;B&4yMqR(l1sNc;>f7){b{{uoOADWsA=ue+Lvmp9`q>^OS10qbu6j}z-J9Kh`0 zC8YuALY9SMym3%s7wduoL2c=J06yy5@4LVAzu zTt?MIqx*Q?sIdWv@v{up9c3Jn9bOk3>kzr(EeJf%^QQq);Bl9-_~vfYit9O$hZp%F zIf^- z@hHUNJv|rS{f6x^$fWSPJF(k0mRCE0$9GCiJ3nX(u8ZihhMBw=kJShEolyXjE|R~P z62d5HUjLpuj!N%1Pzk6H-$~?E1utESEX<7ziRs-f;M6m^h-OFFmgt{PC4i^A{86bJ zimwzNy7qd6sUeN-vfa~+pulAamefF-Rt9=eI2+)^(doC+Ti48{S73Kaz||Sm(?vcyC~Nsaqs;zF4fFg z`GH>Lk?N3@D(&UPxG+OmLtG5oZ9=647k4tJcXkC0PU*2yjvN5;fLL|KUq 
z5LuFYK?vy8rNWKdzb71}e%*zQTRM4xcZ}ZH?U|`JA%!t1>%2VKSm2T>no+DGDWG<{ z0@Bvk+Ew6jZ}eD1xyoaiQBxV6U`AyRV~FNL2h<{eoZ7}}trktK_B@VZwCMpuenAu$ znyK*SNE3_xXbzb9VGw((9-<*F4`Fp-WM(FTPcMc+!N_W>R&z)eUkXKAO^dqSqP-L4-1!1j!Ti}$GfR95}$+5g$t zsL@)&+>V9{b2`=U%?)*p5sPR6E1svbs&7A&tQ;pO+yn4k?a0C;x(iT+c#}dEtj=8f zlLc0YN>+71KKL5@&m()_2^XOIggXSu&l!^*k;mu?{7Fs&oicU8Td5n{W>B~fTdR~+ za{^9JPbngI8@Mz8teAUHLPT^HzG?f``X<+DjUiB_SwK+B-_g*{F2 z9g2#gO>#v%aIg%!N1Tpn7kzOqd=2+7dK+3ar-cLMrz&>_hu`6k{4^JP?<<}GZCoHl z=w3r<&AULJ1M`h`Hh1`|w40&)N1%bsz7;;UbwD3r-D}X4A;^`Sj7)b?H!i6HCxvlN zTy30CS9WgfMIW3`F?$UTHWnRR29eflSPF5mPVl6sEYJB4*(&w-r-O@D`~+WQzQbq|1MA@8+Ay;g`!z`}Nx^k6JeB8!hxLTDS5| zH-7PA-nAl&4-?AnRLM%l-jh~c^kD>EQ_CH64?d3+6*Zv z)YTGkB^6~_OOg!us$xYsVB8|FB`7jn;_?-sOqBm54Fs3sBjN(RkfW?21|+6c5M5Oe zdyjYJ+#G`+IXo0qXK&Q{_eAjM`}8Q^?kfZfrMnW=J?3y#Z${FbD&XVBVgT+Jto+jy zi=JOOVBFr9fxGQdo+-$SW~06twpW^wb%JVPL^pUb^7Ms+cSnj$j1D?1lus14Zd@W# z7FQLFkrL(ErxcNE>kWz>N^8^3oxlEM<(o2FIU;WC+4nvr6tOYD#r-}<2hzBRpWsIo z?~EhSI@@PVFtK;5qNGsqq>%t7KkKFsnfzN_=)_CfP(5bT?Q)k2g>WE?!iYNDC!Q3F zEE5{BfT@N7$a7=t*Aq*s(Fyu2FWX2&@qqub_5sf#>G9JZ27xWUZqS8Pk=B)u-l4f{ z3(8ttfqqYTh(&JDX^yC!dB-9| ze(@7`b+5vW-ImTaxX*0IOxMcRWD$Ny1%AMB>#O*qD_6juw0;5dSF{|(2xK!+z$hs&4CyAbX~fwzu+%!c(9_#x)(oc6g^bJua!*i3(P2 zkH4pB^M{w;i!cXF+t0S34g+{*(AfyV-DA?@Z4gdzePyV}{Q^_|AP~g%3nMpi4W_0B z=RmI`(2$S-5wy{+nh|5Y(?Dgi0xNjX2A6`xxFBqT(UX*KuYuSQUnZbYgO?dfXWHFo z4$EL4TQwN*3y%YC{RT*$k-UTHb1+AC?v7ip%ho&#aitK)0?e4q724cX9`NVO_o~zv z&cDmbMpHfmp8%L=niU!w2;S)yJjy(=wi|wy{hL*aEjSh9ar}32U?UZ046s#PAWusb zS)o`n0~tcqm;|R~PFAt*xvJ3yf(jt|(z-@3KeSB;mPnDo0=Z?A&e!xhdsk6kQ`xib z&Y#W~UU?CF``D>)wy@}RJp`Rz z&mnrM`2G3-J*J%`IhTu}p(8El1-KZ!*~Q0+7Wqr5NdqdqXJ{$3&d{C=bJ zUI(gsdA#&^+PG#rAWc9^GRLS*q**|v5$z7{4F%nFz=dmiQ%#_)*Pa-{WLbS%lKJ5clV}P~Oh5Nu z+vj#m>ImbG?K2&$ZCJW%J+LYlSoPM&f(=VcRH}#$48`Q)r(0b*50p8FtN(P=D^t~- zgIjeJ1v2U0g2z&UqjBQNi)IPUX3lErNe{zsuQaQZTl`QSHUKDLeGhnrxTX?lV|2!4 zjx1)2Oj^t4^sdHr=odK^JO{K(jRcSS$eks94+*Yg_>)bS&yC{F(JlgvgA-~S%dAD) zbFVA$k6hnZ6TlWs{^kr?H2v9&0`z>MdASM#*L6FL4~wsMTn=L;+VUUn)4eZn^FX11 zm)>)7-kW^v)fE&~5aa$|d)NNg)Um|lt5l&-tq%$YtXie=QmLXw9tx-hB)lRKgdh~q z@D>O!4^S0RspwS@d7~7B5J*6hh>$=<%~e1^AV30%NP!v>B4P+lfZRO^_0vD#-e2y= z{5WTKX7;<2IeT_zXK)X>Q%0vAb0$5uorpp{8XbKe^6nb(&puwVhy!vfdV&z)si*U` zrFPRLPA{vodpttXAY)~~JTng0I->Yk(aX&qUV#nA*LV07glaUH)-lM-u@3&m;#^6r z;cygcA1GI~L#yLXQqAtFU0GUpXYYcQja|uyTPLm1-yf_zP&0e+>aP`FmaOcZdni~g z(COVU%MJX&k%zzrs_`=j&d6*|{=1T#FV(VEEwLd(It};zf5QeS3>K zCF5*;hoEC%LTLF4GHBkhexj06nA7X=wG*9}+_CraMXvtxusn{~1zB8t@+D*ppTu5(fDnu0;$ zlgbhxbFJNH*p}W%hatYKjFgAnbho^qT@!{SBZSw~@pgsxGHVhctub zk<@k=t=g)I;NZ2IRrvj-C6N$Gr<U*7y`TD<+5>GV|@;qr*;Dr)v`Ww}Lr)3{JZ$4p}vk#>hjxE8s~pd~$RRzcsw= z$b;WZqnjRR`+>KqM;zvD5|_toXq}lELIkrm4sMd2)-p_XVw7=S8KWvI)g3%?F;?I% zQ`OVR`LYDlgLHgi@)@wZ>f_g4VRn)kmo7+;nEzNS4r&i`*w=)|kvSl2gtrYPvuoM+ zZphj6tryRvvPc7SDT-a*r035-gm=ZhKsaJohdEQT1KX41opE1DM~RF_vi*_W%vPKm z^5=EKvm~DsNK}-_UH`SI`hn!Ds zuLLI+&XRF)fL;7MW~+CvtAPivoH*T4J9XfMM(HyAWk$Y~y*! 
z!%F5-aSb<#$%q{2cqACN>35xv59tld3^mdcGz=QB+$Ce9lFBKIVjY%Y_H~g6q z93sl(HXt?~@&NU@KL0)=@ZU#Zr=uAwoYB@c$mY6>!YgNfCqfK7IX8!UE?P2?HijUGP5WzdWbqH$&1Pty*43oPGSo>|WduYW>&Hjv!4`;jNlrw|#-apc>;Ip=)Qgu%1&j=M4<;wKT03DH-k~2ad^a~7|Xdn`X zGcQz%M1xG0Cd@6R{a{FI0N@53K(?D^wMiJ%p-f%5?8m4%L(~0c^gUz_UB0SXS-%HC zIs=3c!;qd3H8NY21%yE2hPDDiCjjCoMnq!T{Db%g5F6Y@-B20^4y*tANl9TCy;rJV?!bvE{zWy7|B{+F%k;nWg#9!|LL&SQP9}(KHh2q^{cYVCxX(V0uqT4SzsE<#hVtzJqy&MjpE@kyb_+K ztFMAfaDnR#e_PddchNYchW>}X)fhRp*F=blR`HFf$*DIs&(Gn?6f(`+MYV*p-GT?| zmP|bV9qQ*r`L_Y;yt6=kg}aCjsX3}xn(PG-U?Hl?Knjfw*!+X4o$ zP%O6o=@T+E1wh7%$T=$0y!AONv@*SJ4x&bRDVYZZf zSI3N1W_{1cC0vL6#8@^ap~p@zWx=d z>n|)UIjbQp#B>jQacJq@CAwY3rORJ`xn_0A)KR}R2lGh2yGqWmp^iyQt@C-!hhODS zwzM&p!fwR6Z@JMDc=cKwy!4LJ!*uRh;-GZ0f99rquk_c^$D7wE{pjOuzAHh@IRfGR z4P`m}hD)hliTxk3aS_}TTKC9n#bEOe;TwxOFnQgzBjD)$!feP6R-wTsQ|3z8Du^GD z!pjWnZ^hSKn>sY@7$$N!xgP@ic)^$?t+jC>w=$K!{0@5=yKZ5pv~{4SoE5+ayPhD& zwnof;dyCu1^n_F3s|Wlj-@W{JJ~QD5l(vK>OoZL@8}!3SbmoCz`3{thq)r&Dq>%$h zWF0H@uekEV8g3zPBw%NRS+Nd1|!IK!2X2X|haRE><*+E{3pCJhpPK)Eh>E&$+9Bv~Q7&s-1Fw81rKDmS}XQ%#)icj8mB<#W|CjcE9KLzvgaA zh00}IN@3O%+aQKLLseR`HT`~%8aIRL&3l^?vqvt35enJjQfA{K7_W%|m=r*PFpZX@ zU4|-gBp<1;3O&@EU?Fc*wsdnTlcY5!_s>i#v_XIxAQypd-drf@#+{dSx1haB@=XJYFL(PyxK`tfILUDh$+4MunBM=3UyNsl zs|WX$f)y9@-Q|RR@{KTE7@=!rc#ta;qeEpL;W!&-kcltl}g%7s;)(Q4C5s07c LoNTKP`(F4DRM(|B literal 73229 zcmZU52UL^S`@e0!);g&bL?e9gjuDE$P^hu7y<$!BSM$~5?coq1S$|vf*`Vl z0AV8|L`8^<5CKCNArT@8gc*|%M#ld|1^k`=dydCr?tAb1+~+>y^Ni~Q8!OY@I}YvG zvSrI|vx|S(ZrQR^d&`z>=YRPb_=Oo(8VUUM1Ju^^+?Mhl*(u;3|MNX-d3MW|%H*8_ zkFCJJx8J(x1l_Xb^sBW$KPX;z9@(-*TwwOsS-VKLnbGZ8$9&J~h^6GnX)!B%DDfY)?FW9{df8wpy7|trrcsh%>UER<-TC=Z6-ws=;L-rS-=1q1fEN$-xFA-5ccKEjb7crmG6YTn zDDfdebe2H--};MNw&?y~eU;WgFO|;0(M!##VRnBnFNG|xt?#7kFYE}@IJEih$Coo; z#QU`|Ki5k^>MA)GskOjNiE(Ha#Xx5By;pl}i=(vfF^+P|YYv5SN;gNgBob7#+;i4e zh@nPn9sO>kuC`1O80u*w;L zJKDin039!_f-d~B`9^o`Npk{u!gXx>v_9(5cS}N*=50AV5GN(0y^@MPsI@ueluo1V z%@(7Kxaxr%%0s!$fydkQdm2Ag9gS~w?eK4H;ax!yHV3!p`O{W9)?SD|x})`QSHdVR{eF)YD+=W&8h|-vA)&O-_Lz-JA+KqRCkhC9hfK0oY)+?mhNnh zt$MU3M9Glw?}X))imJC8NDs4nb-+3>{NUz{Q-P|--Ta7B`aLlA?=xWIhM};{!B;gk zl93aGQQ*ioD?L8^?{?q+-j;%-cRa}`jr+ow?Zp{^Zdl-)9PaHmXedUA6nn(EdaEc24Qi)>3ANIvY z{t6rwMOJTpXv>zz@~T^%JM_QuLDuNI0SVPMr>S#2&DRarR2X_-le}jm&gRv9NEbQ4 zbjLsyzZ+T#zjpYp-rfrM=yfNdelAF+ z^3K%!2M5MF8@>~VcP8d+`#O_#HfZkNoO8{;Hr>s;>C@+111oe?@#er5a8bdG@`(CZ z?JWGxI|a0)r=SCfE`TkPmUz>`OGOoLa&HT&sNkdW$QL3Ie`y+SE_e-EBMCmgXI7V! 
z!UIGs%_nnn>{UQ*y7T!H-xQ*IG&OMFaxlhQfq=~p}IM&3s>E0|7c&w$eT5* zYoY$#h>>cW-J^ZP4LWvhjwO}VrYWC(`c*VFNt**7+iTM)f%=vZ;SLir@J#tJVyK&!z9C9Ij2XIA8aFsMxnL@Ahnd!8PS7 zWE2=+;w4SPO)3V}oZ}0QfHQ9~R{Bv>n`Yuwc(21rsiE=a$|ucr(oE_s8SdTuX#%sBaP{la+ z{o!RX!5kH^FY|Tkc;uYxh63Dfu1zyNACl;)`6+-eioPGh`*YG>ytk8u&6O2KD8?bA z5%BP<&P}nLB5iy*>yKRH7TqMes>b|wEIWEU(oJfA9Bo9fDS09N-*)z2UcW=oz7@3Z zJ<(E`$MVPAiWpIDMJV-Hm$dAT=y42^j@yN03o~&V;DkH)IMMKg2l1YcY}a*J^4*Xn zum@iRxgpy0mJK()G4Y)xg!|&XjM(1wcPk}yUcb!Y9lnCjT$^3$JCo&TmCLB=%e(fQ zAb6cJ@?A?du90f!`qWAI+LvJI`MarMK=A!r2+DBQA14?r!Ok* z+-2c;?8@eb0VncR9z4)lbs;F5UHp60NkUjI2>Ca->ka;*b8Q1I^U~XIm}9RMVf<75@=595x9!^#w7Q^ z97HY(7T+s0(O7z~s_X(ehAf}9e?c~qtmjMn)>Um1HoGoc+E;=I1U__4boiG0z25O%ObbY>zG+SeM{5AGcXfHQ*X}9CM_oGwA2^`%r5528mn?0X|B@$toqNM zr}`DCX-iP|Wv>(|XdQi$6jIZy)W%4o-l!@GgJ=bHO-7i2Vyk`|Mmjf% z5t_L6Se~cXeV!|=3WW>w1WgrHA!wg7{gqGJ7>W6$vVIm}VxgMN^!Er}m>}HPAnWDv zb#btI`=(l67M4fbjwSiC046nhT#nGRl}4ev7|#crABkA`A6@L`x3?|eTl4S{0T(u; z^A(}@o6)#sp2mdxDVpZJmqC=MeJn)YAKB4}dejk5KxjIy5fTVano4xvSKV}Q6Aa?f z-!x^6akYgT?h(w*w@ISzU3807{9XnnJwhgu!NtvhA*jg}F~%imy;8+d}#K z9H2TSeQgWmOw}jZYEmej6bG^u9wRFRPws}FGT@GnXoEMlR$;hq$VNtbkWF_(cuW(7 z;5Dp`R`VGG_RwYmCIb}XQGLF@zs(c+wD-Eyv|WgrDW=qMY{O#?mVXo3C$nf-)i^3k zh9w2h=+S>JJCduYU9XTBT?OMv1&a;TCG%Ewz#H>`c|vyXj%+W~~5VhLIg96-J3d{4iueH3kU0G8)_zkgeVf8DR^EWIY$*aLq2W0wi0 z@f5e+@q@~8IJTnck&_fk+EgMwIXcR9S5P#y{J7K*|HJu3!~@PXNHubOjV+wAZ$keB zv6J-F1hVk(y$rTq?F`ZUa>P-<9Kot<`>Xj`t8tzSEseiY7Mooyhuwv>XC&b2Ts&IS%YJ9MBSv2upF-0)- zUf%uJ&an(F@a}brPPwmhb=gjCiD9!#{5!XuY!#2o%j+uRq}1#HT!T;bM878Si$Q6c z{)aQ%WCDU;S_2o=o1r-;cYR~(06aK{(KAkKwI<&q)trWabHlO$dlDyhuijSZt1pSt zsR|WcpO~hyjPE`lCe3Up^zFHI$HHhWN9y0DvXbvt!6K@Eghc-xCYLv8@RWPOvLPxa zP{5wQfA94j)Fz~U@A^&NE%?0lCKCuZ!eiduPeW@oHu5dBX*JcCc}UYS*}uA8Ed3 zB?7cUT;denEvOyQiQbU2XZq`$oEvHo%K?^Ng-bSa1{~RrrogrjvnhaxB>WU1ah}Jh zh=H$if)XC?xmWN~$TK?7tT)G;_g~xyd;io~7unHZz|_1MU)Y{&3U$3ylA*)1@w z;B}tgmR;ARs7oqi>8^0?7z!EqIEmF9O+O*KV7AHM*A0DxFd}%Ks zd&8UnasO9@otjat-V&&2YAoByqOqic`#WC--m`5miYI_kHSv*H7>6a5YYaQ9*wcWh z-nJoVb#CkPlS39pb6>a}T=9>4)3+RV4uA-MDVzEN-lN}PPO@`MMhcaz>1WS2d%gTq zt`5xSstc%KD0O`!Ybk@TXm1FHW&MyVYhav4LZ*7YIL9G$mc|qGC9);s$7pdAkru<* zj>FZVZdY!};gm#z%3$lc$1^tT>)cj-|IN+@Je?SG1$><^z}MG}zhpWz2dcz(R#r1X zapy3@BQvBa<@=b&l11Qb$D~KSxv&DjPR=E|%kk+EXQDS??KS8+gTY73PEo+IqhS-o zfshxJ>wT;NVeJzA^%NodX}RZ;Z#+TaKk7>-Y!t6L*U|L=03=a{8!=D?XkDTS_PgW` z!O%E&Oiy!61$LJ*KF2Og^}ZuucScZ#2CnghezVN^u-8#sYK3kL?COg-%8bXx?uOP) zl{UVwfxlhIb_OtZqgrR-xghWlc0q0cl2S3LTsch0&Q@z+P=>G!5I6ir7hJ%)BW~)5b86aK@lay z(+poutkaEI6#S$(HOk=-LDGA7VdX+aPIGMaWWqo&ser-cldcVTx)?}65!oEQ8 zqbr3OHNS;x2(ArAe?({IopQyFVHC9?w&^{_cJtHJB@!U!7SPS4m6gS}wV>nSWt2Z8 z?^K&OVe{0^6iM9}iAS9l<{t^rhykyOGw4u%EpTS8FKbzgCyviCLoHVQ9RWX|VlyXh zWL#W7#VwT6>zsSwBnyWqq8-U9+lqb+Q1+?YukFn;^QOy3x~;GiGV?TI+p2LVj6_I> zKN~Z()1NscOH)3C^1J6)w z<9b59#CRv5TZGF`5<`~qD{59=H{KX=4?LLMebSG|SrHt<8Op_JwtnuM&CJ`dQMY2& zB^OoXvYfI8`?we6cSm384WqcdOB=dgtkjrJLbbm67}x5jQty}49Z>K&6vA`#Dz~{# zn+cE{qF1V$BBb_TNWPi00l!~auKTAXauVPvY`VeIp8n$Iqn#P2`}2`03irm7?}aRN zdHBsHWYVh|;#oDi>5g{`I11h)b(Ng~eKPpi>`SwB^}B9iW>;UdG=etMDmvxgEJ=f3 zarZD-*k2KveR|qQq3ofHbSB<$z$r`#8i(ysXKfc#U-C8;D>MlaKOKZM1r+UH+#`Y5Li&(4ENRR#*${KnvO6R=|Prc65M73}db-mYAOdHEN$!c?E%` ztKV=S$YGCg+i4_Osdb8PKlp}&8L;9%o373R%c5AGvwcyf8H(QFQ)N~=2v`!pXkJDu zA45TdPLJB`u}U6cv!)$09d1!;@=r&t{CW-&b6yco+}$J*;|;ajSbBDy%}*5LBzAi9 zOnIFczpYmbK3G~%$h=E*Y z8r%;t2Tld`D-h7C7o7vKYF!u+6o?(aC-%iRnW#rkAocIH0o;8KbH~2%cb=*(`(3HJ zBTQvFNvx7aFS_fh5^*D`J4WeeiQN;E_2c1ghDyWE+haH>4nN|qRox6n@FdP@2+|oZ z);Y0d#O#}afS4xSfh(dBB?*Oo-sAz>{O)6Z6XSL_)J(-P@x4)FTI!HRrtCy8?kgc{J3;~%z_Q}xj%TXZDCH0 
zZLl)0FvSlPGgRkd$ge@B0#MR93n4aiIvz7SDLhu))vi1yWRIMS`iMep>T`F~H<6%P z?T@pTh*q;v9G%Ixue?&+Q0gYXAN?G3XvEREpb^)22(X!?t*ebQqgAx)KOa22BN_;S zN#NGclWv5)8z;qZTHn0%n%{n5J7geQX|VJO}`1D_{Q^5sWmwXtfRTQ3-*K!RJ3IyZwT4el|j`@%fnp>$PT*@_vSMI zOc1KbOn15XpClkjTXc?>`Had7LnjkOMQ)b5W#g2_WN$q)s8(5u;nkp|jU(R3Cr0T5 zhW@xDX@u1m=-+YKO4n~IrSXFS++;0OyKADKi6s&$?P9~S(6~dLy~tF1jOpwu*vi1y zC2et@jxyZHtZ(Eose8iZ43yGZb*vn21S;uvs#-fcy;S>H4p-z}FqV+`Dl z8{lBfdIm2A=E}M|VNOvTLeuEh&S!Oz0jqS!`)<5qKv4qBq6n=}C$CL!Dze)(-ymI}GQz?Svim zqzRAUNUN)Iqv)YLAn!MMnf+K`e^n2sG*u_7>1PeZgCz7h4NCvaMqcz9^&8&fgkLF- zF`KkU>|Hcs+Dw+!WypJ{)Ev3xeDGyydE#=SO%kqrC?TM>#yK3qU3xE%D>J&_r!`yW zrAYQ&{--9#NGhIH>Z86P-y>(&^~5w;hNi*2fcq_dpuBcG5PTu-1o{W}|KK7PV0W=%WD%40QWj4G;!fDV>##}mGuHyRo0y9VO6i|sSy zRvjSasSYAbl`Z^s@!t5}IgT4+6^?D)x-gb^+O`HzCg6(V^VR(Im48fnC5{ zqT6rb$zn)#{rbvl6_>AgyuQh`b@l%9wGiF?#m-!8pz|+fj>3vA=wGV)eM=0 zn&F0}I(um(l}APP^&?5V@OKx^;&3bNFO55W=GWKUGIIGF4>T)vBmYvwBkVKI*4zIP z`l0k@6D&-?hTz?!FIJmQmH^>kT*y+5jotiEIQkKJtMDaZVv^2g5_c3y%ni(J__u3K z>v*`lzB#1H#?Ch`ggX~_`4`Krb7sZMIp;t|*rA=>2^}{-G4Wo)CorR;KFEl{~LP8w3VKY&zi{thJ&kIv2MxRKffxkUF>OAIF+yVX!J(y!Dqv`%nm@ z_e}#$GQL-Sfb&wVDtVZB-myBAn*V`<7J+)0h6IOAh`Szz>Xhp)EW8H`QyX;~V&Kum zRpw@G109{%oTQO3IQF^v$lKh*xw2~aLIjPClq^Q*i0qWDHUsJuds335e*dXTgrD|? z>~BJbN>z25-pM_3^63sVmkh=JoA|)vU8S0tr5{l3RB&t*;GzBNl4co~TbtTJ?9^bZ z=IGNoj$nrQoF4<~9MyQBB9*SWa9STHJsUF1F9XBh>3c9*e z92k~#RkY51ATxLuz%k&r;dQH7Vwuif=(vH%g~{NA80Luo=wZ%7>NX6(S_$%NrA5aY zwAB6{9P4-08C7}+<&YexaxEyV?^3hhd1}>)jpBXcuC+P=cncW}B)BIN-HnuX=9oT^jbyz??Y??*#x}sE5_OL-} zD)EHM8UBSFiHVO}a~%zz9n_$_3~82x(RC(ta`1$BdLFSY$GFJN?+|0iYoJB(WO;0C z^L=Aw$vgPAC~?|*new+7`|bo!w_oGOAb?Qou&2>M26>Kc#;f|N#_mFxFd|_L^S1OY z-fepVNfW3hu-VM$@n!KSn=fyhX%#N#>9bnJ56xW-EF z%2i2HH9dH#HqwIZ<#qt-XCRkhnM6-KCWM0o*p($YcmR-$1Sr%vxd(pA2!C{1!b`Oc zv=x6;3b)d_t-XO8%QH~^S*+eeOknw6yo?P!Egr9(XvC1*@gTkon;AUU9vzx_=REJz z68)YHkk`7k@-c{Tw3-)wcT}39DTj)?E1J|`PkW)f1PQ6p-5(n?*mCd!FV{xS^w*v=8koeNAU}(gpO%o5-Cq(Nck70!=AO^>M_bgEQgp3b z{*a@Y;qrf-pQ|=djTHr$#Yj|>SKj# z6{7e0^D5)b%h|f}2;H7u-}Wf&hn$myN@+Z3QQDNy#a$^m@Rt0xHGwXRa1gB2tmr#S zp=lZoR$p9bh3dMB)>FRZRRz%8`jx*MW1cu5166#3E?r2Kis--8cb?~)JBV=ByrgU7 zXFpG!ls>^(ezS|g*&4hn*oa?ZyggwRSAFn!cN`VO-`O@fmg{0UQ3a)ES$hv49`Mun zZ!tOllG*AXnAaN1bJ!|zZ>R|;&=c6PN2(oQGxH zB;!fdCgN9KtRKZooW>kn}DJr?TC_{ZsKdj;Qu18~cja$QF}F`mQ0c!~5G zbT+xXgn8LehfdKOXl{Nj`w-D0nv-bt_cA(+`2(m1v59PUw#*7WmZvqZp%TAXT~iaj zxz&2r1}HwSX=53>mAM(^y1$k%z}!ONAA zE_QU4X z^3#;)Y3Ij4elo#IBl3nYEGm-h6WZ^H3rfPc!swC010uJqeUM~~|M>L4lCuIKH!=Es zC1av}x)8Rfy&(%;2FKb7A+b-D;~)>06C-gj=d1|YVN4OlV3{KCF3V&y>qCu-r1JZB zxV#T5zTr>`w^zjbzU556iL@%P|3n2)2Wz@Zdg_jn64P=AzBvC;=YCCn@i@H@Z$*5R zt#bTn^we^{ZeJwpw~JSPRw_|B2a1UnMEli#ly1YVRdxu3d#y%m z;D8L6*Bc;K7+KN!3v{TBp0;C5EW;U6wS;J8LrMDM$+*&kqYun!={SS=KT?s(K;WFb zYI=fmgg`!Hikc=qaEacNgCG;IKGXt#H;Cl??9?IK*{7_!K*!>Xk1yqT^HPIz+4=zLo+LhMDEm&dUis)C8bcN;=JXyrXok$IB#iRMB?a&yt5NNb=lI$xKq z(k61FK&JGEuiwHQYSYLMmK_{=IQO=Ie57qs#BN;N7o08f5UiBLtOlU9Pm0KdUa+3A zM3ci|BYqz=AFpQ0mXmcqio7GAfC5zf9@YF!H#p&r>0Rn1MkmmyNGH92hclLKdID`K_cB`Zx5?~+TJ;jg zgD1D$u}N~ZOJun zMw>A|QeEVetYc2`?_0 zW=!Y53>$2I9NZUn3@JIyOd5H-d^=jX;Nql6YzprdWthI%_@(Z`aV#}}~LF2FS%t+P9BYp9MQzritl9+FW z%ju6hZqASi$VfRKV@JYJ7T|SQ=k|{YI^%JJgUnGjiXP zdo))Ti-jL8RZFaCb_zdY2B{&bY%g;=h!8wqD=RXFJQ~A|c=@_`Ie8cf@!?)~hfGty zWhQZ|GMbL|f*(COBOYc9<%0We*7M?1^D^FI#ID3yyOdH#qzokJ53d2Uk(BuJGWXIA zr@;Mq&dX9Gd5-%ZW#x{T-}P1&D}b6Q%J74$pG-VE8`cD)OAL5FPmGRnZJ%DLG@OYn z3^ey z17S`Eb@J$BKjkyb)9(FR#|ZVIas^U%6UK1(9C6I#Qz*`7r!Hl!%+xl5fx$-KZRe0R>PA2Fw;^UE^kxyjmK?K6^q{De2uRS`mJ82@AuHPLYTzqrR#?{V`T!1L|%9Z!;chAa37zXPZvmm;Rn-=Ig+2=pv+rtPWPY zb#6`T6p}?HYKLC?C<0wBOj`9h2)DAhhiSKD!D%oSp@1(HMrltb)jSWxz^-QziOI~i 
zk?Ca@%rmm&zrze6#pjGt?uXUivq$`lEjra8yW1v@vsc(ljkie0*89eGI`Y$mX%+MDKbxQhCTr7L; zy*wOzRgPM}>{{1UbLbjqrSd&mYQYYt$7w zBoWWM7u=m;9j`qi7%Zee-r+SBX0fs;7P28^a{F3FH9Ej3v}v$8&y7@P44VxrK!ah) z)J_-UMTxf!b-$(Wq2>*>%oapY9p4IrgA_~nBxn^H54~x|PdTI07dL&uv2B_|d&{AB zf5EwOBP~Gwk#3}?!x$wa4z#)WF*-@Pvf|igZ6l47)+7yLcutANuMtS)hKb~L4B)fWJH?%V^r=`L_b!;I-4FDr-EbVoE8@r(|+ z$pC2{>`-ddaO_7F`qdnHjCD>+%Z=2teu}8Kv5yuzKIvt{MJ17jg4aOr(Q>TrMEc~- z%Bb9xLfyp-65dDcQev>cZ}lTAZhjh<&h?kU(cEL*fijsk5Nd>Q6uYbM zMebHZGw}@zQ!vu-8EI!`V~vlkOM~*sJ-D)=yr`jukc>#zm1Q8NaYLWlOYqEi-r4IxRVjhttEXw8rKKziy$EI_nBV;G&mCxp>@1Y`|vg)aU#DkPGt zIX5^%2x56wHnX4^L+{D`_hY2(BoWxDsgp7$AWjrOD)N!Fv#%wg+@ee|-#ZW(QVw zjZL4^a1IbT$fXa=?ISkq9KUL;qvT6_c-l+I2%}I#xcH$m0|FOlfO+dB$cpTy^PIbt)b-s_?pfu`Vn&98#Tf2_1|p;I?CujDedCL(A2tM8iwNyuOZU zf9S;i<5FY&I7=Y?{wq+Xfh-2_LyHHqL{IJPaBj)X z(RzpH!L&P#S)mUVXz@a?To>Wg7k^+KVd@x9I3MOfA<753X;K&F3DCaEDNL-k_7n>=h9ClUGR(GHs#4pW{1W^m4Bb9sk7l=EX)LKlcb-qB`ck5Zt2(#dibGbUh z@&G2Wpq{+xppC}u4(7DGK?IF9yAaAVGw)GD33t>b+CByc3vj>UMRbp-NE##JSu`xq zlmuEEAnhTLtpB^wIyKE2DspV4%xJ+(>>E zh=5R`qjh*M-iMdQjml-$ocAF7KSLe|!cO`k-I@!@@_I62@L=q-RkTh{c1zR6ER`u* zBcn4kbxxex@Og2GbI&I^!bCbAXcaUImPedKt+IR+W-~gr{U=gBSzd?We{BfUcK@br z5$C^j$_SxCh8OHgJ$xU8p4iO5PZ9FC?M}j zNAVy<#=t0hL>|&ve>(Lo)Yo?Z>NN_@cyMM}FyRtl6nm>)wysX;#s}OaXELMJ1ZWwl<~JQ5)r>}CAmv4~BT5R{ z+#l{xCs|8$==40RZ!2g&(5lnKsw@yAl;W6o3zi@+0^Qq_SC0#2Ds@D06|1it2Ufp; zwcBOuJ>jnJ(Q?0b_Uf~KYh_Ck`eb6rm~nwZuB=gM)Uj(#PHqiXCX?ZMT)cc~!XNl* zJ%898BjFHN6+ooOwOqu`(|8!v2)VP9wL>K&%nr0N7D=H^fSZYmsj-cuHU_s$Ao14! zRc*XBDs5$?0^_#NJ)!I_kc5KDenxr$^3dtZn*Ve;NfZX^Q1ae2exP8o!D9D^^Zy3NvN(}k8qlcujPc8tNTS028&Yl98PDRMK#UrB9C6(%Pk;92j{wcKm(nX}v5Wh*q@s5(g9(Zy{^MbEZw>3+39a+H+ZI+m;eUt( z#?v}1usFz-(yW~Yr#hY?T1ltT)gRzjQ($n)U?p-N(LowDO$tKOUJxPYQ3OlZ{pOgq z`|4{A$l9jItcRpf=!g&JE8Td+R0)2qpkX$vwTocxUk^$0Z0YP(C!I$C+o$A{!&4Ea zEho4?k47ds5Rq(qM&}7%ukw6SnDjPxnrxzhw_`vOhTYj4B3Ox$|E>9vMN1PA{H7ea zs%^UWOAe%Ly}IdYV+S$HeQu^q$|pH?tu@{osTu^u;2DqpjP}D-cZU#L}F`L8a9PqBv6Y0MJItI$i zQTCOfM31HAC3*x&yx8es(QslE1It6qye;|o#UgIpa%g&Xpv6P^d@A=uurRO}O&(CG z_>tr7CpH8+r+{{qWSy<)i~g_2%pk9<`{8*n0iE9M z&3hX;W2~{fpBH!7^oeC%$zbzZCZ0$lTN+XFZ&VkUoiBos#(GuW^mR&WnJTKU?Yhs% zod2SZmO7nUu0Hirbq~=m>AH~|Y%!32fSkOl^`KijY&)Dw%Qb$7p)>#`c77qW3`y+d3#h|a(vH}XfVcfZY{MNpOLZl)`%Xed0 zDevWtDx&s3L)!+waDOTVLqm$YOz_p6uF8oY3huFe$BE=~QPd3WDscOO4U&ZPSj^VDw;`AmzySA&Mm8YrKnzP4k|onts%Hm5i>QLc@&vUi6?{z z%*b5PuvT!;tbY8>;wQIjregCeXh5p`d+df3opfBiFC#1D{S2n#=`+%zmPA0mu`qKtQ&QK_06-@j2}%(vaLXN0qHKP}GjlpQ8OSrT+j( zH~A+ie%(gF#y0k^H7pZRrR^VNuu5rp6O4m@Q%LAa2jcFDrFe#HkTG7uTG_C`-wYy9 zXV0r@C5Li=CHu?AV(GHW(~Zxb|F3F!sM`|?S&+(=ZLul_(LdbFwmWy&AP;mi$|hhA zenO?R1#WKsFOStEqoUPByZ$89rH`$2L4Et!`y|G@z!sp>;&JJRfb#24xle!YWJSEm zRn1Z{wOyMk-@a_o{IfwRsN@UT0rxi~%4t6!cNvGT^!m*5O@Q1ir~A(!mVE?Jlbe_{ zuQoSOHpQBCZeUo-!(-tEV=~aNjj?>OaX@_!&;!I`tQC*iHJTM?u!=npKB_b| z$y$$=Fs6yht8dtoqvD48^fd8^V0lp!f8YAa%i|VoGSSTyx7{Tp0o+4xy&}EWHa9B9yZgAu*pU0E zL7!^TAVqa8Hxn5ETsi)Lp6zh(u8t=FCDHvr-Fr1%bhzVF?Wq2^#%dMgmmt+I`evyX^PRQA#I}$g;dW;W@|aC26#LBjALiXO}C=DWb4mnnJN>C zqUh(a>(Kwzr+ad;jtRk9Ek3IES5Zg5bTqyy{Z`0>N(VSpMjf59eA9vI25QZ$(p!r| z^??_D47ev+_?t};BsMNRD&LCdoDlUbr+$Vaco36@l}W}Ut1M4w#LOY8j$9Fqd$IpGq%ZNR4ID&qQ8^a_ayJ1lL1kX+XWafU&UJ;2;#H^mp>eMl#70^*W|s{!fM5PU}q# zYJ)Sty_ItH3nK5eX2pZME1cGnkBTK?X(B$^!r_`>QlBXds$13rw6drG{TPR=z@Ubx znkhiO@7vk15`lAYE(Rq8_*8g7c3XOaKS;H6}I3cuaIVzF` z^sxv)D=q?#^jqj6oN&i7!xJh>9|)39ZtFs3YIgcaqf+s5&E_#Z8P=Ty9>=`g48x8e z!v!rXKaMAGkY-c!pDTc?9E@KeEj1yW&&zne-i10f9{XUd@{C-p3G|b;J1BCk&D+2H zdUl7)vWucb3>5mQ?mi$j$6Kr{o&H=s; zh)hYTQBwh0TOpxUp{>%z3GoX$Fsq7Z#*d@kn4Y^L)A^J20}KRn+VOPH^>P=PPGNFT z(l7()bHe3&JuxZ;+gS%Co&Li2>J6Or-<3*G0lZ%+(KjYcunY-`a{5s(aKZs-cy%$r 
zP;I3pl6{elc8V_?2V`RPL;}i}eDXFFUxfEucXJEc*<}4NJix~hjosnvQFnm+qD2XxxmjnS{K&IQsQAdT{?7l+f?EZ zE#7Et6>XyL;#>0XZ42(D!(TXk%H&&ChqAQ+02x)6%V!deoTY=;G*i>_W8_IYnj|qV zgM5CB4#Mjg+1W_%GtFoRI!8DcQ2>*zfi|>Yc+4}mshI`(k*Mo*!C)uyRE9dX7Kzd% z+UgMxeY{8v(JgCe{#OO9+fY!KHQ|OR51^;*#B#7Z@(+(v zH4f_D%ImB5R!3WF$?v&gT)&4HHo@$jZXJI?Gw1|Pt%nG--Ja8koVYGE-tt{ zp%w|g*T!n#xlF0(_+})YvvxA91YKLB8eYu@`H>jz0ZH6}ODZ}=A)J&qH`9w9lQ7vd z*|IWw0JMb@K85*qUG?^_MxtK>Oop65Ym1%!e_|;Kgs{xhR<5=OT29XBEf5Hke;<{) zBX-Orf8n7QrPSXzkAUlKIQLx`>}%RcWOJiUUB`&# zY(86>M9OuEz6#CPP6mvD58e6duKlPyEU%MQ$rkY*SgMy6N`!tOC#L(`Tz1_j?*8O_ z#X-D}1L0}CIMD8Y8K@_a3s(+>we1#B$H+A+hdzu|0mW&e=fXb1qhVOWol)Qxb2Ake z=#_2X2f89p^CBSae#9`lQ1wig2P2hN4f8;@fHFlT?Mt4zKCuKv<$Y=Ue|2WV@dIhI z=11HMUMqEEEZe_I>G~T7Fc6q4mLPIU%SS?}0r_f1`A|4;duRG2HmZL^<(&hR+uBwS78Wj=h22Il@h69%cVt#+#QvPLKF5hbu}8RO67SD1MJ_mgKm zrhv=`Ig=XYz88LtmoV*b)40EYtTQ#7R(=*};Ai!gG2QH&$EdkJYkpAoAeWlm?}1}KR> zGTmTH%g9`=0xlo{_0UkUUPyWVs!JmcnMYcRM%WUOAxl%R>`++w+scgAu5ny&5LvdE zP^lVEAkNK*bIr%;!fpbY>Ce%cj(|iIr)AFVhB~tT9Z6+eRd%ps z4BNBGVuiF;TmqAb!OyyGZ~JIUCuWS#&+cktnb7BrKm9y~G18sOfoz{x;x74iF8St2 zGAA|1$vZva`iwAkrO|JQ`FNPPK=^phvA8?!l4ZZzzX4hDfGbJ$ToPM$68xIL?Qrwz zN_U0hHP%4)>Na)*+(0rfE)Gw4@gil1_fFDLI$SgqIpe-J2V#550$&^Wu;D}^sBERd z0BFYbMtQj<5T*p?oKsKfGJK5PZT0&(56J&m(@v4Z?X)IF<7%+0Zr109O8eq^EX;He zxWPu)OEd|-=7!ZrXAcSyxe*^%zuW}@RtR^E ziCY4Ya?W&VByJ@UR-XSnsPX5Bo8@n({UKZ&P=mSe)1{54>_^cTKR_#&Zx6Ij<3k~O zpW#S1dOl!*u(j;QD7i&5>}a0%^Xk8k>9_~ohnEa0Om5c9(L2671FzX3*{~RX-~|E<$jw2Zeo#c|wsWkH>pK znOz1_sD2Qt{5bFCbKdf5`eWJy&e#*Sfwf93!rV2;68*P)c7KlWcH`2pj8U$eR+FsQ zTM(V|*hQkRdG1z6166@7tN8y2d-Jd+udRLfw7%9>RB8ngMK~=gAP6EdDMPCTWDt}o z%n=X}2}B5E7}8b|WmK7jFjSd@Kq4SRfRHMN$Pf?^!We`|0tA5&LV!T>?SS@oeW$0V zf9iGdZqE&2p$%tts zP<98bTzi(^%lG8$Q*GqQv>HVnPs}KMU!- zIe_-1#Z!)OZY`ag(pN`G*nn$4Zc=}yF}t4#4=2thEkKCI^GPwM1w+f{R;Ji2l&cWg z5K}&XB9~6Og+jdEjzBX0j6krU!l&JDY~?T7B4q4Pu$v5smYJsTV1L3*=XnV<2SoK1 z&$Xk?H(Qc_Qs^6%?>&hS!Azj$q!wBRr9e(*v_i15M7m|AXpCX!MEUD!3rke*!^&5j zy18h_!ZyNSPT?z`;pT=1qg8v!smoys%Bjst%NV(@JCwt2%0~O0I@)BS=WUhZ85QPZ zZ+TfcPBl5EcIY>Xx;Tju4Ab82N?J6eduYal$~oOhY>; zuEFV+CBOJtnfJ)jBYXPCB*TacvHignm&qkE(f?+YRIeqgm)g1}_k~G^yZYFFVTCBt z-e~a_Te6mQxX0J%N1uzXw{3&WqD47f{R9^F`7)SlWwbV6h7!8@w&$}ZSX0qVI=vDP;r(Qj? 
zowc;pk53L&+yS&t^r_B{`5Igz2k`F6-AT;Fky(6RYwmo9NnVF`2Ds}$d+d?@8%v`n z&ej2zMx(atb9Tf-fFtBDtFC6gtNDyqCOj;pF83^&;;TlQr^|_$kU&JCYSO#C)Z$k9CW86=SSNKqO@{({7F}m{w zyfDTN$vA}y7q>lzk3JH&`)v)Mm0%iwtdwT7Tn5Lt>=E7uMspG;tk#AoosyK;H!gHF zJ8Az;`D_XebmBBF1pd3b*B1TaDMD8;e`QZ=ptGRTK)kosqCM^72)ZgT=7Sr|^59*c zR?lG42R-;uhJ0#mg(@@A?Ld4_3)-Y@C|PG9e=niGyT2%_*1!FGU!hcs_?@LuHUC!q zNKrGIn$6V=92Mc(@hHF*+xi=hF6lY6RENaU2sSHz=J#=Lguk z*EE+Tj>{o;t!Sk@sw0)%3ouLYMFrM`}?X&m}Z zj|-8LY3~;{x4pXQdee%G;@XKI)oUYTS918L*bYrDrO8 zYMpRn!d?@%RD`MPyCX0fs{|{FW?7HS*02R@3E#^M_rSnW_$XY_iK7|lx}BnTp7JTI znq3t78Rz2w4WF*bZSA1UG*zJB!M@?|y2onPoX4#9$sJ{~eX2HpF(RPiSd<%_np^Xg z9TcvM>CU#pYh$7p$zQ>KkO_^ z(@thuvFQD=(GcPZX#qQfM&G(R7@xcUVZVB4bnYT~TmB4KF{R&;jeDbJ^26+MG%yoA z^z&NI8o1Ec{xO&I^3@WVuXnK~FAWQ9=jNTjdh7!0K`#JrrA30O#aZ1HTo03M8OMsj zfFs7X#mu(LL~rJ|4a}h-+FPu)3pCsViJNU7T-u_9{ha7k zC|YYf&Z)iq`3n7#6ge>z2|!H;&xyn#XO;SV%kl^6+`zleq6BXlANSM!e)^y3{!oVf(} zk%hge+^J3dTpuIu{pZ2Yk6>ifKs|)Kkx%{D(Qw5H_b%%B+JUhQvr5upchyGMQLc-0 z%o&lUjK=WLtVN|RC=sOGmh$3$@hgdARm!xU7h)fGV!GeqwuSf}C;w#yQsEFe=9~Qp z1!iIY3~1lq0s2YV6@z&q~(%)i+qf@}f-cbIl{MVr`b-*<&Pw8v7LT7@Fz6j_rX1!pDMGQpK1?}N3k}K%yVQ?8OHGC`^dZ> zGYfS`j*wZ4`>D(di^Jw-sQMD=DcBUa`ITQv?OM?;pg5xdiLnl8WWle6dG-2n&H^_f zPr1?d(@B`xJ|RXmuJ&l%DbpO~kcq(O1Bs7hREiePoZeVyVwiP)sF~eKQS?#fd2l{7 znJV993f-Si>V~Cw#+Zk4eB}Ybvl**hu!Zk<|MQ<^5=E_+s4FlAV>n__H87v)tfiy3 z*`MZjo_C7%z2mcaneG}CWtMA?+?(EosY04*v=6o82VKU)%6NB0!G|%LNP4+oVV?~f z$~$$LGu(7Ya6!X5sM2B6GL-qA#w^6C-88K&cteHsJMVN2*8mM&GO-PLydVd8XRN z8mT06VsxXQxi5E6zV=b30?2B#DcLLBQlmEjkjoPIXK{9aXg^wEPQJ%eF0~o0^Q@6c z={ES&>AMpqIxgtED3Cs`3%rCn5yXX{N+&6L3J7eYqbmew%gaa0r2Pf5kzk4D79MXL z!Fi!;&v03fdd6c#6+z?erqliZyt^ss&Dr7Vw{F0_zYAn!pX$qrDeFIks+!~dVr?$vVh^>^S zd`<-@8;6wot}c0}jal1T<<0eVwuhAty4Vs&5+AvU;5@udacV5kS+12VO`!Bc6}5c4 z0Yoa?b>oUX!YfD^9wHJF0v6O(GnnI6EpA;ouB}Zrw$eH4-m$%URcWnfkgfpp zTLWw%eC}oA9ok}kHCkDu;v>?IJtPM*>IU-;o#-#5g&J%}MXH>Qt{v-A$s&=&l3+C~cotGHy{xpm=c&-51uN zw8X7|{I6C)=S~C6UQ|S6C?-laFF4wYS1hW^K6^3(^des4H=&NXVIzb=J*EE>UL0I| zmzsSw)XH&hwjCT^@9&&tTDgXs4LTO#D6eXL`G>?uxrsl;xA5W2Xh4k$TL;}il z>G~|NAsgF9Ct+?~Isl6p9$ZPDAOG7ovP+b8Tj&<-M;tGC=T$N>(lWVLa<<@ZfT*6E z(Rghk`7W@1y1h>5DtiSO(GmcR&X@`%Iv$DY>4A^cNu_q>pc3N>4vy5=7Y^xRl74 z@|XbY2XCG|54`<4u*ky&zG_3c_k^E^Tr3PfC191iA^d^waP@XNF99?R;KqZO6K>;D z_$#yJSSB|*YIWrbxQj$DrtU<+mX{6Yl+aeANQ^`*CX!yMSeB_JKUWMi3Fx&=1}Z+w zU*Ohj{+RuKTb=Y5rB7O=e`W~mlXp>%`{R>LishI;0@WB1b0P6i103W}fickH!O=6x z6_*-t?}}=G6wud=GeG3wKpk__vEmmH?BYCv^S}okcdeF0kC3Zo?$aF%9@nA<6Q4IC z`{nNFOVz^Y2aPJXr2MWm{8>$qK)V|#Y9aKx>fh%DVw*gx^2JBUy2mR9T=vA&NUMVg zov{&5AuSF+#`%J;2l{99l?@wv9-dGj78V2ZnX#J=E=6`6FKah z0Nfs;s1Yj^(w{T0@wcAGDH`l#gD|V|!=%+L+pdDbgNYNWz#cwmA83We7Q=9IAQ)%9>Pea=$ zph{YGiJr=5tbkC{6|gcc@ja(=*nk;XT@mkr08l-}Mm*?pZGs;h{iKb1n*5*3ZzCdA z@K-fbe|zoD*Uq$7RT*Qo@mb7K_!G2A)AAe&! 
zUQyI@{(<8Fivf4g>qzG2DN+EzqK@S;*Ig>(S00-hZe`LKstkpl#qZ1!UBz|ZBoI<=8R~9klZCvSC8FBEF zO7UateC$i6=&i$QhP3*5>KgqqbGru>NY&S{8dW)2nWGF z9BjI=t`>+kh<5!()fKTK1H*qsDjYYQJ^bLO*+8$*z1eXdoSEB=Bs_aMJc$o5fGoBf zx7f1_^oGtr0%Ce&Y+?4Wyilz!n@^5$=0C%)%Sv}KLkH0|zZM>fI-AV)%kHVlopZup zj7~BmJ%74B65d55=1G;7XYF>MX?TF$5z@<@xaW6gv}$_;xXmB0RYm{=?3Enehj6*| zmdAimybF&gJ6XhwxJ3!Z4-CXT=8A{y#+nGbkpH7U`f1H?P ze-p*8)b@7lg4e?lrVZkf{lqP8fYy$Uk^Soh@OwV;8*#Zkd&a$ErER&l$OEyE!zbx3z0;u?rmWdU z>zk`Xv=p{B;NBnc+pLM9XV`48e#`Z`WvutqJBk&6m2bY3pX9Q3J~{RLK+0`|$2&q$3&T&Qk3ay;gz1%=coNkcP3m9*EhjkC4 zJ9@fwiVbl%{m4nZ7n1?9fbYNHDvgU@p@wC zj!>5-vM4vQq-GlWvq_Egg>Yf z3ePLuI~r(dE_+pUq>O<5%MI@*FQvW37|5)+gfIKax5?wd@dedsUA_Q`U8ba~Y}>-$ z-3!S@&ny%0=Q@I@#-+bIMBmVGYABa6P?7CJ>pX{_0 z?*T#r73+nOQzy05Y*P|>sdY|8c=@4r4+N4jIk!QhI-lZd6FmrFg6w z$BeMvnx;DB6RvPIz?Up@0t>xAjl!>V-$MPnY^*gR7OH>#RftyT^ply_*K zB0DT4Zhcmjq)7X4d40#3RQgHRHh?1U*kP5zE5l?nGq6AZ?*%gl;1rhVj=wT^9^`T_ zf=OK^c@j7^q8kpGWo~MeY`E>O0rW1i-|8LdMO)nULi3?H(Vvteu_{^0xd~e@@pN0x z{`6-#*3q5ul+$t`O346_ovB67CxF9vwxCEXLyf@Y7BTiP(?^51*NUP7)++xV?}1-h z_W5k^FSX+IY)X39KWw#m7s!t4v3s}ZvqfU)%2f&92t8 z43z_MQ9XIks~FGW^^*SKfV4FXmfOD71(7JXZfT*Y>T-vAdcgKV4kdbUCIt(7K(ipe z>}a<)SnG+FNmjm9?9@8G5c5wq6i=}*Z^&3~%?By5KiQ_?z_#53@k(+YuZSH1RIp|A z*6~HoSs|hh#Zx32e`eP|JruB|2y)e+>GNm~aE@PmIGO&VP30w1n8DL658YWc*d&U8 z_aaTf3iWg=k9Bk=P;QT(g)ol&gB`}Rd8FuYe=VcmjvRCCpjx|oK{cF$?CiU*eupmD zUc_n@kT?B&QpY<&Eft;X2>jrLG}i?@8^l`RqW>TF44DqKg)ki#O{q(^0f#rDz?0u$ zWTkQPkjuNZ{K;3(No_BVJ&7Tru`PHm?CS`Twr6|~lzB_Y%4I?=2;-j)Qm9?Ylrs#d zMisb)Tm?T+4chnY!yF!v23nyjjBT6B84R%IrqgpZ9vd!R;MYhmouY}eHF_n%0Qmer zzX3o846}CO529K8x60_AM`+SSs#Evnnb2*^b_8A$NJAs05%!7s)tw-^HC*_Wt+_D8 z9yM5BD{S1}*uTwhsz8VZ{B+f+*dqRwljWjdMZ+VgGF+gIqvI4`XL$azEMND2lC@wSEA*c zqW6dJ>uE488vX#l(>;3nxLIm#3k%wo^jkm1$tC~_dv{^u3 z&Y>kfmdS}``_r4&$X!jAm#Fb)2Ta|yrwZ6eKw{%o)gL8FB7nXOLQ9`CeQjfX^fAxJ zu4^kIG@6Y*Px0W~(wxK3=X9OtYEEy22e|4ebx8Ir&~ZeK=H@j%_;WBc9kTr%QASX~rA;G(P0$|57_ zOa9w{N(5l$P55PhcQtRwS&9>bl~&hP+%S}^4_#%wOFhhtOd zTxJ|#_K5EWOq5X=Wx$-?eiLg^Ip_k!IC7L1e7inIrP`-jB8sF?E_0jzE~{|w&G6Fy8(wj7&b7Taa$o^Sh3=vsLe<0^ ztriVpnC93dbCfZb7*I|$d|)TdeY4!_a-Y{nn{Ej#S9`{67wNq$ugxTNLttIkRh6{5 zRsSy7K|g9Hgm)e9w^dQRE0;A~SIB%lM?-aoyGB54{zXw?AAZ<=$BWC>@v*-E7@w49 zoErGl2OW^l*9O9jPH>z+R#eLS_#&Rt-%(9~7o|`3`K1O02ZDM~gLEJbsAM9X0o`}J zEfib$aV`m$yjJu4xzG>2p9vy^ts2VERal<)S4@C%QE`MdPS0Qit65{y}QTp1cOV6I#r2eHwj?Pc6**q9&MT%$Zsx} z_NwdjtHd92JL}j+oYJj5nJPZfWRMdx9}Oy$37{t;JUrZ5^$(i^s(|x!VcXKkI=rwM zOJ+;9xM|$1ef3I!OCrBtT6cJHw4~zF$@|b))joS$XNERpj__Jc0<|sxTwTh30je!WNsOVtt*+U0HLunN z<~1GffKy|_d-Xwc&Kr?guGuUI|Qmbf4@ictI2K;H#Je# zC)R?e7le1c-@$6D&S_&V^yM|o#kgrS1XzobhIG7&mUv`fmrLs9HUBeVP}h92tG&G+ z0s>RrRYG113UNbttu{OdasuG?T+UB& zKh(X-yKN$uS#gQrgy0gNplCbL?2tYiAivIIVI*MR2UKnjnP71cv*SIv1VFV3a|RdDT$yS)YWLa4mW8zpK0cu_UI~TLv2Z&~t%fRz|mJs|fdrKkBp!d^AMo z5o}`oTXUoA40=_TDdP7-G1voC9Qu)2o);Im;SAu0CD1;Z2To}%0MVwXu^>lT&hcj1 z(vIAM60HIjR>bPSmEhhKpUw+dFL9Ahg@eet>a`Il764X|px?FM!1dn>I-*-w!{ z8c1({-up&6_rtJ?nZgY6immV{NU%aQ8s?oq@iV$>Dww9URX+r__NxcydI%?s$}6oU zk9WSqv_kT~0g8X3*L>0trl-09W5mdUPeB9#s9_9JwY@~tm@3EcLFg<=bF9hq)oox&qh8nt;DMm2o2JXb&~z!cii-t0MGfR z@OjBrII!}6@sy5&ND?^$a?qU1Ygj*`XdO1~qXiLNL?uW6uCBrcd?Bqss1RYJIBT3C-io6W#X$Q3U2)Am2xJlc z4WPjsU_9V&#GPx?%{0Awa(hFwKt?c{@f2xA_|2cny*rM!bJXy{##6YOQ?Bk2Q*I(R zA!E>>dXxE2FF?6!f4lhIVD22@bj;D_Ey?AIYX^grwKDq+yGZI*J16x^e;=>Pb>1o} zi>i8v8V`=iE9~Z$byuz92dECtS@@G$RL@n1(F|~lP+fuWW>wBVgq@6OyOcz}W>{YCCH|j(3?>>Pph$XHhNl z`ZaM*^hWez@_G`n z=w83qMTzm*M|3A7(e!a;OxSe|$O+~|w`KvWxXS4spELW9>w*VU_zv>?obE5(1T$QuJejk?I z6Jnzt?i)SsKhj+Bzw{6GY8IB zIYz9!*}lsaoM8LhP4t*3ZH)NXyQ22lHp*&4fm;u1b*atl)v1x1evxOjwp}Yf-Nc6X 
ziC{d_4MheEXIv_Dw|&mvHebH;-Ps#-6TcK41Lpyte$@T@PW3<-BB8+Se(a!SNKPkET!oF~xOXdwL$UH^KNN zM-XP&Id=a;8_YbtMXX8{Dhsz(ch~hNuR`fj3vr-?uI)T{d2?b19nU`y*eFm87#9v; z1eUu;P2l{q@pq_>c)KXtIOS$+_X`RjaNZT6_kLpF!rlup;Ii9;IOVODD{Ft(=z814bwQE+>m*`=d`$oKJ?HvIW!C`qRU^nos0a z=RaHME**vB?=Z=cit{NWF>X{kb=<6Zm-a#}{^gN#l|O{3M|nFq`7zu(YCn9pUDrBH zEr{Kbl0+r=q4_A;(u8Ds%H5bI9|jhTv;#2b{Ip8v+T)oP1%snWW2Q= z{|QBrQ{xbgl3@vhJ+LD4vjTXEEo{PHYW)YU?n8O1f@v(5vNdLCT1pwsfG0~Xv0u;4 z0C>u8#4zH?oXyfep)gd>WZWs`{Lwhu_oWa^y)J7CsD4bnzt0tP6uql{h_vgPfX7am zb6k@6Z-L}DM8DA>6rY)Gvwdawx}>zTwaL`~%csoinNfcS80zPK;p5(gYmSHV_9HR* zZC0m7h|Kk^dfkZqu^W4?e zCS6nBecPYSi*lc?U&}Ek9!B=lZmkbTXP+9^8#io#C>p2Sr?1{$QvKLkMZTDop40H|wDxE#JVYkbWIMC>Lh3x`t3jn$)! z^%k9e!`N6KrY1yG1!A`Ux-L6j1lru7bN2DS-c@nvlUh75?`@+Epaj@wsZaftp&48*(f5llbRWFb-10&c8n2TD~?CTUX=_mk`TFTH2u zDW(F%6tQJe@KpNafZ5a_ZB^}rvRN4H)0Y01Ht5SJ33UE2Y%dH-KB<*i;CBHu5;+P0 zpV3rfEnTO*K5m)*G{Ces%8a9u{(JAH+GahFzv+IWeU~(AMbD48nv>9SB{5GLd7Dfg z=l{CumqJhlIMY^Wim9?1gm_3e8o^da6$l!ACA+JTkf?HHg%p%SFQ>EYXWM*j8x<{u zsYUo~V4ePWhJu^??04*86Bb?6ZPb{hypUH(mfO$#L+(yYj?K_K#`9C*bAbEonE+Zn z>7QrHjDiPy<%~#?`7d{5Uxi}<43)%eK257L9K(y-7g}HjbLL69sMDA{WjcoL65$?o z^wv%A=$b3hA){8i058s@d7ul-OZ%@y=HTW*7vlGV*WP~D__HP2@qH0JOU8lg_VYk} zC3cKYe6rRTA>!+h79q2CN#f%|PF|Ar#gno{9=0bV10?eLM$L)i8Ma7M-tV^G4oCKU zU`?Rl8s-=KPJSngl@SCDrmnGW2k$w7bEkb5|5EL4&}qJ8PRB$hi5UsgYgJ2u;yO9=7#TB|8WZNo4H6jABZdxF3-S^D5R@ueaD3Q_3G(+U_pJ_?wH;vNKpU^co+{!equd7_sOd}0j~~(o}%({;QrmIHrC$+SvzfB*jQt+JbN+ChRZAYhYxlWiwMiw)S@xA{q9}tuolBul$b6MVn=+Kz|w1y}P5ME!E;%JQ*9Pa<<=< z^#ExvZt42CQ$mYjGBpXBKux<1tR|5d)55y4$(fa+FF@aY(5B}D#q1PZ0@TofxX+9_ zxqeQfk1K8myY56{6yBq*HJlz@($n<@YG2V0y0Sc0hgMlQ;!A*+%}>xx#-ACMMQlYC z!+#0Eegk}%z9Uwp@4@f~l*1tvZRanJ0mVP=T9gg^AT<_k8|tvhdhQTy#i^^y^FYXo zM&oo9K|;#@{bEdm^25YL`|M74c$`^mT!|{?tPyj$;d@C#n}^yTF|~zjF|D}Qh6jW$ z>zoc9Yd*j~mLvNB8CGS$GB=G|<2}Z9UHdx=3nvu4y=`dO>pW2lY zETfIJn^`C{jC6e&v=8@FALyJW%R(2(ssI%OOUYIa+$TeI@tuc^xPLlMN2tD^6XieA zF(P&lVJ40C;v>#&KW9=TaHr+`FPP{&vvXohUGLVHYSXp5N@=U@tMqX&xb_IzWC(zG z*0ylen;IdN^hM9_BZ>{+-dA95GIRR~wsVxiU(O^I`azu;m0yS2e7Zp*^oK)UM$`Rn zTUo|OpX%->y>LZY*U~j_WP_S@lUh;=_bS56ayum`^eq3>PS*cZT*FmStAyPTS4|eG z0LR>uD08D4xQ*$(!hg?2SaOWRJ9IgDTa*AS^s z+lcSr25~@iI7KTGY@nmdreWYXQF%oCk2l9e?-Bq;xZh)bD4Bg^HG(h?`}2KXh?=I{ zmq%FCGydHQW1`5AKVX%&F*4wi)wq2({3ZSrNw*MQrtby-W4sO)`i*FipW?61H^F3r zw{l<-w*9zWK9p=@X#rmK9bHFT>zwPsvd5N zKxIU1=elPSSXUkONt!qIX?z4+Q%1bFCk46{hyE2_10*h@!tYnf5LD7y;xr3(6nlYE z*R&lSjKh@$Se7WbB2`Z7nQ)Hjktdf?h6|AO*8=-w2YlZ8;Bs%s`g2eDi0hy`Z@a2! 
z0H#AS6NPT!nd#w7m18G<`~Fv~&)_UTo1~s0u)#FA z6B4&c-nM^~FYglHw^=iu%p4WXkYp2l^J$9}kVv=9TN{zxc{jh9x)o1}P{!0CWj|OO zKVXTl8rFNDGGJEr@}FDOIg5M&n0)$iK}tv3@B17iF|G$LzH4*wjj)^n0EPelE@j|d zs-LTxVYkOv`6?@D%YUhLQQc_|fj!@7}*kt#qNjgl%eaOb#F?zS!ai4p4hG`so+dXC8&lA z!!WbJAdf5{eCQ~{rMtA1{HUa`-M9JcQP%wA|oIID|=h;y#zPrxmRXGGpfZ&;*$wzu@_dYuit+0h`#9v$8fRc`9srNTG{@D#Z6(UV>2<5dI(u#owH;(=4C` zwpJN)Feinbfddwy5FHfgY6WJiLzU&(xDtBtuT|5 zJ64Q|M>0|nd2e9V04Gx@qe_ZwkhEcbzr9rDI1_MQ*%1c4X&yx#S%5cSiaU}pdz;ZN zvmFHg^`gv@M4O~)|C>dY6-IBY1x3{!O;y-gRU0X+BO_K)SlZk^-6%cyT#0sU-|l$O zr@BK6M-A$3R55yz^F0uCqIo#sSypMG&Dk#!Y!?z{>a z<+J*WQJv@<^ny8@dfk1jzJh${Cm_vVcMt#~Ea9?N@d0o!F4@!_Xy<18>}V)Fv;wAq zXE*2tH(|z#e)!SpC7!rGzZ!-*p#o&VIXATZ!SuLz%25%x>!SKF6v|UD`e`U07Frnr zbzroV~p<=s^8*!8c44%cLv}u01qeyAlwi1=#frGu`l*L2zkr1781%gOO;_ zzFcu5$n{C!#>=-y^f}&=hkw<}1!CRP6@nXjCF57~$4mbh4i(5{*}}?jy9(+^tj?6J z*@PxdqM;fVyRVu^d;`1{1PtdCM97JE#ABTw0f9yZAjdL^Kpn$W-JCDQc}YvMw$CJ` zL3Z=~`+z_iF>Jt6t^tBZ!wjMuWl@0}xVS!uvM)}zlTT0YD zx@^3mJq`&Ww$Xz0>w)Ct4;LC*m&;K52sq2B4%I(}8x{sgJe&l3yV+^o2_4TsC37A(V)1*?YX z=oI;oU*R9GL~Vv&W-Y*tMCVy0NeGYfUm7UKUc`GB%sJXZUI+xA8ef04YoHxfSHI_q zR5ptkc^T*(&hGvGah0g&1jNEFptv91btUi6*D~zUU@%%@gWlQoOPyjZufX78qNX!I zYE}?8BORk_4TLGoq?zU7z{7APysaX$FM_NRcTUpVX3?~;#?4Bk}@U@Fp zU)^65q(_oGSDrR}dqzp6&HFkAeDSNr&bQ8uPWZJ$rp${dZdrI@-a(XL0Jf68{n10z z0-)&04NJJ{KwuYAZ>B~rA*1&4hS#&-tuVFDqa9(E@h>mZn^&(4dSXX!^tzY|8H#b; z2{;P$5oOctzoG;M!l6c~)8^RU+?M_RxU;zLz5K2Zo2M1s9z0XH_uT{X(WS@Ova#H6 z#AlZO4{>GU$K3`UQ1*w85c z$+R%89}Q8$xE^zXjbNFvT}I%32C*qus6QtK~mLnfLPiMp(?Z zezObsgr$B@4NGqKJ|JAQ9}E2yMjdemF!Ady1%pj)5>skaQ_=S$x+OLNsj-w@q)0s%0Hy6K1CwGXjWae6@GVu z-@VtSnyeM+;uwKHWxzNer)-wu5c3TBb21riwtEKA%aolHq|TrB9q}J)W9)ZSt8z`b z$5{m6?tzXq-=FjH)MiHb>Y?2a!^{en_aw3P+xjIv0Vg4Wndc>2JT%fez&!|rI*I4R9 zdb#&_v$6O`A-rTt^!cM`kwFK2rdLFW0P3xp@f4QP$saA#aOKcdtXS7fY!mJD%;xfM z-mYCUB~mf8qgAxv!S&zHYv$|cSx+>mx~!;)JFd+<;58qOpEc*U+l;i!-K_nM{qSRh zm10Bn_qb-gUbVwADl@UdWtY4hVTTSgzjrR$tBC*8)!d^gYw}!zhr=P~yuzwbt+4(w z$r{HPqyGDA{E5*+sb~6)JDeoHkut{=AM97Ss~iGfs&Z%u3t`~)I`U>H(<)K$r-OR? 
[GIT binary patch data for the updated image files omitted]

diff --git a/zh/images/canaan-cover.png:Zone.Identifier b/zh/images/canaan-cover.png:Zone.Identifier
new file mode 100644
index 0000000..7cf85db
--- /dev/null
+++ b/zh/images/canaan-cover.png:Zone.Identifier
@@ -0,0 +1,3 @@
+[ZoneTransfer]
+ZoneId=3
+HostUrl=https://ps.gaoding.com/
diff --git "a/zh/userguide/K230_CanMV_IDE\344\275\277\347\224\250\350\257\264\346\230\216.md" "b/zh/userguide/IDE\344\275\277\347\224\250\350\257\264\346\230\216.md"
similarity index 53%
rename from "zh/userguide/K230_CanMV_IDE\344\275\277\347\224\250\350\257\264\346\230\216.md"
rename to "zh/userguide/IDE\344\275\277\347\224\250\350\257\264\346\230\216.md"
index cb363b3..69380ee 100755
--- "a/zh/userguide/K230_CanMV_IDE\344\275\277\347\224\250\350\257\264\346\230\216.md"
+++ "b/zh/userguide/IDE\344\275\277\347\224\250\350\257\264\346\230\216.md"
@@ -1,54 +1,4 @@
-# K230 CanMV IDE使用说明
-
-![cover](images/canaan-cover.png)
-
-版权所有©2023北京嘉楠捷思信息技术有限公司
-

- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍CanMV IDE的使用。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| ---- | ---- | -| MV | Machine Vision | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ---------- | ---------- | -| V1.0 | 初版 | 黄子懿 | 2023-09-18 | +# 2.IDE使用说明 ## 1. 概述 @@ -56,7 +6,7 @@ CanMV 基于 OpenMV 项目开发,CanMV IDE 与 OpenMV IDE 基本一致,主 ![IDE](images/ide.png) -K230 使用的 CanMV IDE 与 K210 的一致,可以在[这里](https://github.com/kendryte/canmv_ide/releases)下载。 +K230 使用的 CanMV IDE 版本要求4.0.5以上,可以在[这里](https://github.com/kendryte/canmv_ide/releases)下载。 用户也可以选择使用 [OpenMV IDE](https://github.com/openmv/openmv-ide/releases),但是 OpenMV IDE 只能连接 K230,不能连接 K210,使用 4.0 以上版本的 OpenMV IDE 连接可以获得更高的图像显示帧率。 diff --git "a/zh/userguide/K230_CanMV_nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" "b/zh/userguide/nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" similarity index 53% rename from "zh/userguide/K230_CanMV_nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" rename to "zh/userguide/nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" index 92932e6..9a1af6c 100755 --- "a/zh/userguide/K230_CanMV_nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" +++ "b/zh/userguide/nncase_runtime_\344\275\277\347\224\250\350\257\264\346\230\216.md" @@ -1,58 +1,8 @@ -# K230 CanMV nncase_runtime 使用说明 - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
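Stepping back to the IDE usage notes above: once the board enumerates and a CanMV IDE of version 4.0.5 or later connects, a throwaway script is enough to confirm that code really runs on the K230. The snippet below is not part of the original document, only a minimal sanity check using standard MicroPython modules; paste it into the editor, run it, and watch the serial terminal.

```Python
# Minimal connectivity check (illustrative, not from the original doc).
import time

for i in range(5):
    print("CanMV-K230 is alive:", i)
    time.sleep(1)
```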
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[toc] - -## 前言 - -### 概述 - -此文档介绍CanMV nncase_runtime模块,用于指导开发人员使用MicroPython调用KPU和AI2D模块。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| ---- | ---- | -| | | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ------ | ---------- | -| V1.0 | 初版 | 杨浩琪 | 2023-11-21 | +# 3.nncase_runtime模块 ## 1. 概述 -此文档介绍CanMV nncase_runtime模块如何使用。 +此文档介绍CanMV nncase_runtime模块,用于指导开发人员使用MicroPython调用KPU和AI2D模块。 ## 2. 功能介绍 @@ -120,12 +70,12 @@ data = result.to_numpy() # 将输出tensor转换为numpy对象 ### 2.6. 使用AI2D+KPU进行推理 -这里使用AI2D对图像进行预处理,然后使用KPU进行推理。如果使用摄像头等输入设备,请参考[AI Demo示例说明](../example/K230_CanMV_AI_Demo示例说明.md#14-nncase使用ai2d) +使用AI2D对摄像头采集的数据进行预处理,然后使用KPU进行推理。摄像头等输入设备的配置请参考[AI Demo示例说明](../example/AI_Demo示例说明.md#14-nncase使用ai2d),这里仅介绍AI2D+KPU的使用流程。 #### 2.6.1. 配置AI2D参数 AI2D功能有:`crop`,`shift`,`pad`,`resize`,`affine`。可以根据实际需求配置对应的参数,不使用的功能不需要配置。 -各个场景的不同用法请参考[AI demo](../example/K230_CanMV_AI_Demo示例说明.md)中第三章<<三、AI Demo多模型示例解析>>。 +各个场景的不同用法请参考[AI demo 多模型示例解析](../example/AI_Demo示例说明.md#三ai-demo多模型示例解析)。 ```Python # 基础配置: 输入、输出layout,输入、输出dtype @@ -141,7 +91,7 @@ ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pi ai2d_builder = ai2d.build([1,3,224,224], [1,3,256,256]) ``` -#### 2.6.2. 串行使用AI2D和KPU +#### 2.6.2. AI2D+KPU推理 ```Python data = np.zeros((1,3,224,224),dtype=np.uint8) @@ -161,7 +111,7 @@ data = result.to_numpy() # 将输出tensor转换为numpy对象 ### 2.7. 释放内存 -如果定义了`global`变量,则需要确保在程序结束前,所有`global`变量的引用计数为0,否则无法释放内存。 +如果定义了`global`变量,则需要确保在程序结束前,所有`global`变量的引用计数为0,否则程序异常退出时无法释放内存。或者在程序开始时调用一次`gc.collect()`,可以将上一个程序因异常退出而未释放的内存释放掉。 ```Python import nncase_runtime as nn diff --git "a/zh/userguide/K230_CanMV ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" "b/zh/userguide/ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" similarity index 62% rename from "zh/userguide/K230_CanMV ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" rename to "zh/userguide/ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" index 6de3e53..12abb2d 100755 --- "a/zh/userguide/K230_CanMV ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" +++ "b/zh/userguide/ramdisk\344\275\277\347\224\250\350\257\264\346\230\216.md" @@ -1,56 +1,4 @@ -# K230 CanMV ramdisk使用说明 - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
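Pulling the AI2D and KPU fragments above together, the sketch below shows one possible end-to-end flow, including the explicit release that section 2.7 calls for. It is an illustration rather than text from the original patch: the kmodel path, tensor shapes and the zero-filled stand-in frame are placeholders, and the calls (`nn.kpu`, `nn.ai2d`, `nn.from_numpy`, `ai2d_builder.run`, `kpu.set_input_tensor`, `gc.collect`) follow the snippets quoted in this patch, so verify them against the firmware actually in use.

```Python
# Illustrative sketch only: AI2D pre-processing feeding KPU inference,
# followed by the cleanup described in section 2.7.
import nncase_runtime as nn
import ulab.numpy as np
import gc

gc.collect()                                   # reclaim memory left over from a previous aborted run

kpu = nn.kpu()
kpu.load_kmodel("/sdcard/app/test.kmodel")     # placeholder model path

ai2d = nn.ai2d()
ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT,
               np.uint8, np.uint8)
ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
ai2d_builder = ai2d.build([1, 3, 224, 224], [1, 3, 256, 256])

data = np.zeros((1, 3, 224, 224), dtype=np.uint8)            # stand-in for a camera frame
input_tensor = nn.from_numpy(data)
ai2d_out = nn.from_numpy(np.zeros((1, 3, 256, 256), dtype=np.uint8))

ai2d_builder.run(input_tensor, ai2d_out)       # AI2D: resize the raw frame to the model input size
kpu.set_input_tensor(0, ai2d_out)              # feed the pre-processed tensor to the KPU
kpu.run()
result = kpu.get_output_tensor(0)
print(result.to_numpy().shape)                 # post-process the numpy output as needed

del kpu, ai2d, ai2d_builder, input_tensor, ai2d_out, result
gc.collect()                                   # drop all references before exit so the memory is returned
```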
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍小核ramdisk的使用。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| ---- | ---- | -| MV | Machine Vision | -| initrd | initial RAM disk | -| initramfs | initial RAM File System | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ---------- | ---------- | -| V1.0 | 初版 | 王建新 | 2023-09-18 | +# 4.ramdisk使用说明 ## 1. 说明 @@ -123,3 +71,4 @@ chosen { + diff --git a/zh/userguide/userguide.rst b/zh/userguide/userguide.rst index 261796d..27e79fb 100755 --- a/zh/userguide/userguide.rst +++ b/zh/userguide/userguide.rst @@ -1,9 +1,9 @@ -CanMV 用户指南 +用户指南 =========== .. toctree:: :maxdepth: 1 - K230_CanMV使用说明.md - K230_CanMV_IDE使用说明.md - K230_CanMV_nncase_runtime_使用说明.md - K230_CanMV ramdisk使用说明.md \ No newline at end of file + 使用说明.md + IDE使用说明.md + nncase_runtime_使用说明.md + ramdisk使用说明.md \ No newline at end of file diff --git "a/zh/userguide/K230_CanMV\344\275\277\347\224\250\350\257\264\346\230\216.md" "b/zh/userguide/\344\275\277\347\224\250\350\257\264\346\230\216.md" similarity index 66% rename from "zh/userguide/K230_CanMV\344\275\277\347\224\250\350\257\264\346\230\216.md" rename to "zh/userguide/\344\275\277\347\224\250\350\257\264\346\230\216.md" index c26da47..4491476 100755 --- "a/zh/userguide/K230_CanMV\344\275\277\347\224\250\350\257\264\346\230\216.md" +++ "b/zh/userguide/\344\275\277\347\224\250\350\257\264\346\230\216.md" @@ -1,54 +1,4 @@ -# K230_CanMV使用说明 - -![cover](images/canaan-cover.png) - -版权所有©2023北京嘉楠捷思信息技术有限公司 - -
- -## 免责声明 - -您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 - -由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 - -## 商标声明 - -![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 - -**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** -非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。 - -
- -## 目录 - -[TOC] - -## 前言 - -### 概述 - -本文档主要介绍 K230 CanMV 的安装和使用。 - -### 读者对象 - -本文档(本指南)主要适用于以下人员: - -- 技术支持工程师 -- 软件开发工程师 - -### 缩略词定义 - -| 简称 | 说明 | -| ---- | ---- | -| | | - -### 修订记录 - -| 文档版本号 | 修改说明 | 修改者 | 日期 | -| ---------- | -------- | ---------- | ---------- | -| V1.0 | 初版 | 软件部 | 2023-09-18 | +# 1.使用说明 ## 1. 概述 @@ -193,27 +143,24 @@ windows串口显示: ```sh k230_canmv ├── configs +├── fs_resource +├── images ├── k230_sdk ├── k230_sdk_overlay -├── Kconfig -├── Makefile ├── micropython -├── micropython_port ├── output -├── README.md -├── scripts -└── tests +└── scripts ``` 目录介绍: 1. `configs`: 各种板级配置 +1. `fs_resource`: 在编译时拷贝到镜像中的资源文件 1. `k230_sdk`: k230_sdk源码 1. `k230_sdk_overlay`: 基于k230源码的修改 -1. `micropython`: micropython源码 -1. `micropython_port`: k230 micropython 移植 +1. `micropython`: micropython的移植 +1. `output`: 编译产物结果,镜像文件 1. `scripts`: 各种辅助脚本 -1. `tests`: 各模块测试代码 其中`k230_sdk`, `micropython`是git submodule, 子项目地址为: @@ -223,42 +170,43 @@ k230_canmv `k230_sdk_overlay`中的目录结构与`k230_sdk`相同, 编译时会将`k230_sdk_overlay`同步到`k230_sdk` `output`为编译生成目录 -`micropython_port`目录大体如下: +`micropython`目录大体如下: ```sh -micropython_port/ -├── boards -│ ├── k230_canmv -│ ├── k230_evb -│ ├── manifest.py -│ └── mpconfigport.mk -├── core -├── include +micropython +├── micropython +├── overlay +├── port +│ ├── 3d-party +│ ├── Kconfig +│ ├── Makefile +│ ├── ai_cube +│ ├── ai_demo +│ ├── boards +│ ├── builtin_py │ ├── core +│ ├── include │ ├── kpu +│ ├── lextab.py │ ├── machine │ ├── maix -│ ├── mpp -│ └── omv -├── Kconfig -├── kpu -├── machine -├── maix -├── Makefile -├── micropython_overlay -├── mpconfigport.h -├── mpp -└── omv +│ ├── modules +│ ├── mpconfigport.h +│ ├── omv +│ ├── socket_network +│ └── yacctab.py +└── sync.mk ``` 目录介绍: -1. `boards`: 各种板级配置 -1. `core`: micropython core模块 -1. `machine`: machine模块, 包含GPIO, SPI, IIC, UART, PWM, WDT等 -1. `kpu`: k230 kpu模块, 包含KPU, AI2D -1. `mpp`: k230 mpp模块, 包含VO, VI, AI, AO, MMZ, VPU, DPU等 -1. `maix`: k230 其他模块, 包含IOMUX, PM等 -1. `omv`: openmv模块 -1. `include`: 各模块头文件 -1. `micropython_overlay`: 基于micropython源码的修改 +1. `micropython`: micropython源码 +1. `overlay`: 对micropython源码的一些修改patch +1. `port/boards`: 各种板级配置 +1. `port/core`: micropython core模块 +1. `port/machine`: machine模块, 包含GPIO, SPI, IIC, UART, PWM, WDT等 +1. `port/kpu`: k230 kpu模块, 包含KPU, AI2D +1. `port/mpp`: k230 mpp模块, 包含VO, VI, AI, AO, MMZ, VPU, DPU等 +1. `port/maix`: k230 其他模块, 包含IOMUX, PM等 +1. `port/omv`: openmv模块 +1. `port/include`: 各模块头文件 diff --git "a/zh/\344\272\247\345\223\201\347\256\200\344\273\213.md" "b/zh/\344\272\247\345\223\201\347\256\200\344\273\213.md" new file mode 100644 index 0000000..4f0bbf5 --- /dev/null +++ "b/zh/\344\272\247\345\223\201\347\256\200\344\273\213.md" @@ -0,0 +1,33 @@ +# 产品简介 + +![cover](images/canaan-cover.png) + +## 1. 开发板概况 + +CanMV-K230开发板采用的是嘉楠科技Kendryte®系列AIoT芯片中的最新一代SoC芯片K230。该芯片采用全新的多异构单元加速计算架构,集成了2个RISC-V高能效计算核心,内置新一代KPU(Knowledge Process Unit)智能计算单元,具备多精度AI算力,广泛支持通用的AI计算框架,部分典型网络的利用率超过了70%。 + +该芯片同时具备丰富多样的外设接口,以及2D、2.5D等多个标量、向量、图形等专用硬件加速单元,可以对多种图像、视频、音频、AI等多样化计算任务进行全流程计算加速,具备低延迟、高性能、低功耗、快速启动、高安全性等多项特性。 + +![K230_block_diagram](images/K230_block_diagram.png) + +CanMV-K230采用单板设计,扩展接口丰富,极大程度的发挥K230高性能的优势,可直接用于各种智能产品的开发,加速产品落地。 + +![board-front](images/CanMV-K230_front.png) +![board-behind](images/CanMV-K230_behind.png) + +版权所有©2023北京嘉楠捷思信息技术有限公司 + +
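Relating this back to the port directory layout described in the 使用说明 section above: a quick way to see which of those modules (machine, kpu/nncase_runtime, the OpenMV image module, socket/network) actually shipped in a given firmware image is to probe them from the REPL. The module names below are taken from that directory description and availability is firmware-dependent, so treat this as a sketch rather than a definitive list.

```Python
# Probe sketch (illustrative): report build info and which CanMV port modules
# can be imported on this firmware image.
import os
import gc

print(os.uname())                   # MicroPython / CanMV build information
print("free heap:", gc.mem_free())  # rough idea of available heap

for name in ("machine", "nncase_runtime", "image", "network", "socket"):
    try:
        __import__(name)
        print(name, "-> available")
    except ImportError:
        print(name, "-> not present in this build")
```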
+ +## 2. 免责声明 + +您购买的产品、服务或特性等应受北京嘉楠捷思信息技术有限公司(“本公司”,下同)及其关联公司的商业合同和条款的约束,本文档中描述的全部或部分产品、服务或特性可能不在您的购买或使用范围之内。除非合同另有约定,本公司不对本文档的任何陈述、信息、内容的正确性、可靠性、完整性、适销性、符合特定目的和不侵权提供任何明示或默示的声明或保证。除非另有约定,本文档仅作为使用指导参考。 + +由于产品版本升级或其他原因,本文档内容将可能在未经任何通知的情况下,不定期进行更新或修改。 + +## 3. 商标声明 + +![logo](images/logo.png)、“嘉楠”和其他嘉楠商标均为北京嘉楠捷思信息技术有限公司及其关联公司的商标。本文档可能提及的其他所有商标或注册商标,由各自的所有人拥有。 + +**版权所有 © 2023北京嘉楠捷思信息技术有限公司。保留一切权利。** +非经本公司书面许可,任何单位和个人不得擅自摘抄、复制本文档内容的部分或全部,并不得以任何形式传播。