-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathPlateDetectionDeepOCR.cpp
9327 lines (9000 loc) · 408 KB
/
PlateDetectionDeepOCR.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
///////////////////////////////////////////////////////////////////////////////
// File generated by HDevelop for HALCON/C++ Version 23.11.0.0
// Non-ASCII strings in this file are encoded in local-8-bit encoding (utf8).
// Ensure that the interface encoding is set to locale encoding by calling
// SetHcppInterfaceStringEncodingIsUtf8(false) at the beginning of the program.
//
// Please note that non-ASCII characters in string constants are exported
// as octal codes in order to guarantee that the strings are correctly
// created on all systems, independent on any compiler settings.
//
// Source files with different encoding should not be mixed in one project.
///////////////////////////////////////////////////////////////////////////////
#include "HalconCpp.h"
#include "HDevThread.h"
using namespace HalconCpp;
// Procedure declarations
// External procedures
// Chapter: Image / Channel
void add_colormap_to_image (HObject ho_GrayValueImage, HObject ho_Image, HObject *ho_ColoredImage,
HTuple hv_HeatmapColorScheme);
// Chapter: Image / Channel
// Short Description: Create a lookup table and convert a gray scale image.
void apply_colorscheme_on_gray_value_image (HObject ho_InputImage, HObject *ho_ResultImage,
HTuple hv_Schema);
// Chapter: Deep Learning / Model
// Short Description: Compute zoom factors to fit an image to a target size.
void calculate_dl_image_zoom_factors (HTuple hv_ImageWidth, HTuple hv_ImageHeight,
HTuple hv_TargetWidth, HTuple hv_TargetHeight, HTuple hv_DLPreprocessParam, HTuple *hv_ZoomFactorWidth,
HTuple *hv_ZoomFactorHeight);
// Chapter: Deep Learning / Model
// Short Description: Check the content of the parameter dictionary DLPreprocessParam.
void check_dl_preprocess_param (HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Compute 3D normals.
void compute_normals_xyz (HObject ho_x, HObject ho_y, HObject ho_z, HObject *ho_NXImage,
HObject *ho_NYImage, HObject *ho_NZImage, HTuple hv_Smoothing);
// Chapter: OCR / Deep OCR
// Short Description: This procedure converts Deep OCR Detection results to an Object Detection results.
void convert_ocr_detection_result_to_object_detection (HTuple hv_OcrResults, HTuple *hv_DetectionResults);
// Chapter: Tools / Geometry
// Short Description: Convert the parameters of rectangles with format rectangle2 to the coordinates of its 4 corner-points.
void convert_rect2_5to8param (HTuple hv_Row, HTuple hv_Col, HTuple hv_Length1, HTuple hv_Length2,
HTuple hv_Phi, HTuple *hv_Row1, HTuple *hv_Col1, HTuple *hv_Row2, HTuple *hv_Col2,
HTuple *hv_Row3, HTuple *hv_Col3, HTuple *hv_Row4, HTuple *hv_Col4);
// Chapter: Tools / Geometry
// Short Description: Convert for four-sided figures the coordinates of the 4 corner-points to the parameters of format rectangle2.
void convert_rect2_8to5param (HTuple hv_Row1, HTuple hv_Col1, HTuple hv_Row2, HTuple hv_Col2,
HTuple hv_Row3, HTuple hv_Col3, HTuple hv_Row4, HTuple hv_Col4, HTuple hv_ForceL1LargerL2,
HTuple *hv_Row, HTuple *hv_Col, HTuple *hv_Length1, HTuple *hv_Length2, HTuple *hv_Phi);
// Chapter: Deep Learning / Model
// Short Description: Crops a given image object based on the given domain handling.
void crop_dl_sample_image (HObject ho_Domain, HTuple hv_DLSample, HTuple hv_Key,
HTuple hv_DLPreprocessParam);
// Chapter: Graphics / Output
// Short Description: Display a map of the confidences.
void dev_display_confidence_regions (HObject ho_ImageConfidence, HTuple hv_DrawTransparency,
HTuple *hv_Colors);
// Chapter: Deep Learning / Model
// Short Description: Visualize different images, annotations and inference results for a sample.
void dev_display_dl_data (HTuple hv_DLSample, HTuple hv_DLResult, HTuple hv_DLDatasetInfo,
HTuple hv_KeysForDisplay, HTuple hv_GenParam, HTuple hv_WindowHandleDict);
// Chapter: Deep Learning / Model
// Short Description: Try to guess the maximum class id based on the given sample/result.
void dev_display_dl_data_get_max_class_id (HTuple hv_DLSample, HTuple *hv_MaxClassId,
HTuple *hv_Empty);
// Chapter: Deep Learning / Anomaly Detection and Global Context Anomaly Detection
// Short Description: Display the ground truth anomaly regions of the given DLSample.
void dev_display_ground_truth_anomaly_regions (HTuple hv_SampleKeys, HTuple hv_DLSample,
HTuple hv_CurrentWindowHandle, HTuple hv_LineWidth, HTuple hv_AnomalyRegionLabelColor,
HTuple hv_AnomalyColorTransparency, HTuple *hv_AnomalyRegionExists);
// Chapter: Graphics / Output
// Short Description: Display the ground truth bounding boxes of DLSample.
void dev_display_ground_truth_detection (HTuple hv_DLSample, HTuple hv_SampleKeys,
HTuple hv_LineWidthBbox, HTuple hv_ClassIDs, HTuple hv_BboxColors, HTuple hv_BboxLabelColor,
HTuple hv_WindowImageRatio, HTuple hv_TextColor, HTuple hv_ShowLabels, HTuple hv_ShowDirection,
HTuple hv_WindowHandle, HTuple *hv_BboxIDs);
// Chapter: Graphics / Output
// Short Description: Display a color bar next to an image.
void dev_display_map_color_bar (HTuple hv_ImageWidth, HTuple hv_ImageHeight, HTuple hv_MapColorBarWidth,
HTuple hv_Colors, HTuple hv_MaxValue, HTuple hv_WindowImageRatio, HTuple hv_WindowHandle);
// Chapter: Deep Learning / Anomaly Detection and Global Context Anomaly Detection
// Short Description: Display the detected anomaly regions.
void dev_display_result_anomaly_regions (HObject ho_AnomalyRegion, HTuple hv_CurrentWindowHandle,
HTuple hv_LineWidth, HTuple hv_AnomalyRegionResultColor);
// Chapter: Graphics / Output
// Short Description: Display result bounding boxes.
void dev_display_result_detection (HTuple hv_DLResult, HTuple hv_ResultKeys, HTuple hv_LineWidthBbox,
HTuple hv_ClassIDs, HTuple hv_TextConf, HTuple hv_Colors, HTuple hv_BoxLabelColor,
HTuple hv_WindowImageRatio, HTuple hv_TextPositionRow, HTuple hv_TextColor, HTuple hv_ShowLabels,
HTuple hv_ShowDirection, HTuple hv_WindowHandle, HTuple *hv_BboxClassIndices);
// Chapter: Graphics / Output
// Short Description: Display the ground truth/result segmentation as regions.
void dev_display_segmentation_regions (HObject ho_SegmentationImage, HTuple hv_ClassIDs,
HTuple hv_ColorsSegmentation, HTuple hv_ExcludeClassIDs, HTuple *hv_ImageClassIDs);
// Chapter: Graphics / Output
// Short Description: Display a map of weights.
void dev_display_weight_regions (HObject ho_ImageWeight, HTuple hv_DrawTransparency,
HTuple hv_SegMaxWeight, HTuple *hv_Colors);
// Chapter: Develop
// Short Description: Open a new graphics window that preserves the aspect ratio of the given image size.
void dev_open_window_fit_size (HTuple hv_Row, HTuple hv_Column, HTuple hv_Width,
HTuple hv_Height, HTuple hv_WidthLimit, HTuple hv_HeightLimit, HTuple *hv_WindowHandle);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Filter the instance segmentation masks of a DL sample based on a given selection.
void filter_dl_sample_instance_segmentation_masks (HTuple hv_DLSample, HTuple hv_BBoxSelectionMask);
// Chapter: XLD / Creation
// Short Description: Create an arrow shaped XLD contour.
void gen_arrow_contour_xld (HObject *ho_Arrow, HTuple hv_Row1, HTuple hv_Column1,
HTuple hv_Row2, HTuple hv_Column2, HTuple hv_HeadLength, HTuple hv_HeadWidth);
// Chapter: OCR / Deep OCR
// Short Description: Generate ground truth characters if they don't exist and words to characters mapping.
void gen_dl_ocr_detection_gt_chars (HTuple hv_DLSampleTargets, HTuple hv_DLSample,
HTuple hv_ScaleWidth, HTuple hv_ScaleHeight, HTupleVector/*{eTupleVector,Dim=1}*/ *hvec_WordsCharsMapping);
// Chapter: OCR / Deep OCR
// Short Description: Generate target link score map for ocr detection training.
void gen_dl_ocr_detection_gt_link_map (HObject *ho_GtLinkMap, HTuple hv_ImageWidth,
HTuple hv_ImageHeight, HTuple hv_DLSampleTargets, HTupleVector/*{eTupleVector,Dim=1}*/ hvec_WordToCharVec,
HTuple hv_Alpha);
// Chapter: OCR / Deep OCR
// Short Description: Generate target orientation score maps for ocr detection training.
void gen_dl_ocr_detection_gt_orientation_map (HObject *ho_GtOrientationMaps, HTuple hv_ImageWidth,
HTuple hv_ImageHeight, HTuple hv_DLSample);
// Chapter: OCR / Deep OCR
// Short Description: Generate target text score map for ocr detection training.
void gen_dl_ocr_detection_gt_score_map (HObject *ho_TargetText, HTuple hv_DLSample,
HTuple hv_BoxCutoff, HTuple hv_RenderCutoff, HTuple hv_ImageWidth, HTuple hv_ImageHeight);
// Chapter: OCR / Deep OCR
// Short Description: Preprocess dl samples and generate targets and weights for ocr detection training.
void gen_dl_ocr_detection_targets (HTuple hv_DLSampleOriginal, HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Generate link score map weight for ocr detection training.
void gen_dl_ocr_detection_weight_link_map (HObject ho_LinkMap, HObject ho_TargetWeight,
HObject *ho_TargetWeightLink, HTuple hv_LinkZeroWeightRadius);
// Chapter: OCR / Deep OCR
// Short Description: Generate orientation score map weight for ocr detection training.
void gen_dl_ocr_detection_weight_orientation_map (HObject ho_InitialWeight, HObject *ho_OrientationTargetWeight,
HTuple hv_DLSample);
// Chapter: OCR / Deep OCR
// Short Description: Generate text score map weight for ocr detection training.
void gen_dl_ocr_detection_weight_score_map (HObject *ho_TargetWeightText, HTuple hv_ImageWidth,
HTuple hv_ImageHeight, HTuple hv_DLSample, HTuple hv_BoxCutoff, HTuple hv_WSWeightRenderThreshold,
HTuple hv_Confidence);
// Chapter: Deep Learning / Model
// Short Description: Store the given images in a tuple of dictionaries DLSamples.
void gen_dl_samples_from_images (HObject ho_Images, HTuple *hv_DLSampleBatch);
// Chapter: OCR / Deep OCR
// Short Description: Generate a word to characters mapping.
void gen_words_chars_mapping (HTuple hv_DLSample, HTupleVector/*{eTupleVector,Dim=1}*/ *hvec_WordsCharsMapping);
// Chapter: Deep Learning / Anomaly Detection and Global Context Anomaly Detection
// Short Description: Get the ground truth anomaly label and label ID.
void get_anomaly_ground_truth_label (HTuple hv_SampleKeys, HTuple hv_DLSample, HTuple *hv_AnomalyLabelGroundTruth,
HTuple *hv_AnomalyLabelIDGroundTruth);
// Chapter: Deep Learning / Anomaly Detection and Global Context Anomaly Detection
// Short Description: Get the anomaly results out of DLResult and apply thresholds (if specified).
void get_anomaly_result (HObject *ho_AnomalyImage, HObject *ho_AnomalyRegion, HTuple hv_DLResult,
HTuple hv_AnomalyClassThreshold, HTuple hv_AnomalyRegionThreshold, HTuple hv_AnomalyResultPostfix,
HTuple *hv_AnomalyScore, HTuple *hv_AnomalyClassID, HTuple *hv_AnomalyClassThresholdDisplay,
HTuple *hv_AnomalyRegionThresholdDisplay);
// Chapter: Graphics / Window
// Short Description: Get the next child window that can be used for visualization.
void get_child_window (HTuple hv_HeightImage, HTuple hv_Font, HTuple hv_FontSize,
HTuple hv_Text, HTuple hv_PrevWindowCoordinates, HTuple hv_WindowHandleDict,
HTuple hv_WindowHandleKey, HTuple *hv_WindowImageRatio, HTuple *hv_PrevWindowCoordinatesOut);
// Chapter: Deep Learning / Classification
// Short Description: Get the ground truth classification label id.
void get_classification_ground_truth (HTuple hv_SampleKeys, HTuple hv_DLSample, HTuple *hv_ClassificationLabelIDGroundTruth);
// Chapter: Deep Learning / Classification
// Short Description: Get the predicted classification class ID.
void get_classification_result (HTuple hv_ResultKeys, HTuple hv_DLResult, HTuple *hv_ClassificationClassID);
// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Get the confidences of the segmentation result.
void get_confidence_image (HObject *ho_ImageConfidence, HTuple hv_ResultKeys, HTuple hv_DLResult);
// Chapter: Deep Learning / Model
// Short Description: Generate NumColors distinct colors
void get_distinct_colors (HTuple hv_NumColors, HTuple hv_Random, HTuple hv_StartColor,
HTuple hv_EndColor, HTuple *hv_Colors);
// Chapter: Deep Learning / Model
// Short Description: Generate certain colors for different ClassNames
void get_dl_class_colors (HTuple hv_ClassNames, HTuple hv_AdditionalGreenClassNames,
HTuple *hv_Colors);
// Chapter: Deep Learning / Model
// Short Description: Get an image of a sample with a certain key.
void get_dl_sample_image (HObject *ho_Image, HTuple hv_SampleKeys, HTuple hv_DLSample,
HTuple hv_Key);
// Chapter: 3D Matching / 3D Gripping Point Detection
// Short Description: Extract gripping points from a dictionary.
void get_gripping_points_from_dict (HTuple hv_DLResult, HTuple *hv_Rows, HTuple *hv_Columns);
// Chapter: Graphics / Window
// Short Description: Get the next window that can be used for visualization.
void get_next_window (HTuple hv_Font, HTuple hv_FontSize, HTuple hv_ShowBottomDesc,
HTuple hv_WidthImage, HTuple hv_HeightImage, HTuple hv_MapColorBarWidth, HTuple hv_ScaleWindows,
HTuple hv_ThresholdWidth, HTuple hv_PrevWindowCoordinates, HTuple hv_WindowHandleDict,
HTuple hv_WindowHandleKey, HTuple *hv_CurrentWindowHandle, HTuple *hv_WindowImageRatioHeight,
HTuple *hv_PrevWindowCoordinatesOut);
// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Get the ground truth segmentation image.
void get_segmentation_image_ground_truth (HObject *ho_SegmentationImagGroundTruth,
HTuple hv_SampleKeys, HTuple hv_DLSample);
// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Get the predicted segmentation result image.
void get_segmentation_image_result (HObject *ho_SegmentationImageResult, HTuple hv_ResultKeys,
HTuple hv_DLResult);
// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Get the weight image of a sample.
void get_weight_image (HObject *ho_ImageWeight, HTuple hv_SampleKeys, HTuple hv_DLSample);
// Chapter: Deep Learning / Model
// Short Description: Shuffle the input colors in a deterministic way
void make_neighboring_colors_distinguishable (HTuple hv_ColorsRainbow, HTuple *hv_Colors);
// Chapter: Graphics / Window
// Short Description: Open a window next to the given WindowHandleFather.
void open_child_window (HTuple hv_WindowHandleFather, HTuple hv_Font, HTuple hv_FontSize,
HTuple hv_Text, HTuple hv_PrevWindowCoordinates, HTuple hv_WindowHandleDict,
HTuple hv_WindowHandleKey, HTuple *hv_WindowHandleChild, HTuple *hv_PrevWindowCoordinatesOut);
// Chapter: Graphics / Window
// Short Description: Open a new window, either next to the last ones, or in a new row.
void open_next_window (HTuple hv_Font, HTuple hv_FontSize, HTuple hv_ShowBottomDesc,
HTuple hv_WidthImage, HTuple hv_HeightImage, HTuple hv_MapColorBarWidth, HTuple hv_ScaleWindows,
HTuple hv_ThresholdWidth, HTuple hv_PrevWindowCoordinates, HTuple hv_WindowHandleDict,
HTuple hv_WindowHandleKey, HTuple *hv_WindowHandleNew, HTuple *hv_WindowImageRatioHeight,
HTuple *hv_PrevWindowCoordinatesOut);
// Chapter: Deep Learning / Model
// Short Description: Preprocess 3D data for deep-learning-based training and inference.
void preprocess_dl_model_3d_data (HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess anomaly images for evaluation and visualization of deep-learning-based anomaly detection or Global Context Anomaly Detection.
void preprocess_dl_model_anomaly (HObject ho_AnomalyImages, HObject *ho_AnomalyImagesPreprocessed,
HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess the provided DLSample image for augmentation purposes.
void preprocess_dl_model_augmentation_data (HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the bounding boxes of type 'rectangle1' for a given sample.
void preprocess_dl_model_bbox_rect1 (HObject ho_ImageRaw, HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the bounding boxes of type 'rectangle2' for a given sample.
void preprocess_dl_model_bbox_rect2 (HObject ho_ImageRaw, HTuple hv_DLSample, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess images for deep-learning-based training and inference.
void preprocess_dl_model_images (HObject ho_Images, HObject *ho_ImagesPreprocessed,
HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Preprocess images for deep-learning-based training and inference of Deep OCR detection models.
void preprocess_dl_model_images_ocr_detection (HObject ho_Images, HObject *ho_ImagesPreprocessed,
HTuple hv_DLPreprocessParam);
// Chapter: OCR / Deep OCR
// Short Description: Preprocess images for deep-learning-based training and inference of Deep OCR recognition models.
void preprocess_dl_model_images_ocr_recognition (HObject ho_Images, HObject *ho_ImagesPreprocessed,
HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Object Detection and Instance Segmentation
// Short Description: Preprocess the instance segmentation masks for a sample given by the dictionary DLSample.
void preprocess_dl_model_instance_masks (HObject ho_ImageRaw, HTuple hv_DLSample,
HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Semantic Segmentation and Edge Extraction
// Short Description: Preprocess segmentation and weight images for deep-learning-based segmentation training and inference.
void preprocess_dl_model_segmentations (HObject ho_ImagesRaw, HObject ho_Segmentations,
HObject *ho_SegmentationsPreprocessed, HTuple hv_DLPreprocessParam);
// Chapter: Deep Learning / Model
// Short Description: Preprocess given DLSamples according to the preprocessing parameters given in DLPreprocessParam.
void preprocess_dl_samples (HTuple hv_DLSampleBatch, HTuple hv_DLPreprocessParam);
// Chapter: Image / Manipulation
// Short Description: Change value of ValuesToChange in Image to NewValue.
void reassign_pixel_values (HObject ho_Image, HObject *ho_ImageOut, HTuple hv_ValuesToChange,
HTuple hv_NewValue);
// Chapter: Deep Learning / Model
// Short Description: Remove invalid 3D pixels from a given domain.
void remove_invalid_3d_pixels (HObject ho_ImageX, HObject ho_ImageY, HObject ho_ImageZ,
HObject ho_Domain, HObject *ho_DomainOut, HTuple hv_InvalidPixelValue);
// Chapter: Deep Learning / Model
// Short Description: Replace legacy preprocessing parameters or values.
void replace_legacy_preprocessing_parameters (HTuple hv_DLPreprocessParam);
// Chapter: Filters / Arithmetic
// Short Description: Scale the gray values of an image from the interval [Min,Max] to [0,255]
void scale_image_range (HObject ho_Image, HObject *ho_ImageScaled, HTuple hv_Min,
HTuple hv_Max);
// Chapter: Graphics / Text
// Short Description: Set font independent of OS
void set_display_font (HTuple hv_WindowHandle, HTuple hv_Size, HTuple hv_Font, HTuple hv_Bold,
HTuple hv_Slant);
// Chapter: OCR / Deep OCR
// Short Description: Split rectangle2 into a number of rectangles.
void split_rectangle2 (HTuple hv_Row, HTuple hv_Column, HTuple hv_Phi, HTuple hv_Length1,
HTuple hv_Length2, HTuple hv_NumSplits, HTuple *hv_SplitRow, HTuple *hv_SplitColumn,
HTuple *hv_SplitPhi, HTuple *hv_SplitLength1Out, HTuple *hv_SplitLength2Out);
// Chapter: Graphics / Window
// Short Description: Set and return meta information to display images correctly.
void update_window_meta_information (HTuple hv_WindowHandle, HTuple hv_WidthImage,
HTuple hv_HeightImage, HTuple hv_WindowRow1, HTuple hv_WindowColumn1, HTuple hv_MapColorBarWidth,
HTuple hv_MarginBottom, HTuple *hv_WindowImageRatioHeight, HTuple *hv_WindowImageRatioWidth,
HTuple *hv_SetPartRow2, HTuple *hv_SetPartColumn2, HTuple *hv_PrevWindowCoordinatesOut);
// Procedures
// External procedures
// Chapter: Image / Channel
void add_colormap_to_image (HObject ho_GrayValueImage, HObject ho_Image, HObject *ho_ColoredImage,
    HTuple hv_HeatmapColorScheme)
{
  // Iconic temporaries.
  HObject ho_Heatmap, ho_SplitChannels, ho_NormalizedChannels;
  HObject ho_OneChannel, ho_OneChannelScaled, ho_OneChannelByte;
  HObject ho_BaseImage, ho_BaseR, ho_BaseG, ho_BaseB;
  // Control temporaries.
  HTuple hv_PixelType, hv_ChannelCount, hv_ChannelIdx;
  HTuple hv_MinGray, hv_MaxGray, hv_Unused;
  //
  //Overlay a gray-value image onto an (RGB) image using the chosen color map.
  //
  //The LUT transformation requires a byte image; real images are rescaled first.
  GetImageType(ho_GrayValueImage, &hv_PixelType);
  if (0 != (int(hv_PixelType==HTuple("real"))))
  {
    scale_image_range(ho_GrayValueImage, &ho_GrayValueImage, 0, 1);
    ConvertImageType(ho_GrayValueImage, &ho_GrayValueImage, "byte");
  }
  else if (0 != (int(hv_PixelType!=HTuple("byte"))))
  {
    throw HException(HTuple("For this transformation, a byte or real image is needed!"));
  }
  //
  //Map the gray values to an RGB heatmap according to the requested scheme.
  apply_colorscheme_on_gray_value_image(ho_GrayValueImage, &ho_Heatmap, hv_HeatmapColorScheme);
  //
  //Normalize every channel of the background image into byte range.
  ImageToChannels(ho_Image, &ho_SplitChannels);
  CountChannels(ho_Image, &hv_ChannelCount);
  GenEmptyObj(&ho_NormalizedChannels);
  {
    HTuple end_idx = hv_ChannelCount;
    HTuple step_idx = 1;
    for (hv_ChannelIdx=1; hv_ChannelIdx.Continue(end_idx, step_idx); hv_ChannelIdx += step_idx)
    {
      SelectObj(ho_SplitChannels, &ho_OneChannel, hv_ChannelIdx);
      MinMaxGray(ho_OneChannel, ho_OneChannel, 0, &hv_MinGray, &hv_MaxGray, &hv_Unused);
      scale_image_range(ho_OneChannel, &ho_OneChannelScaled, hv_MinGray, hv_MaxGray);
      ConvertImageType(ho_OneChannelScaled, &ho_OneChannelByte, "byte");
      ConcatObj(ho_NormalizedChannels, ho_OneChannelByte, &ho_NormalizedChannels);
    }
  }
  ChannelsToImage(ho_NormalizedChannels, &ho_BaseImage);
  //
  //The background must carry exactly three channels so it can be blended
  //with the RGB heatmap and displayed correctly.
  CountChannels(ho_BaseImage, &hv_ChannelCount);
  if (0 != (int(hv_ChannelCount!=3)))
  {
    //Replicate the first channel to synthesize a 3-channel image.
    AccessChannel(ho_BaseImage, &ho_BaseR, 1);
    CopyImage(ho_BaseR, &ho_BaseG);
    CopyImage(ho_BaseR, &ho_BaseB);
    Compose3(ho_BaseR, ho_BaseG, ho_BaseB, &ho_BaseImage);
  }
  //
  //Blend background and heatmap with equal weights.
  AddImage(ho_BaseImage, ho_Heatmap, &ho_Heatmap, 0.5, 0);
  (*ho_ColoredImage) = ho_Heatmap;
  //
  return;
}
// Chapter: Image / Channel
// Short Description: Create a lookup table and convert a gray scale image.
void apply_colorscheme_on_gray_value_image (HObject ho_InputImage, HObject *ho_ResultImage,
    HTuple hv_Schema)
{
  // Iconic temporaries: the three LUT-transformed color channels.
  HObject ho_RedChannel, ho_GreenChannel, ho_BlueChannel;
  // Control temporaries.
  HTuple hv_Ramp, hv_Floor, hv_Ceil;
  HTuple hv_LutR, hv_LutG, hv_LutB;
  //
  //Build a 256-entry RGB lookup table for the requested Schema and apply
  //it to the gray-value InputImage, producing an RGB ResultImage.
  //
  hv_Ramp = HTuple::TupleGenSequence(0,255,1);
  TupleGenConst(256, 0, &hv_Floor);
  TupleGenConst(256, 255, &hv_Ceil);
  //
  //Tent-shaped profile |x - off| * a1 + a0, clipped to [0, 255].
  auto tent = [&](const HTuple &off, const HTuple &a1, const HTuple &a0) -> HTuple
  {
    return (((((hv_Ramp-off).TupleAbs())*a1)+a0).TupleMax2(hv_Floor)).TupleMin2(hv_Ceil);
  };
  //Linear profile x * a1 - a0, clipped to [0, 255].
  auto ramp = [&](const HTuple &a1, const HTuple &a0) -> HTuple
  {
    return (((hv_Ramp*a1)-a0).TupleMax2(hv_Floor)).TupleMin2(hv_Ceil);
  };
  //
  if (0 != (int(hv_Schema==HTuple("jet"))))
  {
    //Scheme Jet: from blue to red.
    hv_LutR = tent(3.0*64.0, -4.0, 255.0+128.0);
    hv_LutG = tent(2.0*64.0, -4.0, 255.0+128.0);
    hv_LutB = tent(64.0, -4.0, 255.0+128.0);
  }
  else if (0 != (int(hv_Schema==HTuple("inverse_jet"))))
  {
    //Scheme InvJet: from red to blue.
    hv_LutR = tent(64, -4.0, 255.0+128.0);
    hv_LutG = tent(2*64, -4.0, 255.0+128.0);
    hv_LutB = tent(3*64, -4.0, 255.0+128.0);
  }
  else if (0 != (int(hv_Schema==HTuple("hot"))))
  {
    //Scheme Hot: channels switch on one after another.
    hv_LutR = ramp(3.0, 0.0);
    hv_LutG = ramp(3.0, ((1.0/3.0)*3.0)*255.0);
    hv_LutB = ramp(3.0, ((2.0/3.0)*3.0)*255.0);
  }
  else if (0 != (int(hv_Schema==HTuple("inverse_hot"))))
  {
    //Scheme Inverse Hot: mirrored version of Hot.
    hv_LutR = ramp(-3.0, (-3.0)*255.0);
    hv_LutG = ramp(-3.0, ((2.0/3.0)*(-3.0))*255.0);
    hv_LutB = ramp(-3.0, ((1.0/3.0)*(-3.0))*255.0);
  }
  else
  {
    //Reject anything outside the four supported schemes.
    throw HException(("Unknown color schema: "+hv_Schema)+".");
  }
  //
  //Translate the gray values through the per-channel LUTs and compose
  //the result into a single 3-channel image.
  LutTrans(ho_InputImage, &ho_RedChannel, hv_LutR);
  LutTrans(ho_InputImage, &ho_GreenChannel, hv_LutG);
  LutTrans(ho_InputImage, &ho_BlueChannel, hv_LutB);
  Compose3(ho_RedChannel, ho_GreenChannel, ho_BlueChannel, &(*ho_ResultImage));
  //
  return;
}
// Chapter: Deep Learning / Model
// Short Description: Compute zoom factors to fit an image to a target size.
void calculate_dl_image_zoom_factors (HTuple hv_ImageWidth, HTuple hv_ImageHeight,
    HTuple hv_TargetWidth, HTuple hv_TargetHeight, HTuple hv_DLPreprocessParam, HTuple *hv_ZoomFactorWidth,
    HTuple *hv_ZoomFactorHeight)
{
  // Control temporaries.
  HTuple hv_UnitZoomW, hv_UnitZoomH;
  HTuple hv_KeepAspect, hv_MinZoom, hv_CompareDict;
  //
  //Compute the zoom factors mapping an ImageWidth x ImageHeight image onto
  //TargetWidth x TargetHeight, optionally preserving the aspect ratio.
  //
  //Unit factors: the zoom that would shrink each axis down to a single pixel.
  hv_UnitZoomW = 1.0/(hv_ImageWidth.TupleReal());
  hv_UnitZoomH = 1.0/(hv_ImageHeight.TupleReal());
  //
  //Independent per-axis factors for the requested target size.
  (*hv_ZoomFactorWidth) = hv_TargetWidth*hv_UnitZoomW;
  (*hv_ZoomFactorHeight) = hv_TargetHeight*hv_UnitZoomH;
  //
  //Aspect-ratio preserving zoom applies to 'ocr_detection' models only.
  //The comparison dictionary keeps the test safe if 'model_type' is absent.
  CreateDict(&hv_CompareDict);
  SetDictTuple(hv_CompareDict, "comp", "ocr_detection");
  hv_KeepAspect = (hv_DLPreprocessParam.TupleConcat(hv_CompareDict)).TupleTestEqualDictItem("model_type","comp");
  hv_CompareDict = HTuple::TupleConstant("HNULL");
  //
  if (0 != hv_KeepAspect)
  {
    //Pick the smaller factor so the zoomed image fits the target on both
    //axes; the other axis is left with an unfilled domain.
    hv_MinZoom = (*hv_ZoomFactorWidth).TupleMin2((*hv_ZoomFactorHeight));
    //Never let a zoom factor produce a side length below 1px.
    (*hv_ZoomFactorWidth) = hv_MinZoom.TupleMax2(hv_UnitZoomW);
    (*hv_ZoomFactorHeight) = hv_MinZoom.TupleMax2(hv_UnitZoomH);
  }
  return;
}
// Chapter: Deep Learning / Model
// Short Description: Check the content of the parameter dictionary DLPreprocessParam.
//
// Validates a DL preprocessing parameter dictionary before it is used:
//   - verifies 'model_type' is present and one of the supported types,
//   - verifies all required keys for that model type exist,
//   - rejects unknown keys and keys with invalid values/types,
//   - enforces model-type-specific constraints (image range for
//     classification/detection, background/ignore class IDs for
//     segmentation, instance/orientation parameters for detection).
// Throws HException with a descriptive message on the first violation.
// The check can be disabled entirely by setting 'check_params' to false.
void check_dl_preprocess_param (HTuple hv_DLPreprocessParam)
{
  // Local iconic variables
  // Local control variables
  HTuple hv_CheckParams, hv_KeyExists, hv_DLModelType;
  HTuple hv_Exception, hv_SupportedModelTypes, hv_Index;
  HTuple hv_ParamNamesGeneral, hv_ParamNamesSegmentation;
  HTuple hv_ParamNamesDetectionOptional, hv_ParamNamesPreprocessingOptional;
  HTuple hv_ParamNames3DGrippingPointsOptional, hv_ParamNamesAll;
  HTuple hv_ParamNames, hv_KeysExists, hv_I, hv_Exists, hv_InputKeys;
  HTuple hv_Key, hv_Value, hv_Indices, hv_ValidValues, hv_ValidTypes;
  HTuple hv_V, hv_T, hv_IsInt, hv_ValidTypesListing, hv_ValidValueListing;
  HTuple hv_EmptyStrings, hv_ImageRangeMinExists, hv_ImageRangeMaxExists;
  HTuple hv_ImageRangeMin, hv_ImageRangeMax, hv_IndexParam;
  HTuple hv_SetBackgroundID, hv_ClassIDsBackground, hv_Intersection;
  HTuple hv_IgnoreClassIDs, hv_KnownClasses, hv_IgnoreClassID;
  HTuple hv_OptionalKeysExist, hv_InstanceType, hv_IsInstanceSegmentation;
  HTuple hv_IgnoreDirection, hv_ClassIDsNoOrientation, hv_SemTypes;
  //
  //This procedure checks a dictionary with parameters for DL preprocessing.
  //
  hv_CheckParams = 1;
  //If check_params is set to false, do not check anything.
  GetDictParam(hv_DLPreprocessParam, "key_exists", "check_params", &hv_KeyExists);
  if (0 != hv_KeyExists)
  {
    GetDictTuple(hv_DLPreprocessParam, "check_params", &hv_CheckParams);
    if (0 != (hv_CheckParams.TupleNot()))
    {
      return;
    }
  }
  //
  //'model_type' is mandatory; a missing key raises a dedicated error.
  try
  {
    GetDictTuple(hv_DLPreprocessParam, "model_type", &hv_DLModelType);
  }
  // catch (Exception)
  catch (HException &HDevExpDefaultException)
  {
    HDevExpDefaultException.ToHTuple(&hv_Exception);
    throw HException(HTuple(HTuple("DLPreprocessParam needs the parameter: '")+"model_type")+"'");
  }
  //
  //Check for correct model type.
  hv_SupportedModelTypes.Clear();
  hv_SupportedModelTypes[0] = "counting";
  hv_SupportedModelTypes[1] = "3d_gripping_point_detection";
  hv_SupportedModelTypes[2] = "anomaly_detection";
  hv_SupportedModelTypes[3] = "classification";
  hv_SupportedModelTypes[4] = "detection";
  hv_SupportedModelTypes[5] = "gc_anomaly_detection";
  hv_SupportedModelTypes[6] = "multi_label_classification";
  hv_SupportedModelTypes[7] = "ocr_recognition";
  hv_SupportedModelTypes[8] = "ocr_detection";
  hv_SupportedModelTypes[9] = "segmentation";
  TupleFind(hv_SupportedModelTypes, hv_DLModelType, &hv_Index);
  if (0 != (HTuple(int(hv_Index==-1)).TupleOr(int(hv_Index==HTuple()))))
  {
    //NOTE: message lists every entry of hv_SupportedModelTypes above
    //(previously 'counting' was missing from this text).
    throw HException(HTuple("Only models of type 'counting', '3d_gripping_point_detection', 'anomaly_detection', 'classification', 'detection', 'gc_anomaly_detection', 'multi_label_classification', 'ocr_recognition', 'ocr_detection' or 'segmentation' are supported"));
    return;
  }
  //
  //Parameter names that are required.
  //General parameters.
  hv_ParamNamesGeneral.Clear();
  hv_ParamNamesGeneral[0] = "model_type";
  hv_ParamNamesGeneral[1] = "image_width";
  hv_ParamNamesGeneral[2] = "image_height";
  hv_ParamNamesGeneral[3] = "image_num_channels";
  hv_ParamNamesGeneral[4] = "image_range_min";
  hv_ParamNamesGeneral[5] = "image_range_max";
  hv_ParamNamesGeneral[6] = "normalization_type";
  hv_ParamNamesGeneral[7] = "domain_handling";
  //Segmentation specific parameters.
  hv_ParamNamesSegmentation.Clear();
  hv_ParamNamesSegmentation[0] = "ignore_class_ids";
  hv_ParamNamesSegmentation[1] = "set_background_id";
  hv_ParamNamesSegmentation[2] = "class_ids_background";
  //Detection specific parameters (all optional).
  hv_ParamNamesDetectionOptional.Clear();
  hv_ParamNamesDetectionOptional[0] = "instance_type";
  hv_ParamNamesDetectionOptional[1] = "ignore_direction";
  hv_ParamNamesDetectionOptional[2] = "class_ids_no_orientation";
  hv_ParamNamesDetectionOptional[3] = "instance_segmentation";
  //Optional preprocessing parameters.
  hv_ParamNamesPreprocessingOptional.Clear();
  hv_ParamNamesPreprocessingOptional[0] = "mean_values_normalization";
  hv_ParamNamesPreprocessingOptional[1] = "deviation_values_normalization";
  hv_ParamNamesPreprocessingOptional[2] = "check_params";
  hv_ParamNamesPreprocessingOptional[3] = "augmentation";
  //3D Gripping Point Detection specific parameters.
  hv_ParamNames3DGrippingPointsOptional.Clear();
  hv_ParamNames3DGrippingPointsOptional[0] = "min_z";
  hv_ParamNames3DGrippingPointsOptional[1] = "max_z";
  hv_ParamNames3DGrippingPointsOptional[2] = "normal_image_width";
  hv_ParamNames3DGrippingPointsOptional[3] = "normal_image_height";
  //All parameters
  hv_ParamNamesAll.Clear();
  hv_ParamNamesAll.Append(hv_ParamNamesGeneral);
  hv_ParamNamesAll.Append(hv_ParamNamesSegmentation);
  hv_ParamNamesAll.Append(hv_ParamNamesDetectionOptional);
  hv_ParamNamesAll.Append(hv_ParamNames3DGrippingPointsOptional);
  hv_ParamNamesAll.Append(hv_ParamNamesPreprocessingOptional);
  hv_ParamNames = hv_ParamNamesGeneral;
  if (0 != (HTuple(int(hv_DLModelType==HTuple("segmentation"))).TupleOr(int(hv_DLModelType==HTuple("3d_gripping_point_detection")))))
  {
    //Extend ParamNames for models of type segmentation.
    hv_ParamNames = hv_ParamNames.TupleConcat(hv_ParamNamesSegmentation);
  }
  //
  //Check if legacy parameter exist.
  //Otherwise map it to the legal parameter.
  replace_legacy_preprocessing_parameters(hv_DLPreprocessParam);
  //
  //Check that all necessary parameters are included.
  //
  GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNames, &hv_KeysExists);
  if (0 != (int(((hv_KeysExists.TupleEqualElem(0)).TupleSum())>0)))
  {
    {
      HTuple end_val54 = hv_KeysExists.TupleLength();
      HTuple step_val54 = 1;
      for (hv_I=0; hv_I.Continue(end_val54, step_val54); hv_I += step_val54)
      {
        hv_Exists = HTuple(hv_KeysExists[hv_I]);
        if (0 != (hv_Exists.TupleNot()))
        {
          throw HException(("DLPreprocessParam needs the parameter: '"+HTuple(hv_ParamNames[hv_I]))+"'");
        }
      }
    }
  }
  //
  //Check the keys provided: each key must be known, and selected keys
  //must carry values from a fixed set or of a fixed type.
  GetDictParam(hv_DLPreprocessParam, "keys", HTuple(), &hv_InputKeys);
  {
    HTuple end_val64 = (hv_InputKeys.TupleLength())-1;
    HTuple step_val64 = 1;
    for (hv_I=0; hv_I.Continue(end_val64, step_val64); hv_I += step_val64)
    {
      hv_Key = HTuple(hv_InputKeys[hv_I]);
      GetDictTuple(hv_DLPreprocessParam, hv_Key, &hv_Value);
      //Check that the key is known.
      TupleFind(hv_ParamNamesAll, hv_Key, &hv_Indices);
      if (0 != (int(hv_Indices==-1)))
      {
        throw HException(("Unknown key for DLPreprocessParam: '"+HTuple(hv_InputKeys[hv_I]))+"'");
        return;
      }
      //Set expected values and types.
      hv_ValidValues = HTuple();
      hv_ValidTypes = HTuple();
      if (0 != (int(hv_Key==HTuple("normalization_type"))))
      {
        hv_ValidValues.Clear();
        hv_ValidValues[0] = "all_channels";
        hv_ValidValues[1] = "first_channel";
        hv_ValidValues[2] = "constant_values";
        hv_ValidValues[3] = "none";
      }
      else if (0 != (int(hv_Key==HTuple("domain_handling"))))
      {
        //'keep_domain' is only valid for (gc_)anomaly detection and
        //3d_gripping_point_detection model types.
        if (0 != (int(hv_DLModelType==HTuple("anomaly_detection"))))
        {
          hv_ValidValues.Clear();
          hv_ValidValues[0] = "full_domain";
          hv_ValidValues[1] = "crop_domain";
          hv_ValidValues[2] = "keep_domain";
        }
        else if (0 != (int(hv_DLModelType==HTuple("3d_gripping_point_detection"))))
        {
          hv_ValidValues.Clear();
          hv_ValidValues[0] = "full_domain";
          hv_ValidValues[1] = "crop_domain";
          hv_ValidValues[2] = "keep_domain";
        }
        else
        {
          hv_ValidValues.Clear();
          hv_ValidValues[0] = "full_domain";
          hv_ValidValues[1] = "crop_domain";
        }
      }
      else if (0 != (int(hv_Key==HTuple("model_type"))))
      {
        hv_ValidValues.Clear();
        hv_ValidValues[0] = "counting";
        hv_ValidValues[1] = "3d_gripping_point_detection";
        hv_ValidValues[2] = "anomaly_detection";
        hv_ValidValues[3] = "classification";
        hv_ValidValues[4] = "detection";
        hv_ValidValues[5] = "gc_anomaly_detection";
        hv_ValidValues[6] = "multi_label_classification";
        hv_ValidValues[7] = "ocr_recognition";
        hv_ValidValues[8] = "ocr_detection";
        hv_ValidValues[9] = "segmentation";
      }
      else if (0 != (int(hv_Key==HTuple("augmentation"))))
      {
        hv_ValidValues.Clear();
        hv_ValidValues[0] = "true";
        hv_ValidValues[1] = "false";
      }
      else if (0 != (int(hv_Key==HTuple("set_background_id"))))
      {
        hv_ValidTypes = "int";
      }
      else if (0 != (int(hv_Key==HTuple("class_ids_background"))))
      {
        hv_ValidTypes = "int";
      }
      //Check that type is valid.
      if (0 != (int((hv_ValidTypes.TupleLength())>0)))
      {
        {
          HTuple end_val97 = (hv_ValidTypes.TupleLength())-1;
          HTuple step_val97 = 1;
          for (hv_V=0; hv_V.Continue(end_val97, step_val97); hv_V += step_val97)
          {
            hv_T = HTuple(hv_ValidTypes[hv_V]);
            if (0 != (int(hv_T==HTuple("int"))))
            {
              TupleIsInt(hv_Value, &hv_IsInt);
              if (0 != (hv_IsInt.TupleNot()))
              {
                //Build a readable listing of the valid types for the message.
                hv_ValidTypes = ("'"+hv_ValidTypes)+"'";
                if (0 != (int((hv_ValidTypes.TupleLength())<2)))
                {
                  hv_ValidTypesListing = hv_ValidTypes;
                }
                else
                {
                  hv_ValidTypesListing = (((hv_ValidTypes.TupleSelectRange(0,HTuple(0).TupleMax2((hv_ValidTypes.TupleLength())-2)))+HTuple(", "))+HTuple(hv_ValidTypes[(hv_ValidTypes.TupleLength())-1])).TupleSum();
                }
                throw HException(((((("The value given in the key '"+hv_Key)+"' of DLPreprocessParam is invalid. Valid types are: ")+hv_ValidTypesListing)+". The given value was '")+hv_Value)+"'.");
                return;
              }
            }
            else
            {
              throw HException("Internal error. Unknown valid type.");
            }
          }
        }
      }
      //Check that value is valid.
      if (0 != (int((hv_ValidValues.TupleLength())>0)))
      {
        TupleFindFirst(hv_ValidValues, hv_Value, &hv_Index);
        if (0 != (int(hv_Index==-1)))
        {
          //Build a readable listing of the valid values for the message.
          hv_ValidValues = ("'"+hv_ValidValues)+"'";
          if (0 != (int((hv_ValidValues.TupleLength())<2)))
          {
            hv_ValidValueListing = hv_ValidValues;
          }
          else
          {
            hv_EmptyStrings = HTuple((hv_ValidValues.TupleLength())-2,"");
            hv_ValidValueListing = (((hv_ValidValues.TupleSelectRange(0,HTuple(0).TupleMax2((hv_ValidValues.TupleLength())-2)))+HTuple(", "))+(hv_EmptyStrings.TupleConcat(HTuple(hv_ValidValues[(hv_ValidValues.TupleLength())-1])))).TupleSum();
          }
          throw HException(((((("The value given in the key '"+hv_Key)+"' of DLPreprocessParam is invalid. Valid values are: ")+hv_ValidValueListing)+". The given value was '")+hv_Value)+"'.");
        }
      }
    }
  }
  //
  //Check the correct setting of ImageRangeMin and ImageRangeMax.
  //These model types require a fixed gray-value range of [-127, 128].
  if (0 != (HTuple(HTuple(int(hv_DLModelType==HTuple("classification"))).TupleOr(int(hv_DLModelType==HTuple("multi_label_classification")))).TupleOr(int(hv_DLModelType==HTuple("detection")))))
  {
    //Check ImageRangeMin and ImageRangeMax.
    GetDictParam(hv_DLPreprocessParam, "key_exists", "image_range_min", &hv_ImageRangeMinExists);
    GetDictParam(hv_DLPreprocessParam, "key_exists", "image_range_max", &hv_ImageRangeMaxExists);
    //If they are present, check that they are set correctly.
    if (0 != hv_ImageRangeMinExists)
    {
      GetDictTuple(hv_DLPreprocessParam, "image_range_min", &hv_ImageRangeMin);
      if (0 != (int(hv_ImageRangeMin!=-127)))
      {
        throw HException(("For model type "+hv_DLModelType)+" ImageRangeMin has to be -127.");
      }
    }
    if (0 != hv_ImageRangeMaxExists)
    {
      GetDictTuple(hv_DLPreprocessParam, "image_range_max", &hv_ImageRangeMax);
      if (0 != (int(hv_ImageRangeMax!=128)))
      {
        throw HException(("For model type "+hv_DLModelType)+" ImageRangeMax has to be 128.");
      }
    }
  }
  //
  //Check segmentation specific parameters.
  if (0 != (HTuple(int(hv_DLModelType==HTuple("segmentation"))).TupleOr(int(hv_DLModelType==HTuple("3d_gripping_point_detection")))))
  {
    //Check if detection specific parameters are set.
    GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNamesDetectionOptional,
        &hv_KeysExists);
    //If they are present, check that they are [].
    {
      HTuple end_val157 = (hv_ParamNamesDetectionOptional.TupleLength())-1;
      HTuple step_val157 = 1;
      for (hv_IndexParam=0; hv_IndexParam.Continue(end_val157, step_val157); hv_IndexParam += step_val157)
      {
        if (0 != (HTuple(hv_KeysExists[hv_IndexParam])))
        {
          GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[hv_IndexParam]),
              &hv_Value);
          if (0 != (int(hv_Value!=HTuple())))
          {
            throw HException(((("The preprocessing parameter '"+HTuple(hv_ParamNamesDetectionOptional[hv_IndexParam]))+"' was set to ")+hv_Value)+HTuple(" but for segmentation it should be set to [], as it is not used for this method."));
          }
        }
      }
    }
    //Check 'set_background_id'.
    GetDictTuple(hv_DLPreprocessParam, "set_background_id", &hv_SetBackgroundID);
    if (0 != (HTuple(int(hv_SetBackgroundID!=HTuple())).TupleAnd(int(hv_DLModelType==HTuple("3d_gripping_point_detection")))))
    {
      throw HException(HTuple(HTuple("The preprocessing parameter '")+"set_background_id")+HTuple("' should be set to [] for 3d_gripping_point_detection, as it is not used for this method."));
    }
    if (0 != (int((hv_SetBackgroundID.TupleLength())>1)))
    {
      throw HException("Only one class_id as 'set_background_id' allowed.");
    }
    //Check 'class_ids_background'.
    GetDictTuple(hv_DLPreprocessParam, "class_ids_background", &hv_ClassIDsBackground);
    if (0 != (HTuple(int(hv_ClassIDsBackground!=HTuple())).TupleAnd(int(hv_DLModelType==HTuple("3d_gripping_point_detection")))))
    {
      throw HException(HTuple(HTuple("The preprocessing parameter '")+"class_ids_background")+HTuple("' should be set to [] for 3d_gripping_point_detection, as it is not used for this method."));
    }
    //'set_background_id' and 'class_ids_background' only make sense together.
    if (0 != (HTuple(HTuple(int((hv_SetBackgroundID.TupleLength())>0)).TupleAnd(HTuple(int((hv_ClassIDsBackground.TupleLength())>0)).TupleNot())).TupleOr(HTuple(int((hv_ClassIDsBackground.TupleLength())>0)).TupleAnd(HTuple(int((hv_SetBackgroundID.TupleLength())>0)).TupleNot()))))
    {
      throw HException("Both keys 'set_background_id' and 'class_ids_background' are required.");
    }
    //Check that 'class_ids_background' and 'set_background_id' are disjoint.
    if (0 != (int((hv_SetBackgroundID.TupleLength())>0)))
    {
      TupleIntersection(hv_SetBackgroundID, hv_ClassIDsBackground, &hv_Intersection);
      if (0 != (hv_Intersection.TupleLength()))
      {
        throw HException("Class IDs in 'set_background_id' and 'class_ids_background' need to be disjoint.");
      }
    }
    //Check 'ignore_class_ids': must not overlap with background classes.
    GetDictTuple(hv_DLPreprocessParam, "ignore_class_ids", &hv_IgnoreClassIDs);
    if (0 != (HTuple(int(hv_IgnoreClassIDs!=HTuple())).TupleAnd(int(hv_DLModelType==HTuple("3d_gripping_point_detection")))))
    {
      throw HException(HTuple(HTuple("The preprocessing parameter '")+"ignore_class_ids")+HTuple("' should be set to [] for 3d_gripping_point_detection, as it is not used for this method."));
    }
    hv_KnownClasses.Clear();
    hv_KnownClasses.Append(hv_SetBackgroundID);
    hv_KnownClasses.Append(hv_ClassIDsBackground);
    {
      HTuple end_val194 = (hv_IgnoreClassIDs.TupleLength())-1;
      HTuple step_val194 = 1;
      for (hv_I=0; hv_I.Continue(end_val194, step_val194); hv_I += step_val194)
      {
        hv_IgnoreClassID = HTuple(hv_IgnoreClassIDs[hv_I]);
        TupleFindFirst(hv_KnownClasses, hv_IgnoreClassID, &hv_Index);
        if (0 != (HTuple(int((hv_Index.TupleLength())>0)).TupleAnd(int(hv_Index!=-1))))
        {
          throw HException("The given 'ignore_class_ids' must not be included in the 'class_ids_background' or 'set_background_id'.");
        }
      }
    }
  }
  else if (0 != (int(hv_DLModelType==HTuple("detection"))))
  {
    //Check if segmentation specific parameters are set.
    GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNamesSegmentation, &hv_KeysExists);
    //If they are present, check that they are [].
    {
      HTuple end_val205 = (hv_ParamNamesSegmentation.TupleLength())-1;
      HTuple step_val205 = 1;
      for (hv_IndexParam=0; hv_IndexParam.Continue(end_val205, step_val205); hv_IndexParam += step_val205)
      {
        if (0 != (HTuple(hv_KeysExists[hv_IndexParam])))
        {
          GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesSegmentation[hv_IndexParam]),
              &hv_Value);
          if (0 != (int(hv_Value!=HTuple())))
          {
            throw HException(((("The preprocessing parameter '"+HTuple(hv_ParamNamesSegmentation[hv_IndexParam]))+"' was set to ")+hv_Value)+HTuple(" but for detection it should be set to [], as it is not used for this method."));
          }
        }
      }
    }
    //Check optional parameters.
    GetDictParam(hv_DLPreprocessParam, "key_exists", hv_ParamNamesDetectionOptional,
        &hv_OptionalKeysExist);
    if (0 != (HTuple(hv_OptionalKeysExist[0])))
    {
      //Check 'instance_type'.
      GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[0]),
          &hv_InstanceType);
      //NOTE: 'mask' is accepted by this check, so the message lists it too
      //(previously the text wrongly claimed only rectangle1/rectangle2).
      if (0 != (int((((HTuple("rectangle1").Append("rectangle2")).Append("mask")).TupleFind(hv_InstanceType))==-1)))
      {
        throw HException(("Invalid generic parameter for 'instance_type': "+hv_InstanceType)+HTuple(", only 'rectangle1', 'rectangle2' and 'mask' are allowed"));
      }
    }
    //If instance_segmentation is set we might overwrite the instance_type for the preprocessing.
    if (0 != (HTuple(hv_OptionalKeysExist[3])))
    {
      GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[3]),
          &hv_IsInstanceSegmentation);
      if (0 != (int(((((HTuple(1).Append(0)).Append("true")).Append("false")).TupleFind(hv_IsInstanceSegmentation))==-1)))
      {
        throw HException(("Invalid generic parameter for 'instance_segmentation': "+hv_IsInstanceSegmentation)+HTuple(", only true, false, 'true' and 'false' are allowed"));
      }
    }
    if (0 != (HTuple(hv_OptionalKeysExist[1])))
    {
      //Check 'ignore_direction'.
      GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[1]),
          &hv_IgnoreDirection);
      if (0 != (int(((HTuple(1).Append(0)).TupleFind(hv_IgnoreDirection))==-1)))
      {
        throw HException(("Invalid generic parameter for 'ignore_direction': "+hv_IgnoreDirection)+HTuple(", only true and false are allowed"));
      }
    }
    if (0 != (HTuple(hv_OptionalKeysExist[2])))
    {
      //Check 'class_ids_no_orientation': all elements must be
      //non-negative integers.
      GetDictTuple(hv_DLPreprocessParam, HTuple(hv_ParamNamesDetectionOptional[2]),
          &hv_ClassIDsNoOrientation);
      TupleSemTypeElem(hv_ClassIDsNoOrientation, &hv_SemTypes);
      if (0 != (HTuple(int(hv_ClassIDsNoOrientation!=HTuple())).TupleAnd(int(((hv_SemTypes.TupleEqualElem("integer")).TupleSum())!=(hv_ClassIDsNoOrientation.TupleLength())))))
      {
        throw HException(("Invalid generic parameter for 'class_ids_no_orientation': "+hv_ClassIDsNoOrientation)+HTuple(", only integers are allowed"));
      }
      else
      {
        if (0 != (HTuple(int(hv_ClassIDsNoOrientation!=HTuple())).TupleAnd(int(((hv_ClassIDsNoOrientation.TupleGreaterEqualElem(0)).TupleSum())!=(hv_ClassIDsNoOrientation.TupleLength())))))
        {
          throw HException(("Invalid generic parameter for 'class_ids_no_orientation': "+hv_ClassIDsNoOrientation)+HTuple(", only non-negative integers are allowed"));
        }
      }
    }
  }
  //
  return;
}
// Chapter: Deep Learning / Model
// Short Description: Compute 3D normals.
void compute_normals_xyz (HObject ho_x, HObject ho_y, HObject ho_z, HObject *ho_NXImage,
HObject *ho_NYImage, HObject *ho_NZImage, HTuple hv_Smoothing)
{
// Local iconic variables
HObject ho_xScaled, ho_yScaled, ho_zScaled, ho_xDiffRow;
HObject ho_xDiffCol, ho_yDiffRow, ho_yDiffCol, ho_zDiffRow;
HObject ho_zDiffCol, ho_ImageResult, ho_ImageResult2, ho_NXRaw;
HObject ho_NYRaw, ho_NZRaw, ho_NXSquare, ho_NYSquare, ho_NZSquare;
HObject ho_ImageResult1, ho_SqrtImage;
// Local control variables
HTuple hv_Factor, hv_MaskRow, hv_MaskCol;
//For numerical reasons we scale the input data
hv_Factor = 1e6;
ScaleImage(ho_x, &ho_xScaled, hv_Factor, 0);
ScaleImage(ho_y, &ho_yScaled, hv_Factor, 0);
ScaleImage(ho_z, &ho_zScaled, hv_Factor, 0);
//Filter for diffs in row/col direction
hv_MaskRow.Clear();
hv_MaskRow[0] = 2;
hv_MaskRow[1] = 1;
hv_MaskRow[2] = 1.0;
hv_MaskRow[3] = 1;
hv_MaskRow[4] = -1;
hv_MaskCol.Clear();
hv_MaskCol[0] = 1;
hv_MaskCol[1] = 2;
hv_MaskCol[2] = 1.0;
hv_MaskCol[3] = -1;
hv_MaskCol[4] = 1;
ConvolImage(ho_xScaled, &ho_xDiffRow, hv_MaskRow, "continued");
ConvolImage(ho_xScaled, &ho_xDiffCol, hv_MaskCol, "continued");
ConvolImage(ho_yScaled, &ho_yDiffRow, hv_MaskRow, "continued");
ConvolImage(ho_yScaled, &ho_yDiffCol, hv_MaskCol, "continued");
ConvolImage(ho_zScaled, &ho_zDiffRow, hv_MaskRow, "continued");
ConvolImage(ho_zScaled, &ho_zDiffCol, hv_MaskCol, "continued");
//
//Calculate normal as cross product
MultImage(ho_yDiffRow, ho_zDiffCol, &ho_ImageResult, 1.0, 0);
MultImage(ho_zDiffRow, ho_yDiffCol, &ho_ImageResult2, -1.0, 0);
AddImage(ho_ImageResult, ho_ImageResult2, &ho_NXRaw, 1.0, 0);
//
MultImage(ho_xDiffRow, ho_zDiffCol, &ho_ImageResult, -1.0, 0);
MultImage(ho_zDiffRow, ho_xDiffCol, &ho_ImageResult2, 1.0, 0);
AddImage(ho_ImageResult, ho_ImageResult2, &ho_NYRaw, 1.0, 0);
//
MultImage(ho_xDiffRow, ho_yDiffCol, &ho_ImageResult, 1.0, 0);
MultImage(ho_yDiffRow, ho_xDiffCol, &ho_ImageResult2, -1.0, 0);
AddImage(ho_ImageResult, ho_ImageResult2, &ho_NZRaw, 1.0, 0);
//Smooth
//-> 5 is used as it is used in surface_normals_object_model_3d - 'xyz_mapping'