diff --git a/angel_system/activity_classification/tcn_hpl/predict.py b/angel_system/activity_classification/tcn_hpl/predict.py
index c3e446afd..9d9168d5e 100644
--- a/angel_system/activity_classification/tcn_hpl/predict.py
+++ b/angel_system/activity_classification/tcn_hpl/predict.py
@@ -156,9 +156,6 @@ def objects_to_feats(
 
     window_size = len(frame_object_detections)
 
-    # print(f"frame_object_detections: {frame_object_detections}")
-    # print(f"frame_patient_poses: {frame_patient_poses}")
-    # print(f"frame_patient_poses: {len(frame_patient_poses)}")
     # Shape [window_size, None|n_feats]
     feature_list: List[Optional[npt.NDArray]] = [None] * window_size
     feature_ndim = None
@@ -171,7 +168,6 @@ def objects_to_feats(
     joint_object_offset_all_frames = [None] * window_size
     # for pose in frame_patient_poses:
     for i, (pose, detection) in enumerate(zip(frame_patient_poses, frame_object_detections)):
-        # print(f"detection: {detection}")
         if detection is None:
             continue
         labels = detection.labels
@@ -185,7 +181,6 @@ def objects_to_feats(
         # iterate over all detections in that frame
         joint_object_offset = []
         for j, label in enumerate(labels):
-            # print(f"label: {label}")
            if label == "hand (right)" or label == "hand (left)":
                x, y, w, h = bx[j], by[j], bw[j], bh[j]
@@ -194,11 +189,8 @@ def objects_to_feats(
                 offset_vector = []
                 if pose is not None:
-                    # print(f"pose: {pose}")
                     for joint in pose:
-                        # print(f"joint: {joint}")
                         jx, jy = joint.positions.x, joint.positions.y
-                        # jx, jy = joint['xy']
                         joint_point = np.array((jx, jy))
                         dist = np.linalg.norm(joint_point - hand_point)
                         offset_vector.append(dist)
@@ -211,20 +203,14 @@ def objects_to_feats(
                     joint_right_hand_offset_all_frames[i] = offset_vector
             else:
                 # if objects_joints and num_objects > 0:
-                # bx, by, bw, bh = xs[i], ys[i], ws[i], hs[i]
-                # ocx, ocy = bx+(bw//2), by+(bh//2)
                 x, y, w, h = bx[j], by[j], bw[j], bh[j]
                 cx, cy = x+(w//2), y+(h//2)
                 object_point = np.array((cx, cy))
                 offset_vector = []
                 if pose is not None:
-                    # print(f"pose: {pose}")
                     for joint in pose:
-                        # print(f"joint: {joint}")
                         jx, jy = joint.positions.x, joint.positions.y
-                        # jx, jy = joint['xy']
                         joint_point = np.array((jx, jy))
-                        # print(f"joint_points: {joint_point.dtype}, object_point: {object_point.dtype}")
                         dist = np.linalg.norm(joint_point - object_point)
                         offset_vector.append(dist)
                 else:
@@ -233,7 +219,6 @@ def objects_to_feats(
 
         joint_object_offset_all_frames[i] = joint_object_offset
 
-    # print(f"det_label_to_idx: {det_label_to_idx}")
     for i, frame_dets in enumerate(frame_object_detections):
         frame_dets: ObjectDetectionsLTRB
@@ -270,7 +255,6 @@ def objects_to_feats(
             )
 
             offset_vector = []
-            # if hands_joints:
             if joint_left_hand_offset_all_frames[i] is not None:
                 offset_vector.extend(joint_left_hand_offset_all_frames[i])
@@ -291,11 +275,6 @@ def objects_to_feats(
                 else:
                     offset_vector.extend(zero_offset)
 
-            # print(f"num of dets: {len(frame_dets.labels)}")
-            # print(f"feat length: {len(feat)}")
-            # print(f"offset_vector length: {len(offset_vector)}")
-            # print(f"offset_vector: {offset_vector}")
-
             feat.extend(offset_vector)
             feat = np.array(feat, dtype=np.float64).ravel()
             feat_memo[f_id] = feat
diff --git a/angel_system/activity_classification/train_activity_classifier.py b/angel_system/activity_classification/train_activity_classifier.py
index 9f9aec3f0..03e72ead5 100644
--- a/angel_system/activity_classification/train_activity_classifier.py
+++ b/angel_system/activity_classification/train_activity_classifier.py
@@ -39,9 +39,7 @@ def data_loader(
     - ann_by_image: Image id to annotation dict
     """
     print("Loading data....")
-    # Description to ID map.
-    # print(f"act labels: {act_labels}")
-    exit()
+
     act_map = {}
     inv_act_map = {}
     for step in act_labels["labels"]:
@@ -316,7 +314,6 @@ def compute_feats(
                 for joint in pose_keypoints:
                     jx, jy = joint['xy']
                     joint_point = np.array((jx, jy))
-                    # print(f"joint_points: {joint_point.dtype}, object_point: {object_point.dtype}")
                     dist = np.linalg.norm(joint_point - object_point)
                     offset_vector.append(dist)
             else:
@@ -363,9 +360,6 @@ def compute_feats(
                 offset_vector.extend(joint_object_offset[i])
             else:
                 offset_vector.extend(zero_offset)
-
-        # print(f"feat length: {len(feature_vec)}")
-        # print(f"offset_vector length: {len(offset_vector)}")
 
         feature_vec.extend(offset_vector)
diff --git a/angel_system/activity_classification/utils.py b/angel_system/activity_classification/utils.py
index 9ffb4e567..9bbfaf024 100644
--- a/angel_system/activity_classification/utils.py
+++ b/angel_system/activity_classification/utils.py
@@ -250,8 +250,6 @@ def obj_det2d_set_to_feature_by_method(
     remove_classes_count = [1 for label in non_objects_labels if label in label_to_ind.keys()]
     num_det_classes = len(label_to_ind) - len(remove_classes_count)# accomedate 2 hands instead of 1, accomedate top 3 objects
-    # print(f"label_to_ind: {label_to_ind}")
-    # print(f"num_det_classes: {num_det_classes}")
     det_class_max_conf = np.zeros((num_det_classes, top_n_objects))
 
     # The bounding box of the maximally confident detection
@@ -284,9 +282,7 @@ def obj_det2d_set_to_feature_by_method(
     # util functions
     #########################
     def find_hand(hand_str):
-        # hand_str = "hands"
         hand_idx = label_to_ind[hand_str]
-        # print(f"hand_index: {hand_idx}")
         hand_conf = det_class_max_conf[hand_idx][0]
         hand_bbox = kwimage.Boxes([det_class_bbox[0, hand_idx]], "xywh")
@@ -461,12 +457,9 @@ def dist_to_center(center1, center2):
         if use_hand_dist:
             feature_vec.append(right_hand_dist[left_hand_idx])
-            # print(f"use_hand_dist feature_vec: {len(feature_vec)}")
-
         if use_intersection:
             feature_vec.append([right_hand_intersection[left_hand_idx]])
-            # print(f"use_hand_dist feature_vec: {len(feature_vec)}")
 
         # Add object data
         for i in range(num_det_classes):
             if i in [right_hand_idx, left_hand_idx]: