Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Marshall Vielmetti Starter Project PR #1

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
156 changes: 156 additions & 0 deletions src/perception/tag_detector/long_range_tag_detector.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
#include "tag_detector.hpp"

namespace mrover {
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

you may want to make a commit to remove src/perception/tag_detector from this branch


// Nodelet entry point: reads rosparams, configures the ArUco detector, and wires
// up the publishers/subscribers/services for long-range tag detection.
//
// NOTE(review): this file appears to have been copied from tag_detector.cpp —
// the method definitions here use TagDetectorNodelet while the accompanying
// long_range_tag_detector.hpp declares LongRangeTagDetectorNodelet (and the
// subscribe() below references LongRangeTagDetectorNodelet::imageCallback).
// The class names must be reconciled before this compiles; confirm which is intended.
void TagDetectorNodelet::onInit() {
// Multi-threaded handles so the image callback and service calls can run concurrently.
mNh = getMTNodeHandle();
mPnh = getMTPrivateNodeHandle();
// NOTE(review): prefer cv::aruco::DetectorParameters::create() over raw new
// (as done on the next line); cv::Ptr takes ownership either way, but the
// factory is the conventional OpenCV idiom.
mDetectorParams = new cv::aruco::DetectorParameters();
// Library defaults, used as fallbacks for every detector rosparam read below.
auto defaultDetectorParams = cv::aruco::DetectorParameters::create();
int dictionaryNumber;

mPnh.param<bool>("publish_images", mPublishImages, true);
// The "dictionary" rosparam is an int, so cast the enum default through its underlying type.
using DictEnumType = std::underlying_type_t<cv::aruco::PREDEFINED_DICTIONARY_NAME>;
mPnh.param<int>("dictionary", dictionaryNumber, static_cast<DictEnumType>(cv::aruco::DICT_4X4_50));
// Hit-count filter tuning: a tag must be seen often enough before being published.
mPnh.param<int>("min_hit_count_before_publish", mMinHitCountBeforePublish, 5);
mPnh.param<int>("max_hit_count", mMaxHitCount, 5);
mPnh.param<int>("tag_increment_weight", mTagIncrementWeight, 2);
mPnh.param<int>("tag_decrement_weight", mTagDecrementWeight, 1);

mIt.emplace(mNh);
mImgPub = mIt->advertise("long_range_tag_detection", 1);
mDictionary = cv::aruco::getPredefinedDictionary(dictionaryNumber);

// NOTE(review): mPcSub is not declared in the header shown in this PR (it
// declares image_transport::Subscriber mImgSub) — verify which member this
// should assign; the topic carries images, not point clouds.
mPcSub = mNh.subscribe("zoom_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this);
mServiceEnableDetections = mNh.advertiseService("enable_detections", &TagDetectorNodelet::enableDetectionsCallback, this);

// Lambda handles passing class pointer (implicit first parameter) to configCallback
mCallbackType = [this](mrover::DetectorParamsConfig& config, uint32_t level) { configCallback(config, level); };
mConfigServer.setCallback(mCallbackType);

// Per-parameter overrides for the ArUco detector; each falls back to the library default.
mPnh.param<double>("adaptiveThreshConstant",
mDetectorParams->adaptiveThreshConstant, defaultDetectorParams->adaptiveThreshConstant);
mPnh.param<int>("adaptiveThreshWinSizeMax",
mDetectorParams->adaptiveThreshWinSizeMax, defaultDetectorParams->adaptiveThreshWinSizeMax);
mPnh.param<int>("adaptiveThreshWinSizeMin",
mDetectorParams->adaptiveThreshWinSizeMin, defaultDetectorParams->adaptiveThreshWinSizeMin);
mPnh.param<int>("adaptiveThreshWinSizeStep",
mDetectorParams->adaptiveThreshWinSizeStep, defaultDetectorParams->adaptiveThreshWinSizeStep);
mPnh.param<int>("cornerRefinementMaxIterations",
mDetectorParams->cornerRefinementMaxIterations,
defaultDetectorParams->cornerRefinementMaxIterations);
mPnh.param<double>("cornerRefinementMinAccuracy",
mDetectorParams->cornerRefinementMinAccuracy,
defaultDetectorParams->cornerRefinementMinAccuracy);
mPnh.param<int>("cornerRefinementWinSize",
mDetectorParams->cornerRefinementWinSize, defaultDetectorParams->cornerRefinementWinSize);

// Corner refinement mode: subpixel (default) > contour > none.
bool doCornerRefinement = true;
mPnh.param<bool>("doCornerRefinement", doCornerRefinement, true);
if (doCornerRefinement) {
bool cornerRefinementSubPix = true;
mPnh.param<bool>("cornerRefinementSubPix", cornerRefinementSubPix, true);
if (cornerRefinementSubPix) {
mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;
} else {
mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
}
} else {
mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
}

mPnh.param<double>("errorCorrectionRate",
mDetectorParams->errorCorrectionRate, defaultDetectorParams->errorCorrectionRate);
mPnh.param<double>("minCornerDistanceRate",
mDetectorParams->minCornerDistanceRate, defaultDetectorParams->minCornerDistanceRate);
mPnh.param<int>("markerBorderBits",
mDetectorParams->markerBorderBits, defaultDetectorParams->markerBorderBits);
mPnh.param<double>("maxErroneousBitsInBorderRate",
mDetectorParams->maxErroneousBitsInBorderRate,
defaultDetectorParams->maxErroneousBitsInBorderRate);
mPnh.param<int>("minDistanceToBorder",
mDetectorParams->minDistanceToBorder, defaultDetectorParams->minDistanceToBorder);
mPnh.param<double>("minMarkerDistanceRate",
mDetectorParams->minMarkerDistanceRate, defaultDetectorParams->minMarkerDistanceRate);
mPnh.param<double>("minMarkerPerimeterRate",
mDetectorParams->minMarkerPerimeterRate, defaultDetectorParams->minMarkerPerimeterRate);
mPnh.param<double>("maxMarkerPerimeterRate",
mDetectorParams->maxMarkerPerimeterRate, defaultDetectorParams->maxMarkerPerimeterRate);
mPnh.param<double>("minOtsuStdDev", mDetectorParams->minOtsuStdDev, defaultDetectorParams->minOtsuStdDev);
mPnh.param<double>("perspectiveRemoveIgnoredMarginPerCell",
mDetectorParams->perspectiveRemoveIgnoredMarginPerCell,
defaultDetectorParams->perspectiveRemoveIgnoredMarginPerCell);
mPnh.param<int>("perspectiveRemovePixelPerCell",
mDetectorParams->perspectiveRemovePixelPerCell,
defaultDetectorParams->perspectiveRemovePixelPerCell);
mPnh.param<double>("polygonalApproxAccuracyRate",
mDetectorParams->polygonalApproxAccuracyRate,
defaultDetectorParams->polygonalApproxAccuracyRate);

// NOTE(review): mUseOdom is logged here but is never read from a rosparam in
// this function, so it will always hold its default value — confirm intent.
NODELET_INFO("Tag detection ready, use odom frame: %s, min hit count: %d, max hit count: %d, hit increment weight: %d, hit decrement weight: %d", mUseOdom ? "true" : "false", mMinHitCountBeforePublish, mMaxHitCount, mTagIncrementWeight, mTagDecrementWeight);
}

// Dynamic-reconfigure callback: copies every tunable ArUco detector parameter
// from the reconfigure request into the live detector parameter struct.
void TagDetectorNodelet::configCallback(mrover::DetectorParamsConfig& config, uint32_t level) {
// Don't load initial config, since it will overwrite the rosparam settings
// (dynamic_reconfigure signals the startup call with level == UINT32_MAX).
if (level == std::numeric_limits<uint32_t>::max()) return;

// Adaptive thresholding parameters.
mDetectorParams->adaptiveThreshConstant = config.adaptiveThreshConstant;
mDetectorParams->adaptiveThreshWinSizeMin = config.adaptiveThreshWinSizeMin;
mDetectorParams->adaptiveThreshWinSizeMax = config.adaptiveThreshWinSizeMax;
mDetectorParams->adaptiveThreshWinSizeStep = config.adaptiveThreshWinSizeStep;
// Corner refinement parameters and mode (subpixel > contour > none).
mDetectorParams->cornerRefinementMaxIterations = config.cornerRefinementMaxIterations;
mDetectorParams->cornerRefinementMinAccuracy = config.cornerRefinementMinAccuracy;
mDetectorParams->cornerRefinementWinSize = config.cornerRefinementWinSize;
if (config.doCornerRefinement) {
if (config.cornerRefinementSubpix) {
mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;
} else {
mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
}
} else {
mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
}
// Marker geometry / decoding parameters.
mDetectorParams->errorCorrectionRate = config.errorCorrectionRate;
mDetectorParams->minCornerDistanceRate = config.minCornerDistanceRate;
mDetectorParams->markerBorderBits = config.markerBorderBits;
mDetectorParams->maxErroneousBitsInBorderRate = config.maxErroneousBitsInBorderRate;
mDetectorParams->minDistanceToBorder = config.minDistanceToBorder;
mDetectorParams->minMarkerDistanceRate = config.minMarkerDistanceRate;
mDetectorParams->minMarkerPerimeterRate = config.minMarkerPerimeterRate;
mDetectorParams->maxMarkerPerimeterRate = config.maxMarkerPerimeterRate;
mDetectorParams->minOtsuStdDev = config.minOtsuStdDev;
// Perspective-removal (bit extraction) parameters.
mDetectorParams->perspectiveRemoveIgnoredMarginPerCell = config.perspectiveRemoveIgnoredMarginPerCell;
mDetectorParams->perspectiveRemovePixelPerCell = config.perspectiveRemovePixelPerCell;
mDetectorParams->polygonalApproxAccuracyRate = config.polygonalApproxAccuracyRate;
}

// Service handler for "enable_detections": toggles tag detection on or off and
// reports the new state back to the caller.
//
// @param req SetBool request; req.data is the desired enabled state
// @param res filled with success flag and a human-readable status message
// @return true — the service call itself always succeeds
bool TagDetectorNodelet::enableDetectionsCallback(std_srvs::SetBool::Request& req, std_srvs::SetBool::Response& res) {
    mEnableDetections = req.data;

    // The same text goes to both the service response and the ROS log.
    char const* status = mEnableDetections ? "Enabled tag detections." : "Disabled tag detections.";
    res.message = status;
    NODELET_INFO("%s", status);

    res.success = true;
    return true;
}

} // namespace mrover

int main(int argc, char** argv) {
ros::init(argc, argv, "tag_detector");

// Start the ZED Nodelet
nodelet::Loader nodelet;
nodelet.load(ros::this_node::getName(), "mrover/TagDetectorNodelet", ros::names::getRemappings(), {});

ros::spin();

return EXIT_SUCCESS;
}

#include <pluginlib/class_list_macros.h>
PLUGINLIB_EXPORT_CLASS(mrover::TagDetectorNodelet, nodelet::Nodelet)
45 changes: 45 additions & 0 deletions src/perception/tag_detector/long_range_tag_detector.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
namespace mrover {

class LongRangeTagDetectorNodelet : public nodelet::Nodelet {
ros::NodeHandle mNh, mPnh;

std::optional<image_transport::ImageTransport> mIt;
image_transport::Publisher mImgPub;
std::unordered_map<int, image_transport::Publisher> mThreshPubs; // Map from threshold scale to publisher
ros::ServiceServer mServiceEnableDetections;

image_transport::Subscriber mImgSub;

bool mEnableDetections = true;
bool mUseOdom{};
std::string mOdomFrameId, mMapFrameId, mCameraFrameId;
bool mPublishImages{}; // If set, we publish the images with the tags drawn on top
int mMinHitCountBeforePublish{};
int mMaxHitCount{};
int mTagIncrementWeight{};
int mTagDecrementWeight{};

cv::Ptr<cv::aruco::DetectorParameters> mDetectorParams;
cv::Ptr<cv::aruco::Dictionary> mDictionary;

cv::Mat mImg;
cv::Mat mGrayImg;
sensor_msgs::Image mImgMsg;
sensor_msgs::Image mThreshMsg;
uint32_t mSeqNum{};
std::optional<size_t> mPrevDetectedCount; // Log spam prevention
std::vector<std::vector<cv::Point2f>> mImmediateCorners;
std::vector<int> mImmediateIds;
std::unordered_map<int, Tag> mTags;
dynamic_reconfigure::Server<mrover::DetectorParamsConfig> mConfigServer;
dynamic_reconfigure::Server<mrover::DetectorParamsConfig>::CallbackType mCallbackType;

LoopProfiler mProfiler{"Tag Detector"};

void onInit() override;

void publishThresholdedImage();

std::optional<SE3> getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v);
}
}
21 changes: 21 additions & 0 deletions src/perception/tag_detector/long_range_tag_detector.processing.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#include "long_range_tag_detector.hpp"

#include "../point.hpp"

namespace mrover {

/**
* Detects tags in an image, draws the detected markers onto the image, and publishes them to /long_range_tag_detection
*
* @param msg image message
*/
void TagDetectorNodelet::imageCallback(sensor_msgs::Image const& msg) {
// 1. Detect tags
// 2. Update the hit counts of the tags in the mTags map
// 3. We only want to publish the tags if the topic has subscribers
if (mPublishImages && mImgPub.getNumSubscribers()) {
// Draw the tags on the image using OpenCV
}
}

} // namespace mrover
1 change: 1 addition & 0 deletions starter_project/autonomy/AutonomyStarterProject.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ add_message_files(
DIRECTORY
${CMAKE_CURRENT_LIST_DIR}/msg
FILES
StarterProjectTag.msg
#add new messages here
)

Expand Down
4 changes: 2 additions & 2 deletions starter_project/autonomy/launch/starter_project.launch
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,13 @@ starter_project/
Navigation
===========
-->
<!-- <node name="nav" pkg="mrover" type="navigation_starter_project.py" />-->
<node name="nav" pkg="mrover" type="navigation_starter_project.py" />

<!--
============
Localization
============
-->
<!-- <node name="localization" pkg="mrover" type="localization.py" output="screen" /> -->
<node name="localization" pkg="mrover" type="localization.py" output="screen" />

</launch>
4 changes: 4 additions & 0 deletions starter_project/autonomy/msg/StarterProjectTag.msg
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Detection of a single fiducial tag in a camera image.
int32 tagId                 # ID decoded from the tag
float32 xTagCenterPixel     # x of the tag center — presumably in image pixel coordinates; confirm with the publisher
float32 yTagCenterPixel     # y of the tag center — presumably in image pixel coordinates; confirm with the publisher
float32 closenessMetric     # heuristic estimate of how close the tag is — semantics defined by the detector; TODO confirm
32 changes: 29 additions & 3 deletions starter_project/autonomy/src/localization.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,17 @@
class Localization:
pose: SE3

gps_sub: rospy.Subscriber
imu_sub: rospy.Subscriber


def __init__(self):
# create subscribers for GPS and IMU data, linking them to our callback functions
# TODO

self.gps_sub = rospy.Subscriber("/gps/fix", NavSatFix, self.gps_callback)
self.imu_sub = rospy.Subscriber("/imu/imu_only", Imu, self.imu_callback)

# create a transform broadcaster for publishing to the TF tree
self.tf_broadcaster = tf2_ros.TransformBroadcaster()

def gps_callback(self, msg: NavSatFix):
    """
    Called for every NavSatFix message on the /gps topic. Converts the GPS
    (latitude, longitude) fix to local cartesian coordinates, stores the result
    in `self.pose`, and publishes that pose to the TF tree.
    """
    reference_coord = np.array([42.293195, -83.7096706])
    measured_coord = np.array([msg.latitude, msg.longitude])

    position = Localization.spherical_to_cartesian(measured_coord, reference_coord)

    # Only the translation comes from GPS; keep the current orientation.
    self.pose = SE3.from_pos_quat(position.copy(), self.pose.rotation.quaternion.copy())
    self.pose.publish_to_tf_tree(self.tf_broadcaster, "map", "base_link")



def imu_callback(self, msg: Imu):
    """
    Called for every Imu message on the /imu topic. Stores the measured
    orientation quaternion in `self.pose` (keeping the current position) and
    publishes that pose to the TF tree.
    """
    orientation = np.array(
        [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w]
    )

    # Only the rotation comes from the IMU; keep the current position.
    self.pose = SE3.from_pos_quat(self.pose.position.copy(), orientation.copy())
    self.pose.publish_to_tf_tree(self.tf_broadcaster, "map", "base_link")

@staticmethod
def spherical_to_cartesian(spherical_coord: np.ndarray, reference_coord: np.ndarray) -> np.ndarray:
Expand All @@ -56,8 +78,12 @@ def spherical_to_cartesian(spherical_coord: np.ndarray, reference_coord: np.ndar
given as a numpy array [latitude, longitude]
:returns: the approximated cartesian coordinates in meters, given as a numpy array [x, y, z]
"""
# TODO
R = 6371000 #CIRCUMF of earth

x = R*(np.radians(spherical_coord[1]) - np.radians(reference_coord[1])) * np.cos(np.radians(reference_coord[0]))
y = R*(np.radians(spherical_coord[0]) - np.radians(reference_coord[0]))

return np.array([x, y, 0]) # rotate by 90 degrees

def main():
# initialize the node
Expand Down
24 changes: 16 additions & 8 deletions starter_project/autonomy/src/navigation/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,21 @@ class Rover:

def get_pose(self) -> Optional[SE3]:
    """
    Return the rover's current pose (map -> base_link) from the TF tree,
    or None if no transform is available yet.
    """
    try:
        return SE3.from_tf_tree(self.ctx.tf_buffer, "map", "base_link")
    except Exception:
        # The original used a bare `except:` (which also swallows SystemExit and
        # KeyboardInterrupt) plus a `finally: return pose`, which suppresses
        # *every* exception raised in the try block — catch ordinary errors
        # only and report the miss.
        print("No SE3 Pose")
        return None

def send_drive_command(self, twist: Twist):
    """Publish the given Twist as a velocity command for the rover."""
    self.ctx.vel_cmd_publisher.publish(twist)

def send_drive_stop(self):
    """Stop the rover by commanding an all-zero velocity."""
    self.send_drive_command(Twist())


@dataclass
Expand All @@ -40,16 +46,18 @@ class Environment:
fid_pos: Optional[StarterProjectTag]

def receive_fid_data(self, message: StarterProjectTag):
    """Cache the most recently received tag detection message."""
    self.fid_pos = message

def get_fid_data(self) -> Optional[StarterProjectTag]:
    """
    Retrieves the last received message regarding fid pose
    if it exists, otherwise returns None
    """
    # fid_pos already holds either the latest message or None, so no
    # explicit None check is needed.
    return self.fid_pos


class Context:
Expand Down
Loading