diff --git a/libs/camera_submodule/camera_code.py b/libs/camera_submodule/camera_code.py
index 8eb3e51a..15b4820d 100644
--- a/libs/camera_submodule/camera_code.py
+++ b/libs/camera_submodule/camera_code.py
@@ -1,6 +1,9 @@
+# flake8: noqa
+import os
 import time
+
 import sensor
-import os
+import tf
 from pyb import UART
 
 # set up camera
@@ -62,6 +65,27 @@ def process_image() -> str:
     img.save(filepath, quality=90)
     return filepath
 
+def label_image(labels, net):
+    # Search for up to ~2 seconds for a frame that classifies with high confidence.
+    start_time = time.ticks_ms()
+    filepath = NO_IMAGE
+    img_found = False
+    while time.ticks_diff(time.ticks_ms(), start_time) < 2000:
+        img = sensor.snapshot()
+        for obj in net.classify(img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
+            predictions_list = list(zip(labels, obj.output()))
+            for t in predictions_list:
+                if t[1] >= 0.7:
+                    img.draw_rectangle(obj.rect())
+                    img.draw_string(0, 0, t[0])
+                    filepath = "images/labeled_image.jpeg"  # assumed save path; mirror process_image()
+                    img.save(filepath, quality=90)
+                    img_found = True
+        if img_found:
+            break
+    return filepath
+
+
 def send_image(image_filepath) -> None:
     # send packets and wait for ack after each packet
     # gets stat.ST_SIZE
@@ -127,12 +148,21 @@ def send_flag(flag):
     except Exception as e:
         print(f"could not create images directory: {e}")
 
+    net = None
+    labels = None
+
+    try:
+        # Load the built-in classification model baked into the firmware
+        labels, net = tf.load_builtin_model('trained')
+    except Exception:
+        raise  # the model is required; let the failure propagate
+
     # check that the UART connection is good
     req = check_connection()
 
     if req == CONFIRMATION_RECEIVE_CODE:
         # take, process, and save an image
-        img_filepath = process_image()
+        img_filepath = label_image(labels, net)
 
         # send the current image
         if img_filepath == NO_IMAGE:
diff --git a/libs/camera_submodule/edge_impulse_firmware_openmv_cam_h7.bin b/libs/camera_submodule/edge_impulse_firmware_openmv_cam_h7.bin
new file mode 100755
index 00000000..b7992a8f
Binary files /dev/null and b/libs/camera_submodule/edge_impulse_firmware_openmv_cam_h7.bin differ
diff --git a/libs/camera_submodule/readme.md b/libs/camera_submodule/readme.md
new file mode 100644
index 00000000..3a7b6667
--- /dev/null
+++ b/libs/camera_submodule/readme.md
@@ -0,0 +1,21 @@
+# Camera Submodule Code
+Files that **need** to go onto the camera board for flight:
+- `camera_code.py`
+
+## Setting up the camera
+I recommend installing the [OpenMV IDE](https://openmv.io/pages/download), as it streamlines the process of using the camera. The quick tutorial I have created follows the process of using this IDE.
+
+First, open the OpenMV IDE, then plug your camera into your computer via USB. The connect button, just above the run button in the bottom-left corner, should change its icon; this tells you the camera has been recognized. Click the connect button. You will likely be prompted to upgrade the firmware and register the device. Do so if you want, but it is not necessary, since we will be loading new firmware anyway.
+
+Once you are past the prompts, the camera should be connected and should also show up as a drive on your file system. To verify that the camera is set up properly with our flight software, first [flash the firmware](https://docs.edgeimpulse.com/docs/run-inference/running-your-impulse-openmv#deploying-your-impulse-as-an-openmv-firmware), then load `camera_code.py` as `main.py` on the camera board drive.
+
+To run our system check test, you also need to load a test image onto the camera board at `images/test_image.jpeg` and put the exact same image on the main board at `/sd/images/test_image.jpeg`.
+
+## edge_impulse_firmware_openmv_cam_h7.bin
+This is the firmware that must be flashed onto the camera board so that the camera has access to the trained classification model. You can do this easily with the [OpenMV IDE](https://openmv.io/pages/download) by following [this tutorial](https://docs.edgeimpulse.com/docs/run-inference/running-your-impulse-openmv#deploying-your-impulse-as-an-openmv-firmware).
+
+## camera_code.py
+This is the code that should be loaded onto the camera board as `main.py` so that the flight software can communicate properly with the camera board and send images back and forth with acknowledgements.
+
+## testing.py
+This is a file that can be loaded as `main.py` **for testing only**. It can be used to verify that the classification model and labels load properly and that the camera board can run the model to guess what the camera is seeing.
\ No newline at end of file
diff --git a/libs/camera_submodule/testing.py b/libs/camera_submodule/testing.py
new file mode 100644
index 00000000..5c910835
--- /dev/null
+++ b/libs/camera_submodule/testing.py
@@ -0,0 +1,43 @@
+# flake8: noqa
+import time
+
+import sensor
+import tf
+
+sensor.reset()                       # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)    # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240))     # Set 240x240 window.
+sensor.skip_frames(time=2000)        # Let the camera adjust.
+
+net = None
+labels = None
+
+try:
+    # Load the built-in classification model baked into the firmware
+    labels, net = tf.load_builtin_model('trained')
+except Exception:
+    raise  # the model is required; let the failure propagate
+
+
+clock = time.clock()
+while True:
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # default settings just do one detection... change them to search the image...
+    for obj in net.classify(img, min_scale=1.0, scale_mul=0.8, x_overlap=0.5, y_overlap=0.5):
+        print("**********\nPredictions at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        predictions_list = list(zip(labels, obj.output()))
+
+        for t in predictions_list:
+            if t[1] >= 0.7:
+                img.draw_string(0, 0, t[0])
+
+        for i in range(len(predictions_list)):
+            print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))
+
+    print(clock.fps(), "fps")
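
A note on the timing pattern in `label_image`: MicroPython's millisecond counter, `time.ticks_ms()`, wraps around after a platform-defined period, so comparing raw tick values directly can misbehave near the wrap point. `time.ticks_diff()` computes elapsed time safely across the wrap, which is why the search loop is written with it. Below is a minimal, self-contained sketch of that wraparound-safe budget loop; the `search_with_budget` name, the `try_once` callback, and the 2000 ms budget are illustrative choices, not part of this PR.

    import time

    def search_with_budget(budget_ms, try_once):
        # Call try_once() repeatedly until it returns a result or the time
        # budget is spent. time.ticks_diff() is wraparound-safe, so the loop
        # terminates even if time.ticks_ms() rolls over mid-search.
        start = time.ticks_ms()
        while time.ticks_diff(time.ticks_ms(), start) < budget_ms:
            result = try_once()   # e.g. snapshot a frame and classify it
            if result is not None:
                return result     # confident hit: stop early
        return None               # budget exhausted without a hit

    # Usage, mirroring label_image's ~2-second window (hypothetical helper):
    # labeled_path = search_with_budget(2000, lambda: classify_one_frame())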