Make quality adjustable
- switch to imageio for video writing (see the sketch below)
- make error messages more expressive
tfaehse committed Sep 5, 2021
1 parent a9a51cc commit d9d9831
Showing 7 changed files with 309 additions and 264 deletions.
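The gist of the change, as a minimal sketch: the OpenCV `VideoWriter` (which needed a separate openh264 build) is swapped for imageio's ffmpeg-backed writer, which exposes a quality setting from 0 (worst) to 10 (best). The sketch assumes `imageio` plus its ffmpeg plugin (`imageio-ffmpeg`) is installed; resolution, fps and file names are placeholders.

```python
import cv2
import imageio
import numpy as np

# Old approach (removed): cv2.VideoWriter with the H264 fourcc, which required openh264.
# New approach: imageio writes H264 through ffmpeg and accepts a 0-10 quality value.
fps, quality = 30, 8
writer = imageio.get_writer("blurred_copy.mp4", codec="libx264", fps=fps, quality=quality)

for _ in range(fps):  # placeholder frames; the real code reads them from cv2.VideoCapture
    frame_bgr = np.zeros((720, 1280, 3), dtype=np.uint8)
    writer.append_data(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))  # imageio expects RGB

writer.close()
```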
5 changes: 1 addition & 4 deletions README.md
@@ -57,7 +57,6 @@ To get a local copy up and running follow these simple steps.

You need a working Python environment with a Python version of 3.8 or higher that satisfies the listed `requirements.txt`. Depending on your machine, you can leverage GPU acceleration for pytorch - see [here](https://pytorch.org/get-started/locally/) or just use `requirements-gpu.txt`.
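If you go the GPU route, a quick sanity check like the following (just a sketch, assuming pytorch is already installed) confirms that the CUDA build is actually being picked up:

```python
import torch

# True only if a CUDA-enabled pytorch build is installed and a compatible GPU is visible;
# otherwise detection simply runs on the CPU.
print(torch.cuda.is_available())
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
```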

Further, h264 was chosen for the output video file. You can download an open source library to add support [here](https://github.com/cisco/openh264/releases). OpenCV currently expects version 1.8.0.

Since OpenCV does not care about audio channels, ffmpeg is used to combine the edited video and the audio channel of the input video. The environment variable `FFMPEG_BINARY` needs to be set to the ffmpeg executable for this to work.
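Concretely, the merge is a single ffmpeg call that copies the processed video stream and the original audio stream into the final file without re-encoding, roughly like this sketch (file names are placeholders):

```python
import os
import subprocess

ffmpeg_exe = os.getenv("FFMPEG_BINARY")  # path to the ffmpeg executable

# Stream 0:0 is the video of the blurred (silent) file, stream 1:1 the audio of the original.
subprocess.run([
    ffmpeg_exe, "-y",
    "-i", "blurred_copy.mp4",  # placeholder: edited video without audio
    "-i", "original.mp4",      # placeholder: untouched input video
    "-c", "copy", "-map", "0:0", "-map", "1:1", "-shortest",
    "blurred.mp4",             # placeholder: final output with audio
])
```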

@@ -73,9 +72,7 @@ Since OpenCV does not care about audio channels, ffmpeg is used to combine the e
conda activate py38
pip install -r requirements.txt
```

3. Place `openh264-1.8.0-win64.dll` in the same folder as `main.py`.

3. Install ffmpeg binaries (release essentials is enough) and create an environment variable "FFMPEG_BINARY" that points to the ffmpeg.exe binary.
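To verify the variable is picked up before running the tool, a small check along these lines (a sketch, not part of the project) can help:

```python
import os
import subprocess

ffmpeg_exe = os.getenv("FFMPEG_BINARY")
if not ffmpeg_exe or not os.path.isfile(ffmpeg_exe):
    raise SystemExit("FFMPEG_BINARY is not set or does not point to an ffmpeg executable.")

# Prints the ffmpeg version banner if the binary is usable.
subprocess.run([ffmpeg_exe, "-version"], check=True)
```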
<!-- USAGE EXAMPLES -->
## Usage
On first launch, the YOLOv5 model is automatically downloaded and fused with the custom weights for face and plate detection from this repo.
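Under the hood this follows the usual torch.hub pattern for loading custom YOLOv5 weights; the exact call in this repo may differ, and the weights path below is a placeholder:

```python
import torch

# Downloads the YOLOv5 code on first use and loads custom face/plate weights.
model = torch.hub.load("ultralytics/yolov5", "custom", path="weights/custom.pt")
model.conf = 0.4  # example confidence threshold, analogous to the GUI's threshold setting
```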
15 changes: 13 additions & 2 deletions dashcamcleaner/main.py
@@ -47,10 +47,20 @@ def setup_blurrer(self):
self.blurrer.setMaximum.connect(self.setMaximumValue)
self.blurrer.updateProgress.connect(self.setProgress)
self.blurrer.finished.connect(self.blurrer_finished)
self.blurrer.alert.connect(self.blurrer_alert)
msg_box = QMessageBox()
msg_box.setText(f"Successfully loaded {weights_name}.pt")
msg_box.exec_()

def blurrer_alert(self, message: str):
"""
Display blurrer messages in the GUI
:param message: Message to be displayed
"""
msg_box = QMessageBox()
msg_box.setText(message)
msg_box.exec_()

def button_abort_clicked(self):
"""
Callback for button_abort
@@ -84,7 +94,7 @@ def button_start_clicked(self):
self.ui.button_start.setEnabled(False)

# read inference size
inference_size = int(self.ui.combo_box_scale.currentText()[:-1]) * 16 / 9 # ouch again
inference_size = int(self.ui.combo_box_scale.currentText()[:-1]) * 16 / 9 # ouch again

# set up parameters
parameters = {
@@ -94,7 +104,8 @@
"blur_memory": self.ui.spin_memory.value(),
"threshold": self.ui.double_spin_threshold.value(),
"roi_multi": self.ui.double_spin_roimulti.value(),
"inference_size": inference_size
"inference_size": inference_size,
"quality": self.ui.spin_quality.value()
}
if self.blurrer:
self.blurrer.parameters = parameters
29 changes: 19 additions & 10 deletions dashcamcleaner/src/blurrer.py
@@ -3,6 +3,7 @@
from timeit import default_timer as timer

import cv2
import imageio
import numpy as np
import torch
from PySide2.QtCore import QThread, Signal
@@ -12,6 +13,7 @@
class VideoBlurrer(QThread):
setMaximum = Signal(int)
updateProgress = Signal(int)
alert = Signal(str)

def __init__(self, weights_name, parameters=None):
"""
@@ -111,6 +113,7 @@ def run(self):
temp_output = f"{os.path.splitext(self.parameters['output_path'])[0]}_copy{os.path.splitext(self.parameters['output_path'])[1]}"
output_path = self.parameters["output_path"]
threshold = self.parameters["threshold"]
quality = self.parameters["quality"]

# customize detector
self.detector.conf = threshold
@@ -125,26 +128,25 @@ def run(self):
fps = cap.get(cv2.CAP_PROP_FPS)

# save the video to a file
fourcc = cv2.VideoWriter_fourcc(*'H264')
writer = cv2.VideoWriter(temp_output, fourcc, fps, (width, height))
writer = imageio.get_writer(temp_output, codec="libx264", fps=fps, quality=quality)

# update GUI's progress bar on its maximum frames
self.setMaximum.emit(length)

if cap.isOpened() == False:
print('error file not found')
if not cap.isOpened():
self.alert.emit('Error: Video file could not be found')
return

# loop through video
current_frame = 0
while cap.isOpened():
ret, frame = cap.read()

if ret == True:
if ret:
new_detections = self.detect_identifiable_information(frame.copy())
frame = self.apply_blur(frame, new_detections)
writer.write(frame)

frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
writer.append_data(frame_rgb)
else:
break

@@ -153,18 +155,25 @@

self.detections = []
cap.release()
writer.release()
writer.close()

# copy over audio stream from original video to edited video
ffmpeg_exe = os.getenv("FFMPEG_BINARY")
if not ffmpeg_exe:
self.alert.emit(
"FFMPEG could not be found! Please make sure the ffmpeg.exe is available under the envirnment variable 'FFMPEG_BINARY'.")
return
subprocess.run(
[ffmpeg_exe, "-y", "-i", temp_output, "-i", input_path, "-c", "copy", "-map", "0:0", "-map", "1:1",
"-shortest", output_path])

# delete temporary output that had no audio track
os.remove(temp_output)
try:
os.remove(temp_output)
except:
self.alert.emit("Could not delete temporary, muted video. Maybe another process (like a cloud service or antivirus) is using it already.")

## store sucess and elapsed time
# store success and elapsed time
self.result["success"] = True
self.result["elapsed_time"] = timer() - start
