diff --git a/musicalgestures/__init__.py b/musicalgestures/__init__.py
index 441eb20..36f0a06 100644
--- a/musicalgestures/__init__.py
+++ b/musicalgestures/__init__.py
@@ -14,6 +14,7 @@
     ffmpeg_cmd,
     get_length,
     generate_outfilename,
+    get_cuda_device_count,
     show_progress,
 )
 from musicalgestures._mglist import MgList
diff --git a/musicalgestures/_blurfaces.py b/musicalgestures/_blurfaces.py
index 2ebca4b..e558e85 100644
--- a/musicalgestures/_blurfaces.py
+++ b/musicalgestures/_blurfaces.py
@@ -80,6 +80,7 @@ def mg_blurfaces(self,
                  save_data=True,
                  data_format='csv',
                  color=(0, 0, 0),
+                 use_gpu=False,
                  target_name=None,
                  overwrite=False):
     """
@@ -101,6 +102,7 @@ def mg_blurfaces(self,
         save_data (bool, optional): Whether to save the scaled coordinates of the face mask (time (ms), x1, y1, x2, y2) for each frame to a file. Defaults to True.
         data_format (str, optional): Specifies format of blur_faces-data. Accepted values are 'csv', 'tsv' and 'txt'. For multiple output formats, use list, e.g. ['csv', 'txt']. Defaults to 'csv'.
         color (tuple, optional): Customized color of the rectangle boxes. Defaults to black (0, 0, 0).
+        use_gpu (bool, optional): Whether to attempt GPU (CUDA) acceleration for face detection. Falls back to CPU automatically if CUDA is unavailable. Defaults to False.
         target_name (str, optional): Target output name. Defaults to None (which assumes that the input filename with the suffix "_blurred" should be used).
         overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
 
@@ -123,7 +125,7 @@ def mg_blurfaces(self,
     pb = MgProgressbar(total=self.length, prefix='Blurring faces:')
 
     # Create an instance of the CenterFace class
-    centerface = CenterFace()
+    centerface = CenterFace(use_gpu=use_gpu)
 
     output_stream = cv2.VideoWriter(target_name, cv2.VideoWriter_fourcc('M','J','P','G'), self.fps, (self.width, self.height))
     # Create an empty list to append the mask coordinates
     data = []
diff --git a/musicalgestures/_centerface.py b/musicalgestures/_centerface.py
index 85d9de6..0f2dc9c 100644
--- a/musicalgestures/_centerface.py
+++ b/musicalgestures/_centerface.py
@@ -3,15 +3,24 @@
 import numpy as np
 
 import musicalgestures
+from musicalgestures._utils import get_cuda_device_count
 
 
 class CenterFace(object):
-    def __init__(self, landmarks=True):
+    def __init__(self, landmarks=True, use_gpu=False):
         module_path = os.path.abspath(os.path.dirname(musicalgestures.__file__))
         self.landmarks = landmarks
         self.net = cv2.dnn.readNetFromONNX(module_path + '/models/centerface.onnx')
+
+        if use_gpu:
+            if get_cuda_device_count() > 0:
+                self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
+                self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
+            else:
+                print('OpenCV CUDA backend is unavailable. CenterFace will use CPU.')
+
         self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = 0, 0, 0, 0
 
     def __call__(self, img, height, width, threshold=0.5):
diff --git a/musicalgestures/_flow.py b/musicalgestures/_flow.py
index 7cca289..63b0ed4 100644
--- a/musicalgestures/_flow.py
+++ b/musicalgestures/_flow.py
@@ -7,7 +7,7 @@ from scipy.stats import entropy
 
 import musicalgestures
-from musicalgestures._utils import MgFigure, extract_wav, embed_audio_in_video, MgProgressbar, convert_to_avi, generate_outfilename, ffmpeg_cmd
+from musicalgestures._utils import MgFigure, extract_wav, embed_audio_in_video, MgProgressbar, convert_to_avi, generate_outfilename, ffmpeg_cmd, get_cuda_device_count
 
 
 class Flow:
@@ -47,6 +47,7 @@ def dense(
             angle_of_view=0,
             scaledown=1,
             skip_empty=False,
+            use_gpu=False,
             target_name=None,
             overwrite=False):
         """
@@ -68,6 +69,7 @@ def dense(
             angle_of_view (int, optional): angle of view of camera, for reporting flow in meters per second. Defaults to 0.
             scaledown (int, optional): factor to scaledown frame size of the video. Defaults to 1.
             skip_empty (bool, optional): If True, repeats previous frame in the output when encounters an empty frame. Defaults to False.
+            use_gpu (bool, optional): Whether to attempt GPU (CUDA) acceleration using `cv2.cuda.FarnebackOpticalFlow`. When `True`, falls back to CPU automatically if CUDA is unavailable or the required OpenCV CUDA modules are not installed. When `False`, CPU processing is used unconditionally. Defaults to False.
             target_name (str, optional): Target output name for the video. Defaults to None (which assumes that the input filename with the suffix "_flow_dense" should be used).
             overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
 
@@ -100,6 +102,27 @@ def dense(
         size = (int(width/scaledown), int(height/scaledown))
 
+        # Determine whether to use GPU-accelerated Farneback optical flow
+        _use_gpu = False
+        farneback_gpu = None
+        if use_gpu:
+            if not hasattr(cv2, 'cuda') or not hasattr(cv2.cuda, 'FarnebackOpticalFlow'):
+                print('cv2.cuda.FarnebackOpticalFlow is unavailable (requires opencv-contrib built with CUDA). Switching to CPU for dense optical flow.')
+            elif get_cuda_device_count() <= 0:
+                print('OpenCV CUDA backend is unavailable. Switching to CPU for dense optical flow.')
+            else:
+                _use_gpu = True
+                farneback_gpu = cv2.cuda.FarnebackOpticalFlow.create(
+                    numLevels=levels,
+                    pyrScale=pyr_scale,
+                    fastPyramids=False,
+                    winSize=winsize,
+                    numIters=iterations,
+                    polyN=poly_n,
+                    polySigma=poly_sigma,
+                    flags=flags,
+                )
+
         if velocity:
 
             pb = MgProgressbar(total=length, prefix='Rendering dense optical flow velocity:')
 
@@ -118,6 +141,11 @@ def dense(
 
         ret, frame1 = vidcap.read()
         prev_frame = cv2.cvtColor(cv2.resize(frame1, size), cv2.COLOR_BGR2GRAY)
+
+        if _use_gpu:
+            gpu_prev_frame = cv2.cuda_GpuMat()
+            gpu_next_frame = cv2.cuda_GpuMat()
+            gpu_prev_frame.upload(prev_frame)
 
         prev_rgb = None
         hsv = np.zeros_like(frame1)
@@ -134,7 +162,15 @@ def dense(
             if ret == True:
                 next_frame = cv2.cvtColor(cv2.resize(frame2, size), cv2.COLOR_BGR2GRAY)
 
-                flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
+                if _use_gpu:
+                    gpu_next_frame.upload(next_frame)
+                    gpu_flow_result = farneback_gpu.calc(gpu_prev_frame, gpu_next_frame, None)
+                    flow = gpu_flow_result.download()
+                    # Swap references so gpu_next_frame becomes gpu_prev_frame for the
+                    # next iteration without allocating a new GpuMat object each frame
+                    gpu_prev_frame, gpu_next_frame = gpu_next_frame, gpu_prev_frame
+                else:
+                    flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
 
                 if velocity:
                     # Cumulative sum of optical flow vectors
@@ -285,6 +321,7 @@ def sparse(
             of_max_level=2,
             of_criteria=(cv2.TERM_CRITERIA_EPS |
                          cv2.TERM_CRITERIA_COUNT, 10, 0.03),
+            use_gpu=False,
             target_name=None,
             overwrite=False):
         """
@@ -299,6 +336,7 @@ def sparse(
             of_win_size (tuple, optional): Size of the search window at each pyramid level. Defaults to (15, 15).
             of_max_level (int, optional): 0-based maximal pyramid level number. If set to 0, pyramids are not used (single level), if set to 1, two levels are used, and so on. If pyramids are passed to input then the algorithm will use as many levels as pyramids have but no more than `maxLevel`. Defaults to 2.
             of_criteria (tuple, optional): Specifies the termination criteria of the iterative search algorithm (after the specified maximum number of iterations criteria.maxCount or when the search window moves by less than criteria.epsilon). Defaults to (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03).
+            use_gpu (bool, optional): Whether to attempt GPU (CUDA) acceleration using `cv2.cuda.SparsePyrLKOpticalFlow`. When `True`, falls back to CPU automatically if CUDA is unavailable or the required OpenCV CUDA modules are not installed. When `False`, CPU processing is used unconditionally. Defaults to False.
             target_name (str, optional): Target output name for the video. Defaults to None (which assumes that the input filename with the suffix "_flow_sparse" should be used).
             overwrite (bool, optional): Whether to allow overwriting existing files or to automatically increment target filenames to avoid overwriting. Defaults to False.
 
@@ -330,6 +368,23 @@ def sparse(
         height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
         length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
 
+        # Determine whether to use GPU-accelerated sparse optical flow
+        _use_gpu = False
+        lk_gpu = None
+        if use_gpu:
+            if not hasattr(cv2, 'cuda') or not hasattr(cv2.cuda, 'SparsePyrLKOpticalFlow'):
+                print('cv2.cuda.SparsePyrLKOpticalFlow is unavailable (requires opencv-contrib built with CUDA). Switching to CPU for sparse optical flow.')
+            elif get_cuda_device_count() <= 0:
+                print('OpenCV CUDA backend is unavailable. Switching to CPU for sparse optical flow.')
+            else:
+                _use_gpu = True
+                iters = of_criteria[1] if len(of_criteria) > 1 else 10
+                lk_gpu = cv2.cuda.SparsePyrLKOpticalFlow.create(
+                    winSize=of_win_size,
+                    maxLevel=of_max_level,
+                    iters=iters,
+                )
+
         pb = MgProgressbar(
             total=length, prefix='Rendering sparse optical flow video:')
 
@@ -362,6 +417,13 @@ def sparse(
         old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
         p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
 
+        if _use_gpu:
+            gpu_old_gray = cv2.cuda_GpuMat()
+            gpu_frame_gray = cv2.cuda_GpuMat()
+            gpu_old_gray.upload(old_gray)
+            gpu_p0 = cv2.cuda_GpuMat()
+            gpu_p0.upload(p0)
+
         # Create a mask image for drawing purposes
         mask = np.zeros_like(old_frame)
 
@@ -373,8 +435,17 @@ def sparse(
             frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
             # calculate optical flow
-            p1, st, err = cv2.calcOpticalFlowPyrLK(
-                old_gray, frame_gray, p0, None, **lk_params)
+            if _use_gpu:
+                gpu_frame_gray.upload(frame_gray)
+                gpu_p1, gpu_st = lk_gpu.calc(gpu_old_gray, gpu_frame_gray, gpu_p0, None, None)
+                p1 = gpu_p1.download()
+                st = gpu_st.download()
+                # Swap references so current frame becomes old frame for next
+                # iteration, avoiding new GpuMat allocation each frame
+                gpu_old_gray, gpu_frame_gray = gpu_frame_gray, gpu_old_gray
+            else:
+                p1, st, err = cv2.calcOpticalFlowPyrLK(
+                    old_gray, frame_gray, p0, None, **lk_params)
 
             # Select good points
             good_new = p1[st == 1]
@@ -400,6 +471,8 @@ def sparse(
             # Now update the previous frame and previous points
             old_gray = frame_gray.copy()
             p0 = good_new.reshape(-1, 1, 2)
+            if _use_gpu:
+                gpu_p0.upload(p0)
         else:
             pb.progress(length)
 
diff --git a/musicalgestures/_pose.py b/musicalgestures/_pose.py
index 30daea0..af6a4eb 100644
--- a/musicalgestures/_pose.py
+++ b/musicalgestures/_pose.py
@@ -4,7 +4,7 @@ import sys
 
 import numpy as np
 import pandas as pd
-from musicalgestures._utils import MgProgressbar, convert_to_avi, extract_wav, embed_audio_in_video, roundup, frame2ms, generate_outfilename, in_colab, ffmpeg_cmd
+from musicalgestures._utils import MgProgressbar, convert_to_avi, extract_wav, embed_audio_in_video, roundup, frame2ms, generate_outfilename, in_colab, get_cuda_device_count, ffmpeg_cmd
 import musicalgestures
 
 import itertools
@@ -155,12 +155,7 @@ def pose(
         print('Sorry, OpenCV GPU acceleration is not supported in Colab. Switching to CPU.')
         device = 'cpu'
     elif device == 'gpu':
-        cuda_devices = 0
-        try:
-            cuda_devices = cv2.cuda.getCudaEnabledDeviceCount()
-        except Exception:
-            cuda_devices = 0
-        if cuda_devices <= 0:
+        if get_cuda_device_count() <= 0:
             print('OpenCV CUDA backend is unavailable. Switching to CPU.')
             device = 'cpu'
 
diff --git a/musicalgestures/_utils.py b/musicalgestures/_utils.py
index f0b3d6d..c09ca49 100644
--- a/musicalgestures/_utils.py
+++ b/musicalgestures/_utils.py
@@ -1646,6 +1646,20 @@ def unwrap_str(string):
     return string
 
 
+def get_cuda_device_count():
+    """
+    Returns the number of CUDA-capable GPU devices visible to OpenCV.
+
+    Returns:
+        int: Number of available CUDA devices, or 0 if the OpenCV CUDA
+        module is unavailable or no devices are detected.
+    """
+    try:
+        return cv2.cuda.getCudaEnabledDeviceCount()
+    except Exception:
+        return 0
+
+
 def in_colab():
     """
     Check's if the environment is a Google Colab document.
diff --git a/tests/test_flow.py b/tests/test_flow.py
index e7d692d..76a74e8 100644
--- a/tests/test_flow.py
+++ b/tests/test_flow.py
@@ -54,6 +54,19 @@ def test_with_target_name(self, testvideo_avi):
         assert type(result) == musicalgestures.MgVideo
         assert os.path.isfile(result.filename) == True
 
+    def test_use_gpu_true(self, testvideo_avi):
+        # use_gpu=True should work (falls back to CPU when CUDA is unavailable)
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.flow.dense(use_gpu=True, overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
+    def test_use_gpu_false(self, testvideo_avi):
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.flow.dense(use_gpu=False, overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
 
 class Test_flow_sparse:
     def test_normal_case(self, testvideo_avi):
@@ -80,3 +93,53 @@ def test_with_target_name(self, testvideo_avi):
         result = mg.flow.sparse(target_name=target_name, overwrite=True)
         assert type(result) == musicalgestures.MgVideo
         assert os.path.isfile(result.filename) == True
+
+    def test_use_gpu_true(self, testvideo_avi):
+        # use_gpu=True should work (falls back to CPU when CUDA is unavailable)
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.flow.sparse(use_gpu=True, overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
+    def test_use_gpu_false(self, testvideo_avi):
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.flow.sparse(use_gpu=False, overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
+
+class Test_get_cuda_device_count:
+    def test_returns_int(self):
+        result = musicalgestures.get_cuda_device_count()
+        assert isinstance(result, int)
+        assert result >= 0
+
+
+class Test_blur_faces_gpu:
+    def test_use_gpu_false(self, testvideo_avi):
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.blur_faces(use_gpu=False, overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
+    def test_use_gpu_true(self, testvideo_avi):
+        # use_gpu=True should work (falls back to CPU when CUDA is unavailable)
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.blur_faces(use_gpu=True, overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
+
+class Test_pose_gpu:
+    def test_device_cpu(self, testvideo_avi):
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.pose(device='cpu', overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True
+
+    def test_device_gpu_fallback(self, testvideo_avi):
+        # device='gpu' should fall back to CPU when CUDA is unavailable
+        mg = musicalgestures.MgVideo(testvideo_avi)
+        result = mg.pose(device='gpu', overwrite=True)
+        assert type(result) == musicalgestures.MgVideo
+        assert os.path.isfile(result.filename) == True