====== Streaming OpenCV images with v4l2-loopback ======

**[[streaming_over_network_with_opencv_et_zeromq|Streaming with zeromq]]** has no latency, but cannot be received by Pure Data or VLC.\\
**v4l2-loopback** has a little latency (0.1 to 0.2 second) when read in VLC, but none when read with OpenCV.\\
**We use pyfakewebcam.**

=====Resources=====

**pyfakewebcam**

There are various projects on GitHub that do this; this one has the merit of actually working!

  * https://codingshiksha.com/python/python-3-pyfakewebcam-script-to-build-fake-webcam-with-custom-background-image-gui-desktop-app-full-project-for-beginners/
  * https://github.com/jremmons/pyfakewebcam
  * https://github.com/umlaeute/v4l2loopback

=====Sources of the examples=====

  * **[[https://github.com/sergeLabo/fakewebcam|fakewebcam from github.com/sergeLabo]]**

====Installation====

See the README above.
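As a minimal sketch, assuming a Debian/Ubuntu system and the ''/dev/video11'' fake device used in the examples below, the setup is roughly:

<code bash>
# Install the v4l2loopback kernel module, v4l2-ctl and the Python package
sudo apt install v4l2loopback-dkms v4l-utils
python3 -m pip install pyfakewebcam

# Create a single loopback device at /dev/video11
sudo modprobe v4l2loopback devices=1 video_nr=11 card_label="FakeWebcam" exclusive_caps=1

# Check that the device is there
v4l2-ctl --list-devices
</code>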
=====Simple example to test=====

<code python>
import pyfakewebcam
import cv2

cap = cv2.VideoCapture(0)
camera = pyfakewebcam.FakeWebcam('/dev/video11', 640, 480)

while True:
    ret, image = cap.read()
    if ret:
        # A HighGUI window is needed for cv2.waitKey() to catch key presses
        cv2.imshow('preview', image)
        # pyfakewebcam expects RGB frames, while OpenCV delivers BGR
        camera.schedule_frame(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(1) == 27:  # Esc
        break
</code>

Run the following command to see the output of the fake webcam.\\
''ffplay /dev/video11''\\
or open camera 11 in VLC.

===== Depth from an OAK-D Lite =====

<code python>
import cv2
import depthai as dai
import pyfakewebcam

pipeline = dai.Pipeline()

# Define a source - two mono (grayscale) cameras
left = pipeline.createMonoCamera()
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right = pipeline.createMonoCamera()
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Create a node that will produce the depth map
# (using disparity output as it's easier to visualize depth this way)
depth = pipeline.createStereoDepth()
depth.setConfidenceThreshold(200)
# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7  # For depth filtering
depth.setMedianFilter(median)

# Better handling for occlusions:
depth.setLeftRightCheck(False)
# Closer-in minimum depth, disparity range is doubled:
depth.setExtendedDisparity(False)
# Better accuracy for longer distance, fractional disparity 32-levels:
depth.setSubpixel(False)

left.out.link(depth.left)
right.out.link(depth.right)

# Create output
xout = pipeline.createXLinkOut()
xout.setStreamName("disparity")
depth.disparity.link(xout.input)

camera = pyfakewebcam.FakeWebcam('/dev/video11', 640, 480)

with dai.Device(pipeline) as device:
    # Output queue will be used to get the disparity frames from the outputs defined above
    q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)

    while True:
        inDepth = q.get()  # blocking call, will wait until new data has arrived
        frame = inDepth.getFrame()
        frame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX)
        depth_gray_image = cv2.resize(frame, (640, 480),
                                      interpolation=cv2.INTER_AREA)
        # v4l2 / pyfakewebcam expects RGB, so expand the single gray channel
        color = cv2.cvtColor(depth_gray_image, cv2.COLOR_GRAY2RGB)
        camera.schedule_frame(color)
        # A HighGUI window is needed for cv2.waitKey() to catch key presses
        cv2.imshow('disparity', depth_gray_image)
        if cv2.waitKey(1) == 27:  # Esc
            break
</code>

Open /dev/video11 in VLC

{{ :media_15:oak_depth_in_vlc.png?400 |}}

=====Depth from a RealSense D455=====

For installation, see **https://github.com/sergeLabo/grande_echelle#installation**

<code python>
"""
See https://github.com/sergeLabo/fakewebcam
Background removal, see
https://github.com/IntelRealSense/librealsense/blob/master/wrappers/python/examples/align-depth2color.py
"""

import os

import pyfakewebcam
import cv2
import numpy as np
import pyrealsense2 as rs

# The fake device
VIDEO = '/dev/video11'

# With or without a slider to adjust CLIPPING_DISTANCE_IN_MILLIMETER
SLIDER = 1

# Adjustable with the slider.
# We will be removing the background of objects more than
# CLIPPING_DISTANCE_IN_MILLIMETER away
CLIPPING_DISTANCE_IN_MILLIMETER = 2000


class MyRealSense:

    def __init__(self, video, slider, clip):
        self.video = video
        self.slider = slider
        self.clip = clip
        self.width = 1280
        self.height = 720
        self.pose_loop = 1

        self.pipeline = rs.pipeline()
        config = rs.config()
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        try:
            pipeline_profile = config.resolve(pipeline_wrapper)
        except Exception:
            print('\n\nNo RealSense sensor connected\n\n')
            os._exit(0)

        config.enable_stream(rs.stream.color,
                             width=self.width,
                             height=self.height,
                             format=rs.format.bgr8,
                             framerate=30)
        config.enable_stream(rs.stream.depth,
                             width=self.width,
                             height=self.height,
                             format=rs.format.z16,
                             framerate=30)

        profile = self.pipeline.start(config)
        self.align = rs.align(rs.stream.color)
        unaligned_frames = self.pipeline.wait_for_frames()
        frames = self.align.process(unaligned_frames)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor = profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is:", self.depth_scale)

        # Print the image size
        color_frame = frames.get_color_frame()
        img = np.asanyarray(color_frame.get_data())
        print(f"Image size: {img.shape[1]}x{img.shape[0]}")

        self.camera = pyfakewebcam.FakeWebcam(self.video, self.width, self.height)

        if self.slider:
            self.create_slider()

    def create_slider(self):
        cv2.namedWindow('controls')
        cv2.createTrackbar('background', 'controls', 1000, 8000,
                           self.remove_background_callback)
        cv2.setTrackbarPos('background', 'controls', self.clip)
        cv2.namedWindow('depth', cv2.WND_PROP_FULLSCREEN)

    def remove_background_callback(self, value):
        if value != 1000:
            self.clip = int(value)

    def run(self):
        """Infinite loop, quit with Esc in the OpenCV window"""
        while self.pose_loop:
            # Get frameset of color and depth
            frames = self.pipeline.wait_for_frames()

            # Align the depth frame to the color frame
            aligned_frames = self.align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Remove background - set pixels further than clipping_distance to black.
            # Depth image is 1 channel, color is 3 channels
            depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
            # self.clip is in millimeters; depth values are in device units,
            # so convert: meters = clip/1000, device units = meters/depth_scale
            clipping_distance = self.clip / (1000*self.depth_scale)
            bg_removed = np.where((depth_image_3d > clipping_distance) |
                                  (depth_image_3d <= 0), 0, color_image)

            depth_colormap = cv2.applyColorMap(
                    cv2.convertScaleAbs(depth_image, alpha=0.03),
                    cv2.COLORMAP_JET)
            images = np.hstack((bg_removed, depth_colormap))
            if self.slider:
                cv2.imshow('depth', images)

            # pyfakewebcam expects RGB, the RealSense color stream is BGR
            self.camera.schedule_frame(cv2.cvtColor(bg_removed, cv2.COLOR_BGR2RGB))

            if cv2.waitKey(1) == 27:  # Esc
                break


if __name__ == '__main__':
    mrs = MyRealSense(VIDEO, SLIDER, CLIPPING_DISTANCE_IN_MILLIMETER)
    mrs.run()
</code>
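Both scripts above repeat the same pattern before calling ''schedule_frame()'': pyfakewebcam expects RGB frames, while OpenCV and the RealSense color stream deliver BGR. As a minimal sketch, the conversion can be factored out into a helper (''push_bgr_frame'' is a hypothetical name, not part of pyfakewebcam):

<code python>
import cv2
import pyfakewebcam

# Hypothetical helper: centralize the BGR (OpenCV / RealSense)
# to RGB (v4l2-loopback) conversion before sending a frame.
def push_bgr_frame(camera: pyfakewebcam.FakeWebcam, bgr_frame) -> None:
    """Convert a BGR frame to RGB and schedule it on the fake webcam."""
    camera.schedule_frame(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
</code>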
=====Reception=====

Open /dev/video11 in VLC\\
{{:media_15:color_depth.png?800|}}

or with OpenCV:

<code python>
import cv2

# Open the loopback device by path rather than by index:
# the index depends on which other cameras are plugged in
cap = cv2.VideoCapture('/dev/video11')

while True:
    ret, image = cap.read()
    if ret:
        cv2.imshow("frame", image)
    if cv2.waitKey(1) == 27:  # Esc
        break
</code>

{{tag>opencv pure-data pure_data python sb vlc}}