streamer_des_images_opencv_avec_v4l2-loopback
Différences
Ci-dessous, les différences entre deux révisions de la page.
Les deux révisions précédentes | Révision précédente | Prochaine révision | Révision précédente | ||
streamer_des_images_opencv_avec_v4l2-loopback [2022/02/20 12:59] – serge | streamer_des_images_opencv_avec_v4l2-loopback [2022/03/03 12:35] (Version actuelle) – [Profondeur d'une RealSense D455] serge | ||
---|---|---|---|
Ligne 3: | Ligne 3: | ||
<WRAP center round box 60%> | <WRAP center round box 60%> | ||
Le **[[streaming_over_network_with_opencv_et_zeromq|stream avec zeromq]]** est sans latence, mais ne peut pas être reçu par Pure Data et VLC.\\ | Le **[[streaming_over_network_with_opencv_et_zeromq|stream avec zeromq]]** est sans latence, mais ne peut pas être reçu par Pure Data et VLC.\\ | ||
- | **v4l2-loopback** a un peu de latence (0.1 à 0.2 seconde) mais c'est un flux v4l2 | + | **v4l2-loopback** a un peu de latence (0.1 à 0.2 seconde) |
+ | **Nous utilisons pyfakewebcam** | ||
</ | </ | ||
=====Ressources===== | =====Ressources===== | ||
+ | **pyfakewebcam** | ||
+ | Il y a divers projets qui font ça sur GitHub, celui-ci a cette qualité de marcher ! | ||
* https:// | * https:// | ||
* https:// | * https:// | ||
* https:// | * https:// | ||
- | =====Installation==== | + | =====Sources des exemples===== |
- | A voir, nécessaire mais peut-être pas suffisant: | + | * **[[https:// |
- | sudo apt install v4l2loopback-utils | + | |
- | + | ||
- | <code bash> | + | |
- | sudo apt install python3-pip | + | |
- | python3 -m pip install --upgrade pip | + | |
- | sudo apt install python3-venv | + | |
- | cd /le/dossier/de/votre/projet | + | ====Installation==== |
- | python3 | + | Voir le README ci-dessus. |
- | source | + | |
- | python3 | + | =====Exemple simple pour tester===== |
- | </code> | + | <file python cam_relay.py> |
- | + | import pyfakewebcam | |
- | + | import cv2 | |
- | + | import numpy as np | |
+ | |||
+ | cap = cv2.VideoCapture(0) | ||
+ | camera = pyfakewebcam.FakeWebcam(' | ||
+ | |||
+ | while True: | ||
+ | ret, image = cap.read() | ||
+ | if ret: | ||
+ | # # gray = cv2.cvtColor(image, | ||
+ | camera.schedule_frame(image) | ||
+ | if cv2.waitKey(1) == 27: | ||
+ | break | ||
+ | </ | ||
+ | |||
+ | Run the following command to see the output of the fake webcam.\\ | ||
+ | ffplay | ||
+ | or open camera 11 in VLC | ||
+ | ===== Profondeur d'une OAK-D Lite ===== | ||
+ | <file python sender_oak_depth.py> | ||
+ | import cv2 | ||
+ | import depthai as dai | ||
+ | import numpy as np | ||
+ | import pyfakewebcam | ||
+ | |||
+ | pipeline = dai.Pipeline() | ||
+ | |||
+ | # Define a source | ||
+ | left = pipeline.createMonoCamera() | ||
+ | left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) | ||
+ | left.setBoardSocket(dai.CameraBoardSocket.LEFT) | ||
+ | |||
+ | right = pipeline.createMonoCamera() | ||
+ | right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) | ||
+ | right.setBoardSocket(dai.CameraBoardSocket.RIGHT) | ||
+ | |||
+ | # Create a node that will produce the depth map | ||
+ | # (using disparity output as it's easier to visualize depth this way) | ||
+ | depth = pipeline.createStereoDepth() | ||
+ | depth.setConfidenceThreshold(200) | ||
+ | |||
+ | # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) | ||
+ | median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7 # For depth filtering | ||
+ | depth.setMedianFilter(median) | ||
+ | |||
+ | # Better handling for occlusions: | ||
+ | depth.setLeftRightCheck(False) | ||
+ | # Closer-in minimum depth, disparity range is doubled: | ||
+ | depth.setExtendedDisparity(False) | ||
+ | # Better accuracy for longer distance, fractional disparity 32-levels: | ||
+ | depth.setSubpixel(False) | ||
+ | |||
+ | left.out.link(depth.left) | ||
+ | right.out.link(depth.right) | ||
+ | |||
+ | # Create output | ||
+ | xout = pipeline.createXLinkOut() | ||
+ | xout.setStreamName(" | ||
+ | depth.disparity.link(xout.input) | ||
+ | |||
+ | camera = pyfakewebcam.FakeWebcam(' | ||
+ | |||
+ | with dai.Device(pipeline) as device: | ||
+ | device.startPipeline() | ||
+ | |||
+ | # Output queue will be used to get the disparity frames from the outputs defined above | ||
+ | q = device.getOutputQueue(name=" | ||
+ | |||
+ | while True: | ||
+ | inDepth = q.get() | ||
+ | frame = inDepth.getFrame() | ||
+ | frame = cv2.normalize(frame, | ||
+ | |||
+ | depth_gray_image = cv2.resize(np.asanyarray(frame), | ||
+ | interpolation = cv2.INTER_AREA) | ||
+ | # v4l2 doit être RGB | ||
+ | color = cv2.cvtColor(depth_gray_image, | ||
+ | camera.schedule_frame(color) | ||
+ | |||
+ | if cv2.waitKey(1) == 27: | ||
+ | break | ||
+ | </ | ||
+ | |||
+ | Ouvrir / | ||
+ | {{ : | ||
+ | =====Profondeur d'une RealSense D455===== | ||
+ | Pour l' | ||
+ | |||
+ | <file python sender_rs_depth.py> | ||
+ | """ | ||
+ | Voir https:// | ||
+ | |||
+ | Suppression du fond, voir | ||
+ | https:// | ||
+ | """ | ||
+ | |||
+ | import os | ||
+ | import time | ||
+ | import pyfakewebcam | ||
+ | import cv2 | ||
+ | import numpy as np | ||
+ | import pyrealsense2 as rs | ||
+ | |||
+ | |||
+ | # Le faux device | ||
+ | VIDEO = '/ | ||
+ | |||
+ | # Avec ou sans slider pour régler CLIPPING_DISTANCE_IN_MILLIMETER | ||
+ | SLIDER = 1 | ||
+ | # Réglable avec le slider | ||
+ | # We will be removing the background of objects more than | ||
+ | # CLIPPING_DISTANCE_IN_MILLIMETER away | ||
+ | CLIPPING_DISTANCE_IN_MILLIMETER = 2000 | ||
+ | |||
+ | |||
+ | class MyRealSense: | ||
+ | |||
+ | def __init__(self, | ||
+ | self.video = video | ||
+ | self.slider = slider | ||
+ | self.clip = clip | ||
+ | |||
+ | self.width = 1280 | ||
+ | self.height = 720 | ||
+ | self.pose_loop = 1 | ||
+ | self.pipeline = rs.pipeline() | ||
+ | config = rs.config() | ||
+ | pipeline_wrapper = rs.pipeline_wrapper(self.pipeline) | ||
+ | try: | ||
+ | pipeline_profile = config.resolve(pipeline_wrapper) | ||
+ | except: | ||
+ | print(' | ||
+ | os._exit(0) | ||
+ | device = pipeline_profile.get_device() | ||
+ | config.enable_stream( | ||
+ | width=self.width, | ||
+ | height=self.height, | ||
+ | format=rs.format.bgr8, | ||
+ | framerate=30) | ||
+ | config.enable_stream( | ||
+ | width=self.width, | ||
+ | height=self.height, | ||
+ | format=rs.format.z16, | ||
+ | framerate=30) | ||
+ | |||
+ | profile = self.pipeline.start(config) | ||
+ | self.align = rs.align(rs.stream.color) | ||
+ | unaligned_frames = self.pipeline.wait_for_frames() | ||
+ | frames = self.align.process(unaligned_frames) | ||
+ | |||
+ | # Getting the depth sensor' | ||
+ | depth_sensor = profile.get_device().first_depth_sensor() | ||
+ | self.depth_scale = depth_sensor.get_depth_scale() | ||
+ | print(" | ||
+ | |||
+ | # Affichage de la taille des images | ||
+ | color_frame = frames.get_color_frame() | ||
+ | img = np.asanyarray(color_frame.get_data()) | ||
+ | print(f" | ||
+ | f" | ||
+ | |||
+ | self.camera = pyfakewebcam.FakeWebcam(VIDEO, | ||
+ | |||
+ | if self.slider: | ||
+ | self.create_slider() | ||
+ | |||
+ | def create_slider(self): | ||
+ | cv2.namedWindow(' | ||
+ | cv2.createTrackbar(' | ||
+ | self.remove_background_callback) | ||
+ | cv2.setTrackbarPos(' | ||
+ | cv2.namedWindow(' | ||
+ | |||
+ | def remove_background_callback(self, | ||
+ | if value != 1000: | ||
+ | self.clip = int(value) | ||
+ | |||
+ | def run(self): | ||
+ | """ | ||
+ | |||
+ | while self.pose_loop: | ||
+ | |||
+ | # Get frameset of color and depth | ||
+ | frames = self.pipeline.wait_for_frames() | ||
+ | # frames.get_depth_frame() is a 640x360 depth image | ||
+ | |||
+ | # Align the depth frame to color frame | ||
+ | aligned_frames = self.align.process(frames) | ||
+ | |||
+ | # aligned_depth_frame is a 640x480 depth image | ||
+ | aligned_depth_frame = aligned_frames.get_depth_frame() | ||
+ | color_frame = aligned_frames.get_color_frame() | ||
+ | |||
+ | # Validate that both frames are valid | ||
+ | if not aligned_depth_frame or not color_frame: | ||
+ | continue | ||
+ | |||
+ | depth_image = np.asanyarray(aligned_depth_frame.get_data()) | ||
+ | color_image = np.asanyarray(color_frame.get_data()) | ||
+ | |||
+ | # Remove background - Set pixels further than clipping_distance to grey | ||
+ | # depth image is 1 channel, color is 3 channels | ||
+ | depth_image_3d = np.dstack((depth_image, | ||
+ | clipping_distance = self.clip | ||
+ | bg_removed = np.where((depth_image_3d | ||
+ | | ||
+ | |||
+ | | ||
+ | | ||
+ | | ||
+ | images = np.hstack((bg_removed, | ||
+ | |||
+ | if self.slider: | ||
+ | cv2.imshow(' | ||
+ | |||
+ | self.camera.schedule_frame(bg_removed) | ||
+ | |||
+ | if cv2.waitKey(1) == 27: | ||
+ | break | ||
+ | |||
+ | |||
+ | |||
+ | if __name__ == ' | ||
+ | |||
+ | mrs = MyRealSense(VIDEO, | ||
+ | mrs.run() | ||
+ | </ | ||
| | ||
+ | =====Réception===== | ||
+ | Ouvrir / | ||
+ | |||
+ | {{: | ||
+ | |||
+ | ou | ||
+ | <file python receiver.py> | ||
+ | import cv2 | ||
+ | cap = cv2.VideoCapture(2) | ||
+ | while 1: | ||
+ | ret, image = cap.read() | ||
+ | if ret: | ||
+ | cv2.imshow(" | ||
+ | if cv2.waitKey(1) == 27: | ||
+ | break | ||
+ | </ | ||
+ | |||
{{tag> | {{tag> |
streamer_des_images_opencv_avec_v4l2-loopback.1645361966.txt.gz · Dernière modification : 2022/02/20 12:59 de serge