streaming_over_network_with_opencv_et_zeromq
Différences
Ci-dessous, les différences entre deux révisions de la page.
Les deux révisions précédentesRévision précédenteProchaine révision | Révision précédente | ||
streaming_over_network_with_opencv_et_zeromq [2022/02/20 10:00] – [Utilisation] serge | streaming_over_network_with_opencv_et_zeromq [2022/02/25 13:15] (Version actuelle) – [Profondeur d'une OAK-D Lite] serge | ||
---|---|---|---|
Ligne 6: | Ligne 6: | ||
</ | </ | ||
- | **Pas de latence, peu de consommation CPU, en python super facile à implémenter** | + | **Pas de latence, peu de consommation CPU, en python super facile à implémenter**\\ |
+ | **mais pas de réception dans VLC et Pure Data.**\\ | ||
+ | Utiliser **[[streamer_des_images_opencv_avec_v4l2-loopback|Streamer des images OpenCV avec v4l2-loopback]]** | ||
=====ZeroMQ===== | =====ZeroMQ===== | ||
**[[https:// | **[[https:// | ||
Ligne 119: | Ligne 121: | ||
</ | </ | ||
- | ====Profondeur d'une OAK-D Lite==== | ||
- | <code bash> | ||
- | cd / | ||
- | source mon_env/ | ||
- | python3 -m pip install depthai numpy | ||
- | </ | ||
- | <file python sender_oak_depth.py> | ||
- | import time | ||
- | import imagezmq | ||
- | import cv2 | ||
- | import depthai as dai | ||
- | import numpy as np | ||
- | |||
- | sender = imagezmq.ImageSender(connect_to=' | ||
- | time.sleep(2.0) | ||
- | |||
- | pipeline = dai.Pipeline() | ||
- | # Define a source - two mono (grayscale) cameras | ||
- | left = pipeline.createMonoCamera() | ||
- | left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) | ||
- | left.setBoardSocket(dai.CameraBoardSocket.LEFT) | ||
- | right = pipeline.createMonoCamera() | ||
- | right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) | ||
- | right.setBoardSocket(dai.CameraBoardSocket.RIGHT) | ||
- | # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) | ||
- | depth = pipeline.createStereoDepth() | ||
- | depth.setConfidenceThreshold(200) | ||
- | |||
- | # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) | ||
- | median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7 # For depth filtering | ||
- | depth.setMedianFilter(median) | ||
- | |||
- | # Better handling for occlusions: | ||
- | depth.setLeftRightCheck(False) | ||
- | # Closer-in minimum depth, disparity range is doubled: | ||
- | depth.setExtendedDisparity(False) | ||
- | # Better accuracy for longer distance, fractional disparity 32-levels: | ||
- | depth.setSubpixel(False) | ||
- | |||
- | left.out.link(depth.left) | ||
- | right.out.link(depth.right) | ||
- | |||
- | # Create output | ||
- | xout = pipeline.createXLinkOut() | ||
- | xout.setStreamName(" | ||
- | depth.disparity.link(xout.input) | ||
- | |||
- | with dai.Device(pipeline) as device: | ||
- | device.startPipeline() | ||
- | # Output queue will be used to get the disparity frames from the outputs defined above | ||
- | q = device.getOutputQueue(name=" | ||
- | |||
- | while True: | ||
- | inDepth = q.get() | ||
- | frame = inDepth.getFrame() | ||
- | frame = cv2.normalize(frame, | ||
- | # Convert depth_frame to numpy array to render image in opencv | ||
- | depth_gray_image = np.asanyarray(frame) | ||
- | # Resize Depth image to 640x480 | ||
- | resized = cv2.resize(depth_gray_image, | ||
- | sender.send_image(" | ||
- | cv2.imshow(" | ||
- | if cv2.waitKey(1) == 27: | ||
- | break | ||
- | </ | ||
- | Le receiver est le même que ci-dessus. | ||
- | |||
- | ====Profondeur d'une RealSense D455==== | ||
- | Pour l' | ||
- | <code bash> | ||
- | cd / | ||
- | source mon_env/ | ||
- | python3 -m pip install | ||
- | </ | ||
- | |||
- | A tester, je n'ai pas la cam ! | ||
- | <file python sender_rs_depth.py> | ||
- | import os | ||
- | import time | ||
- | |||
- | import imagezmq | ||
- | import cv2 | ||
- | import numpy as np | ||
- | |||
- | import pyrealsense2 as rs | ||
- | |||
- | |||
- | class MyRealSense: | ||
- | """ | ||
- | |||
- | def __init__(self): | ||
- | |||
- | self.width = 1280 | ||
- | self.height = 720 | ||
- | self.pose_loop = 1 | ||
- | |||
- | self.pipeline = rs.pipeline() | ||
- | config = rs.config() | ||
- | pipeline_wrapper = rs.pipeline_wrapper(self.pipeline) | ||
- | |||
- | try: | ||
- | pipeline_profile = config.resolve(pipeline_wrapper) | ||
- | except: | ||
- | print(' | ||
- | os._exit(0) | ||
- | |||
- | device = pipeline_profile.get_device() | ||
- | config.enable_stream( | ||
- | width=self.width, | ||
- | height=self.height, | ||
- | format=rs.format.bgr8, | ||
- | framerate=30) | ||
- | config.enable_stream( | ||
- | width=self.width, | ||
- | height=self.height, | ||
- | format=rs.format.z16, | ||
- | framerate=30) | ||
- | self.pipeline.start(config) | ||
- | self.align = rs.align(rs.stream.color) | ||
- | unaligned_frames = self.pipeline.wait_for_frames() | ||
- | frames = self.align.process(unaligned_frames) | ||
- | depth = frames.get_depth_frame() | ||
- | self.depth_intrinsic = depth.profile.as_video_stream_profile().intrinsics | ||
- | |||
- | # Affichage de la taille des images | ||
- | color_frame = frames.get_color_frame() | ||
- | img = np.asanyarray(color_frame.get_data()) | ||
- | print(f" | ||
- | f" | ||
- | |||
- | self.sender = imagezmq.ImageSender(connect_to=' | ||
- | time.sleep(2.0) | ||
- | |||
- | def run(self): | ||
- | """ | ||
- | |||
- | |||
- | while self.pose_loop: | ||
- | |||
- | frames = self.pipeline.wait_for_frames(timeout_ms=80) | ||
- | |||
- | # Align the depth frame to color frame | ||
- | aligned_frames = self.align.process(frames) | ||
- | self.depth_color_frame = aligned_frames.get_depth_frame() | ||
- | depth_gray_image = cv2.cvtColor(depth_color_image, | ||
- | # Convert 16bit data | ||
- | detph_gray_16bit = np.array(depth_gray_image, | ||
- | detph_gray_16bit *= 256 | ||
- | |||
- | self.sender.send_image(" | ||
- | |||
- | cv2.imshow(" | ||
- | |||
- | if cv2.waitKey(1) == 27: | ||
- | break | ||
- | |||
- | |||
- | if __name__ == ' | ||
- | mrs = MyRealSense() | ||
- | mrs.run() | ||
- | </ | ||
{{tag> | {{tag> |
streaming_over_network_with_opencv_et_zeromq.1645351227.txt.gz · Dernière modification : 2022/02/20 10:00 de serge