streamer_des_images_opencv_avec_v4l2-loopback
Différences
Ci-dessous, les différences entre deux révisions de la page.
Les deux révisions précédentesRévision précédenteProchaine révision | Révision précédenteDernière révisionLes deux révisions suivantes | ||
streamer_des_images_opencv_avec_v4l2-loopback [2022/02/26 11:15] – [Profondeur d'une OAK-D Lite] serge | streamer_des_images_opencv_avec_v4l2-loopback [2022/02/26 11:17] – [Profondeur d'une RealSense D455] serge | ||
---|---|---|---|
Ligne 112: | Ligne 112: | ||
<file python sender_rs_depth.py> | <file python sender_rs_depth.py> | ||
- | |||
- | """ | ||
- | Créer un device / | ||
- | Commande non persistante, | ||
- | |||
- | sudo modprobe v4l2loopback video_nr=11 | ||
- | |||
- | Nécessite pyrealsense2 | ||
- | |||
- | Sans VirtualEnv | ||
- | |||
- | python3 -m pip install numpy opencv-python pyfakewebcam pyrealsense2 | ||
- | |||
- | Lancement du script, dans le dossier du script | ||
- | |||
- | python3 sender_rs_depth.py | ||
- | |||
- | Avec VirtualEnv | ||
- | |||
- | Dans le dossier du projet: | ||
- | |||
- | python3 -m venv mon_env | ||
- | source mon_env/ | ||
- | python3 -m pip install numpy opencv-python pyfakewebcam pyrealsense2 | ||
- | |||
- | Lancement du script, dans le dossier du script | ||
- | |||
- | ./ | ||
- | """ | ||
- | |||
import os | import os | ||
import time | import time | ||
Ligne 149: | Ligne 119: | ||
import pyrealsense2 as rs | import pyrealsense2 as rs | ||
+ | # Le faux device | ||
+ | VIDEO = '/ | ||
+ | # Avec ou sans slider pour régler GRAY_BACKGROUND | ||
+ | SLIDER = 1 | ||
+ | # GRAY_BACKGROUND: | ||
+ | # si elle est modifiée par le slider, il faut le modifier ci-dessous | ||
GRAY_BACKGROUND = 153 | GRAY_BACKGROUND = 153 | ||
- | VIDEO = '/ | ||
class MyRealSense: | class MyRealSense: | ||
- | |||
def __init__(self): | def __init__(self): | ||
global VIDEO | global VIDEO | ||
+ | global SLIDER | ||
+ | global GRAY_BACKGROUND | ||
+ | |||
self.width = 1280 | self.width = 1280 | ||
self.height = 720 | self.height = 720 | ||
Ligne 201: | Ligne 178: | ||
self.camera = pyfakewebcam.FakeWebcam(VIDEO, | self.camera = pyfakewebcam.FakeWebcam(VIDEO, | ||
+ | |||
+ | if SLIDER: | ||
+ | self.create_slider(GRAY_BACKGROUND) | ||
+ | |||
+ | def create_slider(self, | ||
+ | # # global GRAY_BACKGROUND | ||
+ | cv2.namedWindow(' | ||
+ | cv2.createTrackbar(' | ||
+ | self.gray_background_callback) | ||
+ | cv2.setTrackbarPos(' | ||
+ | cv2.namedWindow(' | ||
+ | |||
+ | def gray_background_callback(self, | ||
+ | global GRAY_BACKGROUND | ||
+ | GRAY_BACKGROUND = int(value) | ||
def run(self): | def run(self): | ||
""" | """ | ||
global GRAY_BACKGROUND | global GRAY_BACKGROUND | ||
+ | global SLIDER | ||
while self.pose_loop: | while self.pose_loop: | ||
- | |||
# Get frameset of color and depth | # Get frameset of color and depth | ||
frames = self.pipeline.wait_for_frames() | frames = self.pipeline.wait_for_frames() | ||
Ligne 223: | Ligne 214: | ||
continue | continue | ||
+ | # Suppression du fond, voir | ||
+ | # https:// | ||
depth_image = np.asanyarray(aligned_depth_frame.get_data()) | depth_image = np.asanyarray(aligned_depth_frame.get_data()) | ||
color_image = np.asanyarray(color_frame.get_data()) | color_image = np.asanyarray(color_frame.get_data()) | ||
# Remove background - Set pixels further than clipping_distance to grey | # Remove background - Set pixels further than clipping_distance to grey | ||
- | grey_color = GRAY_BACKGROUND | ||
# depth image is 1 channel, color is 3 channels | # depth image is 1 channel, color is 3 channels | ||
depth_image_3d = np.dstack((depth_image, | depth_image_3d = np.dstack((depth_image, | ||
bg_removed = np.where((depth_image_3d > self.clipping_distance) |\ | bg_removed = np.where((depth_image_3d > self.clipping_distance) |\ | ||
- | | + | |
- | # Render images: | ||
- | # depth align to color on left | ||
- | # depth on right | ||
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, | depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, | ||
| | ||
| | ||
+ | images = np.hstack((bg_removed, | ||
- | | + | |
- | # # cv2.imshow(' | + | cv2.imshow(' |
- | self.camera.schedule_frame(depth_colormap) | + | self.camera.schedule_frame(bg_removed) |
if cv2.waitKey(1) == 27: | if cv2.waitKey(1) == 27: | ||
Ligne 250: | Ligne 240: | ||
mrs = MyRealSense() | mrs = MyRealSense() | ||
mrs.run() | mrs.run() | ||
- | |||
- | |||
</ | </ | ||
| |
streamer_des_images_opencv_avec_v4l2-loopback.txt · Dernière modification : 2022/03/03 12:35 de serge