Thursday, March 14, 2024

Pyrealsense

WARNING: to run at full resolution the RealSense must be connected to a USB 3 port with a USB 3-capable cable; otherwise it is limited to 640x480.
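
A quick way to verify the link is to read the USB descriptor that librealsense reports for each connected device. This is a minimal sketch; the exact string printed (e.g. "3.2" vs "2.1") depends on camera model and firmware.

import pyrealsense2 as rs

ctx = rs.context()
for dev in ctx.query_devices():
    name = dev.get_info(rs.camera_info.name)
    if dev.supports(rs.camera_info.usb_type_descriptor):
        # "3.x" means a proper USB 3 link, "2.x" means it fell back to USB 2
        print(name, "- USB", dev.get_info(rs.camera_info.usb_type_descriptor))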

 

==================================================

Color only 1920x1080

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math

# Configure the pipeline for a single 1920x1080 BGR color stream at 30 fps
pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)

profile = pipeline.start(config)

# Align to the color stream (with only color enabled this is effectively a
# pass-through, but it mirrors the structure of the depth examples below)
align_to = rs.stream.color
align = rs.align(align_to)

# Grab one frameset, convert the color frame to a numpy array and save it
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
imageName1 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_Color.png'
cv2.imwrite(imageName1, color_image)
pipeline.stop()


 

================================================== 

Color + Depth 1280x720

 

================================================== 

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math

pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

profile = pipeline.start(config)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

# We will be removing the background of objects more than
#  clipping_distance_in_meters meters away
clipping_distance_in_meters = 1.5
clipping_distance = clipping_distance_in_meters / depth_scale


align_to = rs.stream.color
align = rs.align(align_to)

frames = pipeline.wait_for_frames()

aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()

depth_image = np.asanyarray(aligned_depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())


# Remove background - Set pixels further than clipping_distance to grey
grey_color = 153
depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)

# Render images
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
images = np.hstack((bg_removed, depth_colormap))
#cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)

# Filenames (one shared timestamp so all the saved images match)
timestamp = time.strftime("%Y_%m_%d_%H_%M_%S")
imageName1 = timestamp + '_Color.png'
imageName2 = timestamp + '_Depth.png'
imageName3 = timestamp + '_bg_removed.png'
imageName4 = timestamp + '_ColorDepth.png'
imageName5 = timestamp + '_DepthColormap.png'

# Saving the images
cv2.imwrite(imageName1, color_image)
cv2.imwrite(imageName2, depth_image)
cv2.imwrite(imageName3, bg_removed)
cv2.imwrite(imageName4, images)
cv2.imwrite(imageName5, depth_colormap)

pipeline.stop()
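
As a side note, the raw z16 values can be converted to metres with the depth_scale obtained above, or read directly from the aligned depth frame. A minimal sketch (the pixel coordinates are arbitrary, for illustration only):

x, y = 640, 360                                    # some pixel in the 1280x720 frame
dist_m = depth_image[y, x] * depth_scale           # raw z16 value -> metres
# dist_m = aligned_depth_frame.get_distance(x, y)  # equivalent, via the frame API
print("Distance at (%d, %d): %.3f m" % (x, y, dist_m))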



================================================== 

Infrared 1280x720

================================================== 

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math
 
pipeline = rs.pipeline()
config = rs.config()

# Enable the left infrared imager (stream index 1), 8-bit grayscale
config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 30)
profile = pipeline.start(config)
 
frames = pipeline.wait_for_frames()
ir1_frame = frames.get_infrared_frame(1)
image = np.asanyarray(ir1_frame.get_data())
imageIR = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_IR.png'
cv2.imwrite(imageIR, image)
pipeline.stop()




================================================== 

Infrared, IR emitter OFF, 1280x720

==================================================

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math
 
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 30)

# Disable the IR emitter so the dot pattern does not appear in the image
pipeline_profile = pipeline.start(config)
device = pipeline_profile.get_device()
depth_sensor = device.query_sensors()[0]
if depth_sensor.supports(rs.option.emitter_enabled):
    depth_sensor.set_option(rs.option.emitter_enabled, 0)

frames = pipeline.wait_for_frames()
ir1_frame = frames.get_infrared_frame(1)
image = np.asanyarray(ir1_frame.get_data())
imageIR = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_IR_OFF.png'
cv2.imwrite(imageIR, image)
pipeline.stop() 

 


================================================== 

IMU

==================================================

import pyrealsense2 as rs

import numpy as np


def initialize_camera():
    # start the frames pipe
    p = rs.pipeline()
    conf = rs.config()
    conf.enable_stream(rs.stream.accel)
    conf.enable_stream(rs.stream.gyro)
    prof = p.start(conf)
    return p


def gyro_data(gyro):
    return np.asarray([gyro.x, gyro.y, gyro.z])


def accel_data(accel):
    return np.asarray([accel.x, accel.y, accel.z])

p = initialize_camera()
try:
    while True:
        f = p.wait_for_frames()
        accel = accel_data(f[0].as_motion_frame().get_motion_data())
        gyro = gyro_data(f[1].as_motion_frame().get_motion_data())
        print("accelerometer: ", accel)
        print("gyro: ", gyro)

finally:
    p.stop()

 

Sample output:

gyro:  [0.         0.         0.00349066]
accelerometer:  [-0.24516624 -8.78675842 -2.75566864]

 

================================================== 

SET EXPOSURE

==================================================

import pyrealsense2 as rs
pipeline = rs.pipeline()
config = rs.config()
profile = pipeline.start(config) # Start streaming
sensor_dep = profile.get_device().first_depth_sensor()
print("Trying to set Exposure")
exp = sensor_dep.get_option(rs.option.exposure)
print ("exposure = %d" % exp)
print ("Setting exposure to new value")
exp = sensor_dep.set_option(rs.option.exposure, 25000)
exp = sensor_dep.get_option(rs.option.exposure)
print ("New exposure = %d" % exp)
profile = pipeline.stop
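
To hand control back to the camera afterwards, auto-exposure can be re-enabled through the same option interface. A minimal sketch, reusing sensor_dep from the snippet above:

if sensor_dep.supports(rs.option.enable_auto_exposure):
    sensor_dep.set_option(rs.option.enable_auto_exposure, 1)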

Exposure can also be adjusted over a region of interest (ROI):

p = rs.pipeline()
prof = p.start()
s = prof.get_device().first_roi_sensor()
roi = s.get_region_of_interest()   # roi exposes min_x, min_y, max_x, max_y
s.set_region_of_interest(roi)      # auto-exposure is then evaluated over that region

 

================================================== 

ADVANCED MODE (fine-grained parameter tuning)

==================================================

 

import pyrealsense2 as rs
import time
import json

DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00", "0B01", "0B03", "0B07", "0B3A", "0B5C"]

def find_device_that_supports_advanced_mode() :
    ctx = rs.context()
    ds5_dev = rs.device()
    devices = ctx.query_devices()
    for dev in devices:
        if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids:
            if dev.supports(rs.camera_info.name):
                print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
            return dev
    raise Exception("No D400 product line device that supports advanced mode was found")

try:
    dev = find_device_that_supports_advanced_mode()
    advnc_mode = rs.rs400_advanced_mode(dev)
    print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")

    # Loop until we successfully enable advanced mode
    while not advnc_mode.is_enabled():
        print("Trying to enable advanced mode...")
        advnc_mode.toggle_advanced_mode(True)
        # At this point the device will disconnect and re-connect.
        print("Sleeping for 5 seconds...")
        time.sleep(5)
        # The 'dev' object will become invalid and we need to initialize it again
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
        print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")

    # Get each control's current value
    print("Depth Control: \n", advnc_mode.get_depth_control())
    print("RSM: \n", advnc_mode.get_rsm())
    print("RAU Support Vector Control: \n", advnc_mode.get_rau_support_vector_control())
    print("Color Control: \n", advnc_mode.get_color_control())
    print("RAU Thresholds Control: \n", advnc_mode.get_rau_thresholds_control())
    print("SLO Color Thresholds Control: \n", advnc_mode.get_slo_color_thresholds_control())
    print("SLO Penalty Control: \n", advnc_mode.get_slo_penalty_control())
    print("HDAD: \n", advnc_mode.get_hdad())
    print("Color Correction: \n", advnc_mode.get_color_correction())
    print("Depth Table: \n", advnc_mode.get_depth_table())
    print("Auto Exposure Control: \n", advnc_mode.get_ae_control())
    print("Census: \n", advnc_mode.get_census())

    #To get the minimum and maximum value of each control use the mode value:
    query_min_values_mode = 1
    query_max_values_mode = 2
    current_std_depth_control_group = advnc_mode.get_depth_control()
    min_std_depth_control_group = advnc_mode.get_depth_control(query_min_values_mode)
    max_std_depth_control_group = advnc_mode.get_depth_control(query_max_values_mode)
    print("Depth Control Min Values: \n ", min_std_depth_control_group)
    print("Depth Control Max Values: \n ", max_std_depth_control_group)

    # Set some control with a new (median) value
    current_std_depth_control_group.scoreThreshA = int((max_std_depth_control_group.scoreThreshA - min_std_depth_control_group.scoreThreshA) / 2)
    advnc_mode.set_depth_control(current_std_depth_control_group)
    print("After Setting new value, Depth Control: \n", advnc_mode.get_depth_control())

    # Serialize all controls to a Json string
    serialized_string = advnc_mode.serialize_json()
    print("Controls as JSON: \n", serialized_string)
    as_json_object = json.loads(serialized_string)

    # We can also load controls from a json string
    # For Python 2, the values in 'as_json_object' dict need to be converted from unicode object to utf-8
    if type(next(iter(as_json_object))) != str:
        as_json_object = {k.encode('utf-8'): v.encode("utf-8") for k, v in as_json_object.items()}
    # The C++ JSON parser requires double-quotes for the json object so we need
    # to replace the single quote of the pythonic json to double-quotes
    json_string = str(as_json_object).replace("'", '\"')
    advnc_mode.load_json(json_string)

except Exception as e:
    print(e)
    pass
