
Monday, August 18, 2025

Preparing Realsense D415 data for monocular depth estimation

I wanted to try building my own neural network for Monocular Depth Estimation, but I had no data to feed it... so I armed myself with a Realsense D415 and went to my training ground (the Maiano quarry near Florence) to do some acquisitions.

The problem with Realsense cameras is that the depth sensor is very noisy, so the acquisition program takes multiple scans and then computes the per-pixel median.

 


import pyrealsense2 as rs
import numpy as np
from PIL import Image
import os

# -------------------
# Config
# -------------------
output_rgb_dir = "rgb"
output_depth_dir = "depth"
os.makedirs(output_rgb_dir, exist_ok=True)
os.makedirs(output_depth_dir, exist_ok=True)

num_frames = 30 # Number of frames to accumulate

# -------------------
# RealSense pipeline
# -------------------
pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

profile = pipeline.start(config)

# Depth scale
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth scale:", depth_scale)

# Align depth to color
align_to = rs.stream.color
align = rs.align(align_to)

try:
    print(f"Capturing {num_frames} frames...")
    depth_frames_list = []
    rgb_frame_to_save = None

    for i in range(num_frames):
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()

        if not color_frame or not depth_frame:
            continue

        # Convert frames to numpy
        color_image = np.asanyarray(color_frame.get_data())  # RGB
        depth_image = np.asanyarray(depth_frame.get_data()) * depth_scale  # meters

        depth_frames_list.append(depth_image)

        # Keep the last RGB frame (or you could store them all and take the median too)
        rgb_frame_to_save = color_image

    # Compute median depth per pixel
    depth_median = np.median(np.stack(depth_frames_list, axis=0), axis=0).astype(np.float32)

    # Save RGB
    rgb_filename = os.path.join(output_rgb_dir, "rgb_median.png")
    Image.fromarray(rgb_frame_to_save).save(rgb_filename)

    # Save depth as NPY
    depth_npy_filename = os.path.join(output_depth_dir, "depth_median.npy")
    np.save(depth_npy_filename, depth_median)

    # Save depth as PNG (scaled to 16-bit for visualization)
    depth_png_filename = os.path.join(output_depth_dir, "depth_median.png")
    depth_mm = (depth_median * 1000).astype(np.uint16)  # convert meters to mm
    Image.fromarray(depth_mm).save(depth_png_filename)

    print(f"Saved median RGB to {rgb_filename}")
    print(f"Saved median depth to {depth_npy_filename} and {depth_png_filename}")

except KeyboardInterrupt:
    print("Stopped by user.")

finally:
    pipeline.stop()
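
To sanity-check an acquisition, the saved NPY can be reloaded and displayed. A minimal sketch, assuming matplotlib is available and the directory layout of the script above:

import numpy as np
import matplotlib.pyplot as plt

# per-pixel median depth (meters) written by the acquisition script
depth = np.load("depth/depth_median.npy")
plt.imshow(depth, cmap="jet")
plt.colorbar(label="depth [m]")
plt.title("Median depth")
plt.show()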

 

The depth data contains holes where no measurement could be acquired. The next script fills them: a nearest-neighbor fill via SciPy, followed by an OpenCV bilateral filter to smooth while preserving edges (it works on the NPY data only).

import sys
import numpy as np
import cv2
from scipy import ndimage

if len(sys.argv) != 2:
    print(f"Usage: python {sys.argv[0]} depth_file.npy")
    sys.exit(1)

filename = sys.argv[1]

# Load depth map
depth = np.load(filename)
mask = depth == 0

# Fill holes with the nearest valid neighbor: distance_transform_edt with
# return_indices=True gives, for every hole pixel, the index of the
# closest non-hole pixel
nearest_filled = ndimage.distance_transform_edt(
    mask,
    return_distances=False,
    return_indices=True
)
depth_filled = depth[tuple(nearest_filled)]

# Smooth with bilateral filter to preserve edges
depth_filtered = cv2.bilateralFilter(depth_filled.astype(np.float32), 9, 75, 75)

# Save with "_filled" suffix
output_filename = filename.replace(".npy", "_filled.npy")
np.save(output_filename, depth_filtered)

print(f"Saved filled depth map to: {output_filename}")
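
For comparison, OpenCV's actual inpainting can be used as well, but cv2.inpaint only accepts 8-bit images, so the depth map has to be scaled down and back up. A rough sketch (the 8-bit round trip loses precision, so this is only good for visualization-grade fills):

import numpy as np
import cv2

depth = np.load("depth/depth_median.npy")
mask = (depth == 0).astype(np.uint8)  # 1 where the measurement is missing

# scale to 8 bit, inpaint with the Telea algorithm, scale back to meters
d_max = float(depth.max())
depth8 = (depth / d_max * 255).astype(np.uint8)
inpainted8 = cv2.inpaint(depth8, mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
depth_inpainted = inpainted8.astype(np.float32) / 255.0 * d_max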


Realsense D415 intrinsics

One of the advantages of Realsense cameras is that they come factory-calibrated, so the intrinsic parameters can be read directly without going through a checkerboard calibration.

Sample output:
Width: 640
Height: 480
Focal length (fx, fy): 616.6863403320312 616.5963134765625
Principal point (ppx, ppy): 318.4041748046875 238.97064208984375
Distortion model: distortion.inverse_brown_conrady
Distortion coefficients: [0.0, 0.0, 0.0, 0.0, 0.0]
 

import pyrealsense2 as rs

# Create pipeline and config
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
profile = pipeline.start(config)

# Get the stream profile and intrinsics
color_stream_profile = profile.get_stream(rs.stream.color)
intr = color_stream_profile.as_video_stream_profile().get_intrinsics()

# Print intrinsics
print("Width:", intr.width)
print("Height:", intr.height)
print("Focal length (fx, fy):", intr.fx, intr.fy)
print("Principal point (ppx, ppy):", intr.ppx, intr.ppy)
print("Distortion model:", intr.model)
print("Distortion coefficients:", intr.coeffs)

pipeline.stop()
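
With these intrinsics, the depth maps saved earlier can be back-projected into a metric point cloud using the standard pinhole model. A minimal sketch, assuming the depth/depth_median.npy file from the acquisition script and the fx, fy, ppx, ppy values printed above (the distortion coefficients are all zero here, so they can be ignored):

import numpy as np

fx, fy = 616.6863403320312, 616.5963134765625
ppx, ppy = 318.4041748046875, 238.97064208984375

depth = np.load("depth/depth_median.npy")  # meters, shape (480, 640)

# pixel grid -> 3D: X = (u - ppx) * Z / fx, Y = (v - ppy) * Z / fy
u, v = np.meshgrid(np.arange(depth.shape[1]), np.arange(depth.shape[0]))
X = (u - ppx) * depth / fx
Y = (v - ppy) * depth / fy
points = np.dstack((X, Y, depth))  # (480, 640, 3) point cloud in meters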


 

Tuesday, March 4, 2025

Realsense L515 and SDK 2.56

Intel did it again... trying to use the Realsense L515 once more, it was no longer recognized by the viewer, because Intel has declared it obsolete and removed support from the SDK.

To use it again, I had to rebuild the sources by hand with


wget https://github.com/IntelRealSense/librealsense/archive/refs/tags/v2.54.2.zip
unzip v2.54.2.zip
cd librealsense-2.54.2/
mkdir build
cd build
cmake ..
sudo make install
cd ../config
sudo cp 99-realsense-libusb.rules /etc/udev/rules.d/99-realsense-libusb.rules
sudo udevadm control --reload-rules
sudo udevadm trigger

Note: I had done this installation on Debian Stable. After moving to Ubuntu, the same commands failed to build. In short, you have to add the line

#include <cstdint>

to the file /third-party/rsutils/include/version.h


 

To get the Python wrapper, build as follows:

sudo apt install python3-pybind11

cmake ../ -DBUILD_EXAMPLES=true -DBUILD_PYTHON_BINDINGS:bool=true
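
After the build, a quick way to verify that the locally compiled wrapper works is to enumerate the attached devices (assuming the build output directory, e.g. build/Release, is on PYTHONPATH):

import pyrealsense2 as rs

ctx = rs.context()
for dev in ctx.query_devices():
    print(dev.get_info(rs.camera_info.name),
          dev.get_info(rs.camera_info.serial_number))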

Thursday, March 14, 2024

Pyrealsense

WARNING: to run at full resolution, the Realsense must be on a USB 3 port with a USB 3-rated cable; otherwise it is limited to 640x480.
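
The negotiated link can be checked from Python: D400-series cameras expose a usb_type_descriptor info field reporting the USB version they enumerated at. A small sketch:

import pyrealsense2 as rs

ctx = rs.context()
for dev in ctx.query_devices():
    if dev.supports(rs.camera_info.usb_type_descriptor):
        print(dev.get_info(rs.camera_info.name),
              "on USB", dev.get_info(rs.camera_info.usb_type_descriptor))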

 

==================================================

Color only, 1920x1080

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math

pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)

profile = pipeline.start(config)

align_to = rs.stream.color
align = rs.align(align_to)

frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
color_image = np.asanyarray(color_frame.get_data())
imageName1 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_Color.png'
cv2.imwrite(imageName1, color_image)
pipeline.stop()


 

================================================== 

Color + depth, 1280x720

 

================================================== 

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math

pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

profile = pipeline.start(config)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

# We will be removing the background of objects more than
#  clipping_distance_in_meters meters away
clipping_distance_in_meters = 1.5
clipping_distance = clipping_distance_in_meters / depth_scale


align_to = rs.stream.color
align = rs.align(align_to)

frames = pipeline.wait_for_frames()

aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()

depth_image = np.asanyarray(aligned_depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())


# Remove background - Set pixels further than clipping_distance to grey
grey_color = 153
depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)

# Render images
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
images = np.hstack((bg_removed, depth_colormap))
#cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)

# Filename
imageName1 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_Color.png'
imageName2 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_Depth.png'
imageName3 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_bg_removed.png'
imageName4 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_ColorDepth.png'
imageName5 = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_DepthColormap.png'

# Saving the image
cv2.imwrite(imageName1, color_image)
cv2.imwrite(imageName2, depth_image)
cv2.imwrite(imageName3, bg_removed)
cv2.imwrite(imageName4, images)
cv2.imwrite(imageName5, depth_colormap )

pipeline.stop()
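
Note that the _Depth.png file holds the raw 16-bit z16 values. To get meters back when reloading it, multiply by the depth scale read from the sensor (0.001 on most D400-series units); a small sketch with a hypothetical file name:

import cv2

depth_raw = cv2.imread("2024_03_14_10_00_00_Depth.png", cv2.IMREAD_UNCHANGED)
depth_m = depth_raw.astype("float32") * 0.001  # substitute your depth_scale
print("center distance: %.2f m" % depth_m[360, 640])  # row, col of the 1280x720 center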



================================================== 

Infrared, 1280x720

================================================== 

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math
 
pipeline = rs.pipeline()
config = rs.config()

config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 30)
profile = pipeline.start(config)
 
frames = pipeline.wait_for_frames()
ir1_frame = frames.get_infrared_frame(1)
image = np.asanyarray(ir1_frame.get_data())
imageIR = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_IR.png'
cv2.imwrite(imageIR, image)
pipeline.stop()




================================================== 

Infrared with IR emitter OFF, 1280x720

==================================================

import pyrealsense2 as rs
import numpy as np
import cv2
import time
import math
 
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 30)

# disable the IR emitter
pipeline_profile = pipeline.start(config)
device = pipeline_profile.get_device()
depth_sensor = device.query_sensors()[0]
if depth_sensor.supports(rs.option.emitter_enabled):
    depth_sensor.set_option(rs.option.emitter_enabled, 0)

frames = pipeline.wait_for_frames()
ir1_frame = frames.get_infrared_frame(1)
image = np.asanyarray(ir1_frame.get_data())
imageIR = str(time.strftime("%Y_%m_%d_%H_%M_%S")) +  '_IR_OFF.png'
cv2.imwrite(imageIR, image)
pipeline.stop() 

 


================================================== 

IMU

==================================================

import pyrealsense2 as rs
import numpy as np

def initialize_camera():
    # start the frames pipe
    p = rs.pipeline()
    conf = rs.config()
    conf.enable_stream(rs.stream.accel)
    conf.enable_stream(rs.stream.gyro)
    prof = p.start(conf)
    return p


def gyro_data(gyro):
    return np.asarray([gyro.x, gyro.y, gyro.z])


def accel_data(accel):
    return np.asarray([accel.x, accel.y, accel.z])

p = initialize_camera()
try:
    while True:
        f = p.wait_for_frames()
        accel = accel_data(f[0].as_motion_frame().get_motion_data())
        gyro = gyro_data(f[1].as_motion_frame().get_motion_data())
        print("accelerometer: ", accel)
        print("gyro: ", gyro)

finally:
    p.stop()

 

gyro:  [0.         0.         0.00349066]
accelerometer:  [-0.24516624 -8.78675842 -2.75566864]
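
When the camera is at rest, the accelerometer reads only the gravity vector, so a static tilt estimate can be derived from it. A minimal sketch using the sample values above (axis conventions differ between Realsense models, so the signs here are an assumption):

import math

ax, ay, az = -0.24516624, -8.78675842, -2.75566864  # sample accelerometer output above
roll = math.degrees(math.atan2(ay, az))
pitch = math.degrees(math.atan2(-ax, math.hypot(ay, az)))
print("roll: %.1f deg, pitch: %.1f deg" % (roll, pitch))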

 

================================================== 

SET EXPOSURE

==================================================

import pyrealsense2 as rs
pipeline = rs.pipeline()
config = rs.config()
profile = pipeline.start(config) # Start streaming
sensor_dep = profile.get_device().first_depth_sensor()
print("Trying to set Exposure")
exp = sensor_dep.get_option(rs.option.exposure)
print ("exposure = %d" % exp)
print ("Setting exposure to new value")
sensor_dep.set_option(rs.option.exposure, 25000)
exp = sensor_dep.get_option(rs.option.exposure)
print ("New exposure = %d" % exp)
pipeline.stop()

Exposure can also be adjusted on a region of interest (ROI):

p = rs.pipeline()
prof = p.start()
s = prof.get_device().first_roi_sensor()
roi = s.get_region_of_interest()
# adjust roi.min_x, roi.min_y, roi.max_x, roi.max_y here before applying
s.set_region_of_interest(roi)

 

================================================== 

ADVANCED MODE (fine-grained parameter tuning)

==================================================

 

import pyrealsense2 as rs
import time
import json

DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00", "0B01", "0B03", "0B07", "0B3A", "0B5C"]

def find_device_that_supports_advanced_mode() :
    ctx = rs.context()
    ds5_dev = rs.device()
    devices = ctx.query_devices();
    for dev in devices:
        if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids:
            if dev.supports(rs.camera_info.name):
                print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
            return dev
    raise Exception("No D400 product line device that supports advanced mode was found")

try:
    dev = find_device_that_supports_advanced_mode()
    advnc_mode = rs.rs400_advanced_mode(dev)
    print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")

    # Loop until we successfully enable advanced mode
    while not advnc_mode.is_enabled():
        print("Trying to enable advanced mode...")
        advnc_mode.toggle_advanced_mode(True)
        # At this point the device will disconnect and re-connect.
        print("Sleeping for 5 seconds...")
        time.sleep(5)
        # The 'dev' object will become invalid and we need to initialize it again
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
        print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")

    # Get each control's current value
    print("Depth Control: \n", advnc_mode.get_depth_control())
    print("RSM: \n", advnc_mode.get_rsm())
    print("RAU Support Vector Control: \n", advnc_mode.get_rau_support_vector_control())
    print("Color Control: \n", advnc_mode.get_color_control())
    print("RAU Thresholds Control: \n", advnc_mode.get_rau_thresholds_control())
    print("SLO Color Thresholds Control: \n", advnc_mode.get_slo_color_thresholds_control())
    print("SLO Penalty Control: \n", advnc_mode.get_slo_penalty_control())
    print("HDAD: \n", advnc_mode.get_hdad())
    print("Color Correction: \n", advnc_mode.get_color_correction())
    print("Depth Table: \n", advnc_mode.get_depth_table())
    print("Auto Exposure Control: \n", advnc_mode.get_ae_control())
    print("Census: \n", advnc_mode.get_census())

    #To get the minimum and maximum value of each control use the mode value:
    query_min_values_mode = 1
    query_max_values_mode = 2
    current_std_depth_control_group = advnc_mode.get_depth_control()
    min_std_depth_control_group = advnc_mode.get_depth_control(query_min_values_mode)
    max_std_depth_control_group = advnc_mode.get_depth_control(query_max_values_mode)
    print("Depth Control Min Values: \n ", min_std_depth_control_group)
    print("Depth Control Max Values: \n ", max_std_depth_control_group)

    # Set some control with a new (median) value
    current_std_depth_control_group.scoreThreshA = int((max_std_depth_control_group.scoreThreshA - min_std_depth_control_group.scoreThreshA) / 2)
    advnc_mode.set_depth_control(current_std_depth_control_group)
    print("After Setting new value, Depth Control: \n", advnc_mode.get_depth_control())

    # Serialize all controls to a Json string
    serialized_string = advnc_mode.serialize_json()
    print("Controls as JSON: \n", serialized_string)
    as_json_object = json.loads(serialized_string)

    # We can also load controls from a json string
    # For Python 2, the values in 'as_json_object' dict need to be converted from unicode object to utf-8
    if type(next(iter(as_json_object))) != str:
        as_json_object = {k.encode('utf-8'): v.encode("utf-8") for k, v in as_json_object.items()}
    # The C++ JSON parser requires double-quotes for the json object so we need
    # to replace the single quote of the pythonic json to double-quotes
    json_string = str(as_json_object).replace("'", '\"')
    advnc_mode.load_json(json_string)

except Exception as e:
    print(e)
    pass
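
Since all the advanced-mode controls serialize to JSON, a tuned configuration can be written to disk and re-applied later. A short sketch reusing the advnc_mode object from the example above (the file name is arbitrary):

# save the current preset
with open("d415_preset.json", "w") as f:
    f.write(advnc_mode.serialize_json())

# ...and load it back later
with open("d415_preset.json") as f:
    advnc_mode.load_json(f.read())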

Friday, March 8, 2024

Realsense Docker

Update:

The container is available pre-built on Docker Hub under my account

https://hub.docker.com/repository/docker/c1p81/realsense_2004/general 

To pull it:

 docker pull c1p81/realsense_2004:latest

To enable access to the USB port, the run command must be set up as follows:

sudo docker run -d  --net=host --env="DISPLAY" --volume="$HOME/.Xauthority:/root/.Xauthority:rw"   -v /dev:/dev    --device-cgroup-rule "c 81:* rmw"     --device-cgroup-rule "c 189:* rmw" --privileged -v /dev/bus/usb:/dev/bus/usb  b105279d1264 realsense-viewer

 ============================================

I decided to pull the Realsense D415 out of the drawer again and, as usual, installing the SDK gets harder every time because of Intel's policy of discontinuing its own devices.


 

This time I wanted to try the Docker route, but neither the official container nor some found on Docker Hub were fully working, so I wrote my own.

(replace the image_id with your own)

Bash

docker run -it --rm  --privileged  -v /dev:/dev  -v "$HOME:/home/luca/"   --device-cgroup-rule "c 81:* rmw"     --device-cgroup-rule "c 189:* rmw"   b105279d1264 /bin/bash

Realsense Viewer

xhost +

docker run -d  --net=host --env="DISPLAY" --volume="$HOME/.Xauthority:/root/.Xauthority:rw"   -v /dev:/dev    --device-cgroup-rule "c 81:* rmw"     --device-cgroup-rule "c 189:* rmw"   2bd94ff6bc38 realsense-viewer

 

Save to disk

docker run -it --rm --privileged    -v /dev:/dev  -v "$HOME:/home/luca/"   --device-cgroup-rule "c 81:* rmw"     --device-cgroup-rule "c 189:* rmw"   34ea465a203e sh -c "cd /home/luca && rs-save-to-disk"

To build the image, use the command

docker build -t 20_04_real .

with the following Dockerfile


FROM public.ecr.aws/lts/ubuntu:20.04_stable

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update \
&& apt-get install -qq -y --no-install-recommends \
build-essential \
cmake \
lsb-release \
git \
curl \
libssl-dev \
libusb-1.0-0-dev \
pkg-config \
libudev-dev \
libgtk-3-dev \
libglfw3-dev \
libgl1-mesa-dev \
libglu1-mesa-dev \
python3 \
python3-dev \
python3-pip \
libopencv-dev \
python3-opencv \
python3-numpy \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*


RUN mkdir -p /etc/apt/keyrings
RUN curl -sSf https://librealsense.intel.com/Debian/librealsense.pgp | tee /etc/apt/keyrings/librealsense.pgp > /dev/null


RUN echo "deb [signed-by=/etc/apt/keyrings/librealsense.pgp] https://librealsense.intel.com/Debian/apt-repo `lsb_release -cs` main" | tee /etc/apt/sources.list.d/librealsense.list
RUN apt-get update


RUN apt-get -y install librealsense2-dkms librealsense2-utils librealsense2-dev librealsense2-dbg librealsense2-udev-rules mc nano locales
RUN apt-get clean
RUN pip install pyrealsense2

RUN git clone https://github.com/IntelRealSense/librealsense
RUN mkdir /librealsense/build
WORKDIR /librealsense/build
RUN cmake ../ -DBUILD_EXAMPLES=true
RUN make

Saturday, August 13, 2022

Realsense distance

Measures the distance at the center of the image.

A ring buffer is used to make the measurement more stable.


## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.

###############################################
## Open CV and Numpy integration ##
###############################################

import pyrealsense2 as rs
import numpy as np
from numpy_ringbuffer import RingBuffer
import cv2


RingBufferSize = 50
r = RingBuffer(capacity=RingBufferSize, dtype=np.float16)

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()

# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))

found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

try:
    while True:

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Depth at the image center (pixel x=320, y=240 -> [row, col] = [240, 320]),
        # averaged over the last 50 readings via the ring buffer
        r.append(depth_image[240, 320])
        print(np.mean(r))

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape

        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
            images = np.hstack((resized_color_image, depth_colormap))
        else:
            images = np.hstack((color_image, depth_colormap))

        # Show images with a crosshair on the measured pixel
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        startpoint = (310, 240)
        endpoint = (330, 240)
        color = (255, 0, 0)
        thickness = 3
        images = cv2.line(images, startpoint, endpoint, color, thickness)
        startpoint = (320, 230)
        endpoint = (320, 250)
        images = cv2.line(images, startpoint, endpoint, color, thickness)
        cv2.imshow('RealSense', images)
        cv2.waitKey(1)

finally:

    # Stop streaming
    pipeline.stop()

Tuesday, December 28, 2021

Realsense

I was looking for a way to build a training set for Monocular Depth Estimation and pulled the RealSense D415 back out, only to discover that in August 2021 Intel announced the discontinuation of these robotic sensors (except the stereoscopic ones)... it reminds me a lot of the story of the Intel Edison and Intel Stick: interesting projects that died.



This small program (a modified version of one of the Realsense SDK examples) saves the depth map in NumPy format and the corresponding JPG image when the S key is pressed.

To build the executable as a single binary:

pyinstaller main.py --onefile


requirements.txt

altgraph==0.17.2
importlib-metadata==4.8.3
numpy==1.19.5
opencv-python==4.1.2.30
Pillow==8.4.0
pyinstaller==4.7
pyinstaller-hooks-contrib==2021.4
pyrealsense2==2.50.0.3812
typing_extensions==4.0.1
zipp==3.6.0

main.py

import sys,getopt
import pyrealsense2 as rs
import numpy as np
import cv2
from PIL import Image
import os.path

def completo(argv):
    outputdir = ''
    if len(sys.argv) < 2:
        print('test.py -o <output_directory>')
        sys.exit()
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["odir="])
    except getopt.GetoptError:
        print('test.py -o <output_directory>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('test.py -o <output_directory>')
            sys.exit()
        elif opt in ("-o", "--odir"):
            outputdir = arg
            if not os.path.isdir(outputdir):
                print('Invalid output directory')
                sys.exit()
            print('Output directory is ' + outputdir)

    ## License: Apache 2.0. See LICENSE file in root directory.
    ## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.

    ###############################################
    ## Open CV and Numpy integration ##
    ###############################################

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    contatore = 0

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            depth_colormap_dim = depth_colormap.shape
            color_colormap_dim = color_image.shape

            # If depth and color resolutions are different, resize color image to match depth image for display
            if depth_colormap_dim != color_colormap_dim:
                resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
                                                 interpolation=cv2.INTER_AREA)
                images = np.hstack((resized_color_image, depth_colormap))
            else:
                images = np.hstack((color_image, depth_colormap))

            # Show images
            cv2.namedWindow('Immagine', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Immagine', images)
            tasto = cv2.waitKey(1000)
            if tasto == 115:  # S: save depth (NumPy) and color (JPG) with a zero-padded counter
                contatore = contatore + 1
                str_conta = str(contatore)
                try:
                    np.save(str_conta.zfill(3) + '_depth.npy', depth_image)
                    # the stream is BGR (OpenCV order); convert to RGB for PIL
                    im = Image.fromarray(cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB))
                    im.save(str_conta.zfill(3) + "_color.jpg")
                    print('Image ' + str(contatore) + ' acquired')
                except:
                    print('Acquisition error')
                    exit()
            if tasto == 113:  # Q: quit
                exit()

    finally:
        # Stop streaming
        pipeline.stop()

if __name__ == '__main__':
    completo(sys.argv[1:])

Saturday, November 21, 2020

Realsense SDK with the D435 on Debian

After some experience with structured-light sensors, I tried the Intel RealSense D435 (note the missing trailing "i": unlike the D435i, there is no built-in IMU), which works as a depth sensor using the stereoscopy of two cameras.

The sensor was designed for robotics applications and is very fast at acquisition/processing (about 30 fps), but the resulting depth data is very different from what a structured-light sensor produces.


In addition to the two stereo cameras, there is an IR projector to improve the depth data, plus an RGB camera.

The maximum operating distance is about 4 m. As the video shows, the data is very noisy.



One aspect that left me quite puzzled is that angles are not preserved. For example, an angle that is 90 degrees in reality comes out of the mesh as 105 degrees. I believe this is because the lenses are almost fish-eye.



As for lengths, the door is actually 125 cm wide, while the sensor measures it at about 105 cm.

Let's say the data at the center of the image is consistent, but at the edges it is not.

The SDK targets Windows and Ubuntu, but it can be installed on Debian using Snap:

snap install librealsense

Besides the libraries, this also installs realsense-viewer, from which the camera firmware can also be upgraded.

The examples can be found at https://dev.intelrealsense.com/docs/code-samples

To build the examples, I also had to install the StbEasyFont library (it is in the third-party directory, but I installed it with apt for simplicity).

=================================================
cmake_minimum_required(VERSION 3.5)

project(realsense LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(realsense2 REQUIRED )


add_executable(realsense main.cpp)
target_link_libraries(realsense realsense2)

=================================================

=================================================
#include <iostream>
#include <librealsense2/rs.hpp> 

using namespace std;

int main()
{
    rs2::pipeline p;
    p.start();
    rs2::frameset frames = p.wait_for_frames();
    rs2::depth_frame depth = frames.get_depth_frame();
    float width = depth.get_width();
    float height = depth.get_height();
    float dist_to_center = depth.get_distance(width / 2, height / 2);
    std::cout << "The camera is facing an object " << dist_to_center
              << " meters away" << endl << endl;
    return 0;
}
=================================================

Multicam
The Multicam example uses OpenGL with glfw in addition to the Realsense library.


================================================
cmake_minimum_required(VERSION 3.5)

project(multicam LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# finds OpenGL, GLU and X11
find_package(OpenGL REQUIRED)
if(NOT OPENGL_FOUND)
    message("ERROR: OpenGL not found")
endif(NOT OPENGL_FOUND)
set(GL_LIBRARY GL GLU X11)


find_package(realsense2 REQUIRED )

add_executable(multicam main.cpp)
target_link_libraries(multicam realsense2 glfw ${GL_LIBRARY} m)
================================================


MNF analysis on plastic reflectance spectra

I have to try to work on plastic reflectance spectra, and the first question is: which bands are significant? I started from the ...