
Tuesday, July 22, 2025

Stack images with OpenCV

To reduce noise, as is done in astrophotography, here is a simple way to stack images: the program below averages all the PNG frames found in the "images" folder, saves the averaged result, and then applies CLAHE to boost the local contrast.
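A quick note of mine on why averaging works (not in the original post, just the standard argument): if each frame carries independent, zero-mean noise with standard deviation sigma, averaging N frames leaves the signal unchanged and reduces the noise to

    sigma_avg = sigma / sqrt(N)

so stacking 16 frames cuts the random noise roughly by a factor of 4.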

 

g++ stack_images.cpp -o stack_images `pkg-config --cflags --libs opencv4` -std=c++17


 stack_images.cpp

#include <opencv2/opencv.hpp>
#include <filesystem>
#include <vector>
#include <iostream>

namespace fs = std::filesystem;

int main() {
    std::string folder = "images"; // Folder containing your PNGs
    std::vector<cv::Mat> images;

    // Load all PNGs from folder
    for (const auto& entry : fs::directory_iterator(folder)) {
        if (entry.path().extension() == ".png") {
            cv::Mat img = cv::imread(entry.path().string(), cv::IMREAD_UNCHANGED);
            if (!img.empty()) {
                images.push_back(img);
                std::cout << "Loaded: " << entry.path() << std::endl;
            }
        }
    }

    if (images.empty()) {
        std::cerr << "No images loaded!" << std::endl;
        return 1;
    }

    // Use first image size/type as reference
    cv::Size imgSize = images[0].size();
    int imgType = images[0].type();

    // Convert all to CV_32F or CV_32FCn for precision
    cv::Mat accumulator = cv::Mat::zeros(imgSize, CV_MAKETYPE(CV_32F, images[0].channels()));

    for (const auto& img : images) {
        cv::Mat imgFloat;
        img.convertTo(imgFloat, CV_32F);
        accumulator += imgFloat;
    }

    // Compute average
    accumulator /= static_cast<float>(images.size());

    // Convert back to original depth
    cv::Mat result;
    accumulator.convertTo(result, images[0].type());

    // Save result
    cv::imwrite("stacked_avg.png", result);
    std::cout << "Saved stacked image as stacked_avg.png" << std::endl;

    cv::Mat claheResult;
    if (result.channels() == 3) {
        // Convert to Lab color space
        cv::Mat lab;
        cv::cvtColor(result, lab, cv::COLOR_BGR2Lab);
        std::vector<cv::Mat> lab_planes;
        cv::split(lab, lab_planes);

        // CLAHE on L channel
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
        clahe->setClipLimit(2.0);
        clahe->apply(lab_planes[0], lab_planes[0]);

        // Merge and convert back
        cv::merge(lab_planes, lab);
        cv::cvtColor(lab, claheResult, cv::COLOR_Lab2BGR);
    } else {
        // Grayscale CLAHE
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
        clahe->setClipLimit(2.0);
        clahe->apply(result, claheResult);
    }

    cv::imwrite("stacked_avg_clahe.png", claheResult);

    return 0;
}


 

Monday, July 21, 2025

Very large calibration boards

To create very large ChArUco calibration boards you can use a projector and a white wall. The important thing is that the resolution of the projected image matches the projector's native resolution (so there is no interpolation at the pixel edges).


This program, for OpenCV 4.10, shows the image on screen and saves it as a PNG file. To display it full screen you can also use the command feh --fullscreen immagine.png

#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <iostream>

int main() {
    int squaresX = 7;
    int squaresY = 5;
    float squareLength = 60; // in pixels (screen units)
    float markerLength = 40; // in pixels
    auto dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);

    // Create ChArUco board using constructor (OpenCV 4.x syntax)
    cv::aruco::CharucoBoard board(cv::Size(squaresX, squaresY), squareLength, markerLength, dictionary);

    cv::Mat boardImage;
    // Generate the board image
    board.generateImage(cv::Size(1920, 1080), boardImage, 10, 1);

    // Check if image was generated successfully
    if (boardImage.empty()) {
        std::cerr << "Failed to generate board image" << std::endl;
        return -1;
    }
    cv::imwrite("charuco_board.png", boardImage);

    // Display the board
    cv::namedWindow("Projected Charuco Board", cv::WINDOW_NORMAL);
    cv::setWindowProperty("Projected Charuco Board", cv::WND_PROP_FULLSCREEN, cv::WINDOW_FULLSCREEN);
    cv::imshow("Projected Charuco Board", boardImage);
    cv::waitKey(0);
    cv::destroyAllWindows();
    return 0;
}


 Makefile

# Makefile for OpenCV 4.10.0 ChArUco Board Program

# Compiler
CXX = g++

# Program name
TARGET = charuco_board

# Source files
SOURCES = main.cpp

# Object files
OBJECTS = $(SOURCES:.cpp=.o)

# OpenCV version
OPENCV_VERSION = 4.10.0

# Compiler flags
CXXFLAGS = -std=c++11 -Wall -Wextra -O2

# OpenCV flags (using pkg-config)
OPENCV_CFLAGS = `pkg-config --cflags opencv4`
OPENCV_LIBS = `pkg-config --libs opencv4`

# Alternative manual OpenCV flags if pkg-config is not available
# Uncomment these lines and comment out the pkg-config lines above if needed
# OPENCV_INCLUDE = -I/usr/local/include/opencv4
# OPENCV_LIBS = -L/usr/local/lib -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_aruco

# Default target
all: $(TARGET)

# Link the program
$(TARGET): $(OBJECTS)
	$(CXX) $(OBJECTS) -o $(TARGET) $(OPENCV_LIBS)

# Compile source files
%.o: %.cpp
	$(CXX) $(CXXFLAGS) $(OPENCV_CFLAGS) -c $< -o $@

# Clean build files
clean:
	rm -f $(OBJECTS) $(TARGET)

# Install dependencies (Ubuntu/Debian)
install-deps:
	sudo apt-get update
	sudo apt-get install build-essential cmake pkg-config
	sudo apt-get install libopencv-dev libopencv-contrib-dev

# Check OpenCV installation
check-opencv:
	@echo "Checking OpenCV installation..."
	@pkg-config --modversion opencv4 2>/dev/null || echo "OpenCV not found via pkg-config"
	@echo "OpenCV compile flags:"
	@pkg-config --cflags opencv4 2>/dev/null || echo "pkg-config not available"
	@echo "OpenCV link flags:"
	@pkg-config --libs opencv4 2>/dev/null || echo "pkg-config not available"

# Run the program
run: $(TARGET)
	./$(TARGET)

# Debug build
debug: CXXFLAGS += -g -DDEBUG
debug: $(TARGET)

# Release build
release: CXXFLAGS += -O3 -DNDEBUG
release: $(TARGET)

# Help
help:
	@echo "Available targets:"
	@echo "  all          - Build the program (default)"
	@echo "  clean        - Remove build files"
	@echo "  run          - Build and run the program"
	@echo "  debug        - Build with debug flags"
	@echo "  release      - Build with release optimization"
	@echo "  install-deps - Install OpenCV dependencies (Ubuntu/Debian)"
	@echo "  check-opencv - Check OpenCV installation"
	@echo "  help         - Show this help message"

# Phony targets
.PHONY: all clean run debug release install-deps check-opencv help


Tuesday, July 15, 2025

AprilTag vs ArUco tag comparison

I tried modifying the opencv_demo.cpp program to get a direct performance comparison between AprilTag and ArUco tags.

The idea is to grab frames from a RealSense camera in which ArUco and AprilTag markers are visible at the same time, under the same lighting and resolution, to see which of the two families performs better.

In the end, the percentage error std_dev/mean (over 150 frames) turns out to be:

Arucotag = 0.0033%

AprilTag = 0.002%
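For reference, the std_dev/mean figure can be computed from the two CSV files written by the program below (dati_april.csv and dati_aruco.csv, one distance value per line) with a small helper like this sketch. It is my own utility, not part of the original opencv_demo modification, and the filename csv_stats.cpp is just an example; it builds with a plain g++ csv_stats.cpp -o csv_stats.

// csv_stats.cpp - minimal sketch (assumption: one distance value per line,
// as written to dati_april.csv / dati_aruco.csv by the program below)
#include <cmath>
#include <fstream>
#include <iostream>
#include <vector>

int main(int argc, char *argv[]) {
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " file.csv" << std::endl;
        return 1;
    }
    std::ifstream in(argv[1]);
    std::vector<double> values;
    double d;
    while (in >> d) values.push_back(d);
    if (values.empty()) {
        std::cerr << "No data read from " << argv[1] << std::endl;
        return 1;
    }

    // Mean of the distances
    double mean = 0.0;
    for (double v : values) mean += v;
    mean /= values.size();

    // Population standard deviation
    double var = 0.0;
    for (double v : values) var += (v - mean) * (v - mean);
    var /= values.size();
    double std_dev = std::sqrt(var);

    std::cout << "samples: " << values.size()
              << "  mean: " << mean
              << "  std_dev: " << std_dev
              << "  std_dev/mean: " << 100.0 * std_dev / mean << " %" << std::endl;
    return 0;
}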

 

/* Copyright (C) 2013-2016, The Regents of The University of Michigan.
All rights reserved.
This software was developed in the APRIL Robotics Lab under the
direction of Edwin Olson, ebolson@umich.edu. This software may be
available under alternative licensing terms; contact the address above.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the Regents of The University of Michigan.
*/

// v4l2-ctl -A
// v4l2-ctl --device=/dev/video1 --all

#include <iostream>

#include "opencv2/opencv.hpp"
#include <opencv2/aruco.hpp>
#include <opencv2/objdetect/aruco_detector.hpp>
#include <librealsense2/rs.hpp>

#include <iostream>
#include <fstream>


extern "C" {
#include "apriltag.h"
#include "tag36h11.h"
#include "tag25h9.h"
#include "tag16h5.h"
#include "tagCircle21h7.h"
#include "tagCircle49h12.h"
#include "tagCustom48h12.h"
#include "tagStandard41h12.h"
#include "tagStandard52h13.h"
#include "apriltag_pose.h"

#include "common/getopt.h"
}

using namespace std;
using namespace cv;


int main(int argc, char *argv[])
{
    std::ofstream dati_april("dati_april.csv");
    std::ofstream dati_aruco("dati_aruco.csv");

    cv::aruco::Dictionary dictionary;
    cv::aruco::DetectorParameters detector_params;
    cv::aruco::ArucoDetector detector;

    double fx = 671.29320421; // focal length x
    double fy = 672.01634326; // focal length y
    double cx = 640;
    double cy = 360;
    double tagsize = 0.180;      // in meters
    float tagsize_aruco = 0.150; // in meters

    std::vector<float> x_vec;
    std::vector<float> y_vec;
    std::vector<float> z_vec;
    std::vector<float> id_vec;

    std::vector<cv::Vec3d> rvecs, tvecs;

    double ref_x = 0.0;
    double ref_y = 0.0;
    double ref_z = 0.0;

    double aruco_ref_x = 0.0;
    double aruco_ref_y = 0.0;
    double aruco_ref_z = 0.0;

    int fontface = FONT_HERSHEY_SCRIPT_SIMPLEX;
    double fontscale = 1.0;
    String text;

    getopt_t *getopt = getopt_create();

    getopt_add_bool(getopt, 'h', "help", 0, "Show this help");
    getopt_add_bool(getopt, 'd', "debug", 1, "Enable debugging output (slow)");
    getopt_add_bool(getopt, 'q', "quiet", 0, "Reduce output");
    getopt_add_string(getopt, 'f', "family", "tag36h11", "Tag family to use");
    getopt_add_int(getopt, 't', "threads", "1", "Use this many CPU threads");
    getopt_add_double(getopt, 'x', "decimate", "2.0", "Decimate input image by this factor");
    getopt_add_double(getopt, 'b', "blur", "0.0", "Apply low-pass blur to input");
    getopt_add_int(getopt, 'c', "camera", "1", "Select camera");

    getopt_add_bool(getopt, '0', "refine-edges", 1, "Spend more time trying to align edges of tags");

    if (!getopt_parse(getopt, argc, argv, 1) ||
        getopt_get_bool(getopt, "help")) {
        printf("Usage: %s [options]\n", argv[0]);
        getopt_do_usage(getopt);
        exit(0);
    }
    //VideoCapture inputVideo;
    rs2::pipeline pipe;
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_BGR8, 30);

    try {
        pipe.start(cfg);
        cout << "RealSense D415 started at 1280x720 @ 30fps" << endl;
    } catch (const rs2::error &e) {
        cerr << "RealSense error: " << e.what() << endl;
        return -1;
    }

    dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    detector = cv::aruco::ArucoDetector(dictionary);

    // Camera intrinsics for ArUco (same as AprilTag)
    cv::Mat camMatrix = (cv::Mat_<double>(3,3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);
    cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F); // Assuming no distortion

    // Initialize tag detector with options
    apriltag_family_t *tf = NULL;
    const char *famname = getopt_get_string(getopt, "family");
    if (!strcmp(famname, "tag36h11")) {
        tf = tag36h11_create();
    } else if (!strcmp(famname, "tag25h9")) {
        tf = tag25h9_create();
    } else if (!strcmp(famname, "tag16h5")) {
        tf = tag16h5_create();
    } else if (!strcmp(famname, "tagCircle21h7")) {
        tf = tagCircle21h7_create();
    } else if (!strcmp(famname, "tagCircle49h12")) {
        tf = tagCircle49h12_create();
    } else if (!strcmp(famname, "tagStandard41h12")) {
        tf = tagStandard41h12_create();
    } else if (!strcmp(famname, "tagStandard52h13")) {
        tf = tagStandard52h13_create();
    } else if (!strcmp(famname, "tagCustom48h12")) {
        tf = tagCustom48h12_create();
    } else {
        printf("Unrecognized tag family name. Use e.g. \"tag36h11\".\n");
        exit(-1);
    }

    apriltag_detector_t *td = apriltag_detector_create();
    apriltag_detector_add_family(td, tf);
    td->quad_decimate = getopt_get_double(getopt, "decimate");
    td->quad_sigma = getopt_get_double(getopt, "blur");
    td->nthreads = getopt_get_int(getopt, "threads");
    td->debug = getopt_get_bool(getopt, "debug");
    td->refine_edges = getopt_get_bool(getopt, "refine-edges");

    Mat frame, gray;

    vector<Mat> allImages;
    Size imageSize;
    while (true) {
        rs2::frameset frames = pipe.wait_for_frames();
        rs2::video_frame color_frame = frames.get_color_frame();
        if (!color_frame) continue;

        // Convert to cv::Mat
        Mat image(Size(1280, 720), CV_8UC3, (void*)color_frame.get_data(), Mat::AUTO_STEP);
        Mat imageCopy;
        cvtColor(image, gray, COLOR_BGR2GRAY);

        // Make an image_u8_t header for the Mat data
        image_u8_t im = { .width = gray.cols,
                          .height = gray.rows,
                          .stride = gray.cols,
                          .buf = gray.data
                        };

        std::vector<std::vector<cv::Point2f>> corners;
        std::vector<int> ids;
        std::vector<std::vector<cv::Point2f>> rejected;
        detector.detectMarkers(image, corners, ids, rejected);

        int test = 0;

        if (!ids.empty()) {
            std::cout << "Aruco found" << endl;
            cv::aruco::drawDetectedMarkers(image, corners, ids);
            cv::aruco::estimatePoseSingleMarkers(corners, 0.15, camMatrix, distCoeffs, rvecs, tvecs);
            for (size_t i = 0; i < ids.size(); ++i) {
                std::cout << "Id " << ids[i] << " Pos: (" << std::fixed << std::setprecision(6)
                          << tvecs[i][0] << ", " << tvecs[i][1] << ", " << tvecs[i][2] << ")" << endl;
                if (ids[i] == 0) {
                    // Marker id 0 is the reference
                    aruco_ref_x = tvecs[i][0];
                    aruco_ref_y = tvecs[i][1];
                    aruco_ref_z = tvecs[i][2];
                    test = 1;
                }
                if (ids[i] == 1) {
                    // Euclidean distance between marker id 1 and the reference marker id 0
                    double distanza_aruco = std::sqrt(std::pow(tvecs[i][0] - aruco_ref_x, 2) +
                                                      std::pow(tvecs[i][1] - aruco_ref_y, 2) +
                                                      std::pow(tvecs[i][2] - aruco_ref_z, 2));
                    putText(image, std::to_string(distanza_aruco), Point(750,100), fontface, fontscale, Scalar(0xff, 0x0, 0), 2);
                    dati_aruco << distanza_aruco << endl;
                }
            }
        }

        zarray_t *detections = apriltag_detector_detect(td, &im);
        cout << zarray_size(detections) << " Apriltags detected" << endl;

        // Draw detection outlines
        for (int i = 0; i < zarray_size(detections); i++) {
            apriltag_detection_t *det;
            zarray_get(detections, i, &det);

            apriltag_detection_info_t info;
            info.det = det;
            info.tagsize = tagsize;
            info.fx = fx;
            info.fy = fy;
            info.cx = cx;
            info.cy = cy;

            apriltag_pose_t pose;
            estimate_tag_pose(&info, &pose);

            // --- Translation ---
            double x = pose.t->data[0];
            double y = pose.t->data[1];
            double z = pose.t->data[2];

            std::cout << det->id << ";" << x << ";" << y << ";" << z;

            // --- Convert rotation matrix to yaw-pitch-roll ---
            double r00 = pose.R->data[0], r01 = pose.R->data[1], r02 = pose.R->data[2];
            double r10 = pose.R->data[3], r11 = pose.R->data[4], r12 = pose.R->data[5];
            double r20 = pose.R->data[6], r21 = pose.R->data[7], r22 = pose.R->data[8];

            double yaw = atan2(r10, r00);
            double pitch = atan2(-r20, sqrt(r21 * r21 + r22 * r22));
            double roll = atan2(r21, r22);

            std::cout << ";" << yaw * 180.0 / M_PI
                      << ";" << pitch * 180.0 / M_PI
                      << ";" << roll * 180.0 / M_PI << "\n";

            if (det->id == 0) {
                ref_x = x;
                ref_y = y;
                ref_z = z;
            } else {
                x_vec.push_back(x);
                y_vec.push_back(y);
                z_vec.push_back(z);
                id_vec.push_back(det->id);
            }

            matd_destroy(pose.R);
            matd_destroy(pose.t);

            line(image, Point(det->p[0][0], det->p[0][1]),
                 Point(det->p[1][0], det->p[1][1]),
                 Scalar(0, 0xff, 0), 2);
            line(image, Point(det->p[0][0], det->p[0][1]),
                 Point(det->p[3][0], det->p[3][1]),
                 Scalar(0, 0, 0xff), 2);
            line(image, Point(det->p[1][0], det->p[1][1]),
                 Point(det->p[2][0], det->p[2][1]),
                 Scalar(0xff, 0, 0), 2);
            line(image, Point(det->p[2][0], det->p[2][1]),
                 Point(det->p[3][0], det->p[3][1]),
                 Scalar(0xff, 0, 0), 2);

            stringstream ss;
            ss << det->id;
            text = ss.str();
            int baseline;
            Size textsize = getTextSize(text, fontface, fontscale, 2, &baseline);
            putText(image, text, Point(det->c[0] - textsize.width/2, det->c[1] + textsize.height/2),
                    fontface, fontscale, Scalar(0xff, 0x99, 0), 2);
        }

        std::cout << "\n";
        for (size_t t = 0; t < x_vec.size(); t++) {
            double distanza = std::sqrt(std::pow(x_vec[t] - ref_x, 2) + std::pow(y_vec[t] - ref_y, 2) + std::pow(z_vec[t] - ref_z, 2));
            std::cout << id_vec[t] << ";" << distanza << "\n";
            switch ((int)id_vec[t]) {
                case 1:
                    putText(image, std::to_string(distanza), Point(50,100), fontface, fontscale, Scalar(0xff, 0x99, 0), 2);
                    dati_april << distanza << endl;
                    break;
                case 2:
                    putText(image, std::to_string(distanza), Point(50,200), fontface, fontscale, Scalar(0xff, 0x99, 0), 2);
                    break;
                default:
                    break;
            }
        }
        x_vec.clear();
        y_vec.clear();
        z_vec.clear();
        id_vec.clear();

        apriltag_detections_destroy(detections);

        imshow("Tag Detections", image);
        if (waitKey(30) >= 0)
            break;
    }

    apriltag_detector_destroy(td);

    if (!strcmp(famname, "tag36h11")) {
        tag36h11_destroy(tf);
    } else if (!strcmp(famname, "tag25h9")) {
        tag25h9_destroy(tf);
    } else if (!strcmp(famname, "tag16h5")) {
        tag16h5_destroy(tf);
    } else if (!strcmp(famname, "tagCircle21h7")) {
        tagCircle21h7_destroy(tf);
    } else if (!strcmp(famname, "tagCircle49h12")) {
        tagCircle49h12_destroy(tf);
    } else if (!strcmp(famname, "tagStandard41h12")) {
        tagStandard41h12_destroy(tf);
    } else if (!strcmp(famname, "tagStandard52h13")) {
        tagStandard52h13_destroy(tf);
    } else if (!strcmp(famname, "tagCustom48h12")) {
        tagCustom48h12_destroy(tf);
    }

    getopt_destroy(getopt);
    dati_april.close();
    dati_aruco.close();

    return 0;
}


To build it, modify CMakeLists.txt to include librealsense:

# opencv_demo
if(OpenCV_FOUND)
    find_package(realsense2 REQUIRED) # Add this line
    add_executable(opencv_demo example/opencv_demo.cc)
    target_include_directories(opencv_demo PRIVATE ${realsense2_INCLUDE_DIR})
    target_link_libraries(opencv_demo
        apriltag
        ${OpenCV_LIBRARIES}
        realsense2 # Link RealSense2
    )

    set_target_properties(opencv_demo PROPERTIES CXX_STANDARD 11)
    install(TARGETS opencv_demo RUNTIME DESTINATION bin)

endif(OpenCV_FOUND)

MNF analysis of plastic reflectance spectra

I have to work on reflectance spectra of plastics, and the first question is: which bands are significant? I started from the ...