lunedì 23 settembre 2024

Spot Robot Calibration Board Boston Dynamics e OpenCV

Ho avuto la fortuna di poter usare un'enorme ChArUco Board legata al cane robot Spot della Boston Dynamics


Nonostante non sia esplicitamente indicato si tratta di una Charuco Board 4x4_100 di 9 colonne e 4 righe

Per usarla per calibrare una camera si puo' usare il seguente script OpenCV

Attenzione: per funzionare lo script necessita setLegacyPattern(True)

import os
import numpy as np
import cv2

# ------------------------------
# ENTER YOUR REQUIREMENTS HERE:
ARUCO_DICT = cv2.aruco.DICT_4X4_250  # marker family printed on the board
SQUARES_VERTICALLY = 9               # ChArUco squares along one side
SQUARES_HORIZONTALLY = 4             # ChArUco squares along the other side
SQUARE_LENGTH = 0.115                # chessboard square side (presumably meters — confirm)
MARKER_LENGTH = 0.09                 # ArUco marker side, same unit as SQUARE_LENGTH
# ...
PATH_TO_YOUR_IMAGES = './hikvision'  # folder containing the .jpg calibration shots
# ------------------------------

def calibrate_and_save_parameters():
    """Calibrate a camera from ChArUco board images and save the results.

    Reads every ``.jpg`` in PATH_TO_YOUR_IMAGES, detects ArUco markers,
    interpolates the ChArUco chessboard corners and runs the ChArUco
    calibration. The camera matrix and distortion coefficients are saved
    as ``.npy`` files.
    """
    # Define the aruco dictionary and charuco board.
    dictionary = cv2.aruco.getPredefinedDictionary(ARUCO_DICT)
    board = cv2.aruco.CharucoBoard(
        (SQUARES_VERTICALLY, SQUARES_HORIZONTALLY),
        SQUARE_LENGTH, MARKER_LENGTH, dictionary)
    # The Spot board uses the pre-OpenCV-4.6 ("legacy") marker layout.
    board.setLegacyPattern(True)
    params = cv2.aruco.DetectorParameters()
    params.cornerRefinementMethod = 0  # no subpixel corner refinement

    image_files = [os.path.join(PATH_TO_YOUR_IMAGES, f)
                   for f in os.listdir(PATH_TO_YOUR_IMAGES)
                   if f.endswith(".jpg")]
    image_files.sort()

    all_charuco_corners = []
    all_charuco_ids = []

    image = None
    for image_file in image_files:
        print(image_file)
        image = cv2.imread(image_file)
        marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(
            image, dictionary, parameters=params)

        # detectMarkers returns None (not an empty array) when nothing is
        # detected, so guard against that before calling len().
        if marker_ids is not None and len(marker_ids) > 0:
            charuco_retval, charuco_corners, charuco_ids = \
                cv2.aruco.interpolateCornersCharuco(
                    marker_corners, marker_ids, image, board)

            if charuco_retval:
                all_charuco_corners.append(charuco_corners)
                all_charuco_ids.append(charuco_ids)

    # Calibrate camera. OpenCV expects imageSize as (width, height),
    # while image.shape is (height, width, channels).
    retval, camera_matrix, dist_coeffs, rvecs, tvecs = \
        cv2.aruco.calibrateCameraCharuco(
            all_charuco_corners, all_charuco_ids, board,
            (image.shape[1], image.shape[0]), None, None)

    # Save calibration data.
    np.save('hik_camera_matrix.npy', camera_matrix)
    np.save('hik_dist_coeffs.npy', dist_coeffs)

    cv2.destroyAllWindows()

calibrate_and_save_parameters()







martedì 17 settembre 2024

RAG con Ollama Mistral e LangChain

Una altra prova usando questo repository https://github.com/CallumJMac/lessons

Il folder di riferimento e' lessons/1. RAG/examples/pixegami/PDF_files_langchain/rag-tutorial-v2-main

I files Pdf vanno messi nel folder data

Poi si lancia populate_database.py. La persistenza e' data da ChromaDB

import argparse
import os
import shutil
from langchain.document_loaders.pdf import PyPDFDirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.schema.document import Document
from get_embedding_function import get_embedding_function
from langchain.vectorstores.chroma import Chroma


CHROMA_PATH = "chroma"  # directory where Chroma persists the vector store
DATA_PATH = "data"      # folder containing the source PDF files


def main():
    """Entry point: optionally reset the DB, then ingest the PDFs."""
    # Check if the database should be cleared (via the --reset flag).
    parser = argparse.ArgumentParser()
    parser.add_argument("--reset", action="store_true", help="Reset the database.")
    args = parser.parse_args()
    if args.reset:
        print("✨ Clearing Database")
        clear_database()

    # Create (or update) the data store: load -> split -> index.
    add_to_chroma(split_documents(load_documents()))


def load_documents():
    """Load every PDF found in DATA_PATH as LangChain documents."""
    return PyPDFDirectoryLoader(DATA_PATH).load()


def split_documents(documents: list[Document]):
    """Split documents into 800-char chunks with 80-char overlap."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=80,
        length_function=len,
        is_separator_regex=False,
    )
    return splitter.split_documents(documents)


def add_to_chroma(chunks: list[Document]):
    """Add to Chroma only the chunks whose IDs are not already stored."""
    # Open (or create) the persistent database.
    db = Chroma(
        persist_directory=CHROMA_PATH,
        embedding_function=get_embedding_function()
    )

    # Tag every chunk with a deterministic "source:page:index" ID.
    chunks_with_ids = calculate_chunk_ids(chunks)

    # IDs are always returned, even with an empty include list.
    existing_ids = set(db.get(include=[])["ids"])
    print(f"Number of existing documents in DB: {len(existing_ids)}")

    # Keep only the chunks that are not in the DB yet.
    new_chunks = [c for c in chunks_with_ids
                  if c.metadata["id"] not in existing_ids]

    if new_chunks:
        print(f"👉 Adding new documents: {len(new_chunks)}")
        db.add_documents(new_chunks, ids=[c.metadata["id"] for c in new_chunks])
        db.persist()
    else:
        print("✅ No new documents to add")


def calculate_chunk_ids(chunks):
    """Assign each chunk a deterministic ID and return the same list.

    IDs look like "data/monopoly.pdf:6:2", i.e.
    page source : page number : chunk index within that page.
    The chunk index restarts at 0 on every new (source, page) pair.
    Mutates chunk.metadata in place.
    """
    previous_page = None
    index_on_page = 0

    for chunk in chunks:
        meta = chunk.metadata
        page_id = f"{meta.get('source')}:{meta.get('page')}"

        # Same page as the previous chunk -> bump the counter, else reset.
        index_on_page = index_on_page + 1 if page_id == previous_page else 0
        previous_page = page_id

        # Record the computed ID in the chunk's metadata.
        meta["id"] = f"{page_id}:{index_on_page}"

    return chunks


def clear_database():
    """Delete the Chroma persistence directory, if it exists."""
    if not os.path.exists(CHROMA_PATH):
        return
    shutil.rmtree(CHROMA_PATH)


# Run the ingestion pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()

In seguito si puo' effettuare la query da linea di comando come 

python query_data.py "what's monopoly"


import argparse
from langchain.vectorstores.chroma import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain_community.llms.ollama import Ollama

from get_embedding_function import get_embedding_function

CHROMA_PATH = "chroma"  # must match the path used by populate_database.py

# Prompt sent to the model: {context} is filled with the retrieved chunks,
# {question} with the user's query.
PROMPT_TEMPLATE = """
Answer the question based only on the following context:

{context}

---

Answer the question based on the above context: {question}
"""


def main():
    """CLI entry point: read the query text and run the RAG pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument("query_text", type=str, help="The query text.")
    args = parser.parse_args()
    query_rag(args.query_text)


def query_rag(query_text: str):
    """Answer query_text with Mistral, using the top-5 Chroma chunks as context.

    Prints the response with its source chunk IDs and returns the raw
    response text.
    """
    # Prepare the DB.
    db = Chroma(persist_directory=CHROMA_PATH,
                embedding_function=get_embedding_function())

    # Retrieve the five most similar chunks.
    results = db.similarity_search_with_score(query_text, k=5)

    # Assemble the prompt from the retrieved context.
    context_text = "\n\n---\n\n".join(doc.page_content for doc, _score in results)
    prompt = ChatPromptTemplate.from_template(PROMPT_TEMPLATE).format(
        context=context_text, question=query_text)
    # print(prompt)

    response_text = Ollama(model="mistral").invoke(prompt)

    sources = [doc.metadata.get("id", None) for doc, _score in results]
    print(f"Response: {response_text}\nSources: {sources}")
    return response_text


# Run the query CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()


giusto per dare un'idea questa e' la risposta

Response:  Monopoly is a property trading game from Parker Brothers designed for ages 8 and up, suitable for 2 to 8 players. The gameboard is used along with tokens, houses, hotels, Chance and Community Chest cards, Title Deed cards, play money, and a Banker's tray. Players can choose to play by the classic rules or use the Speed Die for faster gameplay. In Monopoly, the objective is to become the wealthiest player by buying, renting, and selling properties.


Usando LLama3:7b la risposta e' stata

A simple one!

According to the context, Monopoly is a "Property Trading Game" from Parker Brothers.

 





AnythingLLM

Partendo dal post precedente volevo provare un servizio da distribuire via Web. Su youtube molti esempi utilizzano streamlit ma frugando ho trovato un servizio chiavi in mano che 

1) permette l'upload dei pdf via web

2) istruisce la rete 

3) si puo' interagire sempre via web come chat

il servizio si chiama AnythingLLM ed e' distribuito sia come docker che come applicazione desktop (io ho preferito la versione docker)

Attenzione : se si vedono strani errori (tipo  Could not respond to message.fetch failed) il problema puo' essere derivante da poca Ram. In un primo momento stavo provando Gemma (ho 16 Gb, Gemma ne richiede 9 ...pensavo di starci largo) ma il sistema ha iniziato a funzionare solo quando ho installato Mistral con Ollama

 

# One-shot AnythingLLM setup: create the storage dir and launch the container.
# NOTE: the original paste had blank lines inside the docker command, which
# break the backslash line-continuations; the command must be contiguous.
export STORAGE_LOCATION=$HOME/anythingllm && \
mkdir -p $STORAGE_LOCATION && \
touch "$STORAGE_LOCATION/.env" && \
docker run -d -p 3001:3001 \
  --cap-add SYS_ADMIN \
  -v ${STORAGE_LOCATION}:/app/server/storage \
  -v ${STORAGE_LOCATION}/.env:/app/server/.env \
  -e STORAGE_DIR="/app/server/storage" \
  --add-host=host.docker.internal:host-gateway \
  mintplexlabs/anythingllm


Inoltre su Linux per andare all'interfaccia web si puo' usare http://localhost:3001 

Al primo avvio si deve configurare il modello da usare (se locale come Ollama o remoto). In questa fase ho avuto qualche problema in quanto AnythingLLM non riusciva a trovare i modelli gia' installati sulla macchina tramite pull. La soluzione e' stata quella di usare l'IP http://172.17.0.1:11434 al posto di localhost


 

Al posto di localhost ho usato 

Eventuali conflitti con Ollama possono essere risolti da qui

Di seguito uno screenshot di una "conversazione" con Mistral




 

 

 

 

 

 

 

RAG con Ollama Gemma2 e LangChain

Aggiornamento:

Per poter avere le risposte in italiano (al posto dell'inglese di default) basta modificare il context template

 <|context|>
You are an AI assistant that follows instruction extremely well.
Please be truthful and give direct answers
Answer in italian
</s>

questa la risposta alla domanda "Descrivimi il monopoly"

Monopoly è un gioco da tavolo di strategia e fortuna.

I giocatori comprano, vendono e scambiano proprietà immobiliari sul tabellone, cercando di accumulare il maggior patrimonio possibile.  

Si lanciano i dadi per muoversi sul tabellone, acquistando le caselle che incontrano. Alcune caselle comportano pagamenti o attività specifiche, come andare in prigione o pagare tasse.  

Il gioco prosegue finché tutti tranne uno dei giocatori sono falliti a causa del debito. Il vincitore è il giocatore che rimane con denaro e proprietà alla fine.

Per la cronaca il Pdf delle istruzioni di Monopoly era in inglese 

=========================================  

 Seguendo un po' la moda del momento volevo provare a fare una applicazione RAG (Retrieval Augmented Generation) partendo dal modello Gemma2 di Google e dandogli in pasto dei Pdf personali per il retraining. Per questa prova ho seguito le indicazioni contenute in questo notebook

Prima cosa: per usare Gemma2 la macchina deve avere almeno 9 Gb di ram liberi (giusto per la cronaca per usare il modello LLama 70b sono necessari almeno 40 Gb di ram liberi..diciamo che non se ne parla nemmeno)


Secondo: per installare i modelli la cosa piu' comoda e' utilizzare Ollama

Terzo : molti esempi si basano su API esterne come https://jina.ai/, OpenAI, ma in questo caso ci sono pacchetti gratuiti e poi si deve pagare il servizio. Cercavo di mettere in piedi una soluzione offline svincolata da servizi cloud di terze parti

Si scaricano quindi il modello ed un text encoder (in questo caso Nomic-Embed-Text)

ollama pull gemma2 
ollama pull nomic-embed-text

Si installano poi i pacchetti delle librerie (meglio in un venv)

pip install langchain chromadb langchain_community pypdf

ChromaDb e' il database in cui vengono salvati i vectorstore per garantire la persistenza dell'apprendimento. E' in pratica un Db orientato all'AI basato su sqlite; una alternativa con un vero server e' Milvus

I file PDF devono essere inseriti nel folder ./Pdf


# -*- coding: utf-8 -*-

from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain.text_splitter import CharacterTextSplitter,RecursiveCharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain.vectorstores import Chroma
from langchain_community.llms import Ollama
from langchain.chains import RetrievalQA, LLMChain

import pathlib
import textwrap
from IPython.display import display
from IPython.display import Markdown



def to_markdown(text):
    """Render text as an indented Markdown blockquote (for notebook display)."""
    quoted = text.replace('•', ' *')
    return Markdown(textwrap.indent(quoted, '> ', predicate=lambda _: True))


# Load every PDF in ./Pdf and split it into small overlapping chunks.
loader = PyPDFDirectoryLoader("./Pdf")
docs = loader.load()

text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
chunks = text_splitter.split_documents(docs)

# Embed the chunks with Nomic and index them in Chroma.
embeddings = OllamaEmbeddings(model="nomic-embed-text")

vectorstore = Chroma.from_documents(chunks, embeddings)

# Quick sanity check: raw similarity search against the store.
query = "who is at risk of heart disease"
search = vectorstore.similarity_search(query)

to_markdown(search[0].page_content)

"""## Retriever"""

# Retriever returning the 5 most similar chunks.
retriever = vectorstore.as_retriever(
    search_kwargs={'k': 5}
)

retriever.get_relevant_documents(query)

"""## Large Language Model - Open Source

## RAG Chain
"""

from langchain_community.llms import Ollama
llm = Ollama(model="gemma2")

from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from langchain.prompts import ChatPromptTemplate

# Prompt template: {context} is filled by the retriever, {query} by the user.
template = """
<|context|>
You are an AI assistant that follows instruction extremely well.
Please be truthful and give direct answers
</s>
<|user|>
{query}
</s>
<|assistant|>
"""

prompt = ChatPromptTemplate.from_template(template)

# LCEL pipeline: retrieve -> fill prompt -> run model -> plain string.
rag_chain = (
    {"context": retriever, "query": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

response = rag_chain.invoke("why should I care about my heart health")

to_markdown(response)

import sys

# Simple interactive loop: empty input is ignored, 'exit' quits.
while True:
    user_input = input(f"Input Prompt: ")
    if user_input == 'exit':
        print('Exiting')
        sys.exit()
    if user_input == '':
        continue
    result = rag_chain.invoke(user_input)
    print("Answer: ", result)



Ho inserito due Pdf (regole di Monopoly e Ticket to Ride), questo un esempio di interazione

===========================================

Input Prompt: give me the rules of Monopoly
Answer:  ##  Monopoly Rules (Simplified):

**Goal:** Be the last player standing with money! Drive your opponents into bankruptcy by buying properties, building houses and hotels, and charging rent.

**Gameplay:**

1. **Roll Dice & Move:** Players roll two dice and move their piece around the board clockwise.
2. **Land on Property?**
    * **Unowned:** You can buy it for the listed price.
    * **Owned:** Pay rent to the owner based on the property's value and any buildings.
3. **Chance & Community Chest:** Draw a card and follow its instructions. These can be good or bad!
4. **Go To Jail:**  Land on "Go To Jail," draw a "Go To Jail" card, or roll doubles three times in a row. While in jail, you can't collect rent or move until you pay a fine, use a "Get Out of Jail Free" card, or roll doubles.
5. **Build Houses & Hotels:** Once you own all properties in a color group, you can start building houses and hotels. These increase rent significantly!
6. **Bankruptcy:** If you can't afford to pay your debts, you're bankrupt and out of the game.

**Key Elements:**

* **Properties:** Divided into color groups, each with different rent values.
* **Railroads & Utilities:** Pay rent based on dice rolls.
* **Taxes:**  Pay a set amount when landing on specific spaces.
* **Free Parking:** A safe space where you can collect any "Free Parking" money (rules vary).


**Variations:**

Many variations of Monopoly exist, with different rules and themes. Be sure to clarify the specific rules being used before starting a game!

Let me know if you'd like more detail on any specific aspect of the rules!

===========================================

Adesso rendiamo le cose piu' difficili...domanda in italiano e richiesta di trovare le differenze tra i due giochi. La domanda viene correttamente interpretata anche se in italiano ed in effetti la risposta fa una sintesi delle differenze tra i due giochi

===========================================

Answer:  Here are the key differences between Monopoly and Ticket to Ride:

**Monopoly:**

* **Goal:** Become the richest player by buying properties, developing them, and charging rent to other players.
* **Gameplay:**  Players roll dice, move around a board, buy properties, build houses and hotels, and collect rent from opponents who land on their spaces.
* **Emphasis:**  Strategic property acquisition, negotiation, and bankrupting opponents.

**Ticket to Ride:**

* **Goal:** Score the most points by claiming railway routes connecting cities across a map.
* **Gameplay:** Players draw colored train cards and use them to claim routes. Longer routes earn more points, and completing destination tickets (secret routes) awards bonus points.
* **Emphasis:** Route building, strategic card management, and fulfilling destination goals.


**In short:** Monopoly is about accumulating wealth and bankrupting opponents, while Ticket to Ride is about building railway networks and strategically claiming routes. 


venerdì 6 settembre 2024

Antitaccheggio

Mentre ero fuori per lavoro mi sono comprato un metro a nastro perche' lo avevo dimenticato....tornato a casa (e lontano qualche decina di Km) mi sono accorto che era rimasto l'antitaccheggio...tornare indietro era fuori discussione e cosi' e' partito il tentativo di rimuoverlo da solo

 


Pensavo fosse del tipo magnetico ma dopo un po' di studio ho visto che era di tipo meccanico e si doveva inserire una lama ricurva per sganciare il fermo...piu' semplice aprirlo dalla saldatura

Il sistema di ritenzione del perno e' molto semplice ma dannatamente efficace..in pratica l'ago si inserisce in una asola a coda di rondine
Nella parte allungata e' invece inserito un magnete con un avvolgimento in rame (con i due capi isolati tra di loro)



Questo e' il componente che innesca l'allarme


 

 

 

 

 

sabato 31 agosto 2024

Aruco Tag con Opencv (nuova versione)

Ho riscritto per la nuova versione di OpenCV il programma di estrazione dei dati degli Aruco Tag

Il programma e' completamente parametrizzato (con valori di default) ed ha un output in csv in cui sono inserite le coordinate 2D immagine, coordinate 3D nel sistema di riferimento della camera, angoli di roll,pitch ed yaw

 

from os import listdir
from os.path import isfile, join

import numpy as np
import argparse
import cv2
import sys
import math
import os

# Map from the CLI --type string to the corresponding OpenCV dictionary id.
ARUCO_DICT = {
"DICT_4X4_50": cv2.aruco.DICT_4X4_50,
"DICT_4X4_100": cv2.aruco.DICT_4X4_100,
"DICT_4X4_250": cv2.aruco.DICT_4X4_250,
"DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
"DICT_5X5_50": cv2.aruco.DICT_5X5_50,
"DICT_5X5_100": cv2.aruco.DICT_5X5_100,
"DICT_5X5_250": cv2.aruco.DICT_5X5_250,
"DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
"DICT_6X6_50": cv2.aruco.DICT_6X6_50,
"DICT_6X6_100": cv2.aruco.DICT_6X6_100,
"DICT_6X6_250": cv2.aruco.DICT_6X6_250,
"DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
"DICT_7X7_50": cv2.aruco.DICT_7X7_50,
"DICT_7X7_100": cv2.aruco.DICT_7X7_100,
"DICT_7X7_250": cv2.aruco.DICT_7X7_250,
"DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
"DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
"DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
"DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
"DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
"DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}

def isRotationMatrix(R):
    """Return True if R is orthogonal within 1e-6 (R^T R == I).

    Note: this checks orthogonality only, not that det(R) == +1.
    """
    Rt = np.transpose(R)
    shouldBeIdentity = np.dot(Rt, R)
    I = np.identity(3, dtype=R.dtype)
    n = np.linalg.norm(I - shouldBeIdentity)
    return n < 1e-6

def rotationMatrixToEulerAngles(R):
    """Convert a 3x3 rotation matrix to Euler angles in radians.

    Returns np.array([x, y, z]); the caller interprets these as
    roll, pitch and yaw. Handles the gimbal-lock case (pitch near
    +/-90 degrees) by fixing z = 0.
    """
    assert (isRotationMatrix(R))
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6  # cos(pitch) ~ 0 -> x and z are coupled
    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])


def estrai_parametri(img):
    """Detect ArUco markers in one image and append a CSV row per marker.

    Each row holds: marker id, 2D image-center coordinates (of the first
    detected marker), 3D camera-frame coordinates (tvec), filename and
    roll/pitch/yaw in degrees.

    NOTE(review): relies on module-level globals set by the driver code:
    `args` (CLI options), `k` (camera matrix), `d` (distortion
    coefficients) and `f` (open CSV file handle).
    """
    image = cv2.imread(img)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    ArucoParams = cv2.aruco.DetectorParameters()
    ArucoParams.cornerRefinementMethod = 0  # no subpixel refinement

    # Axis flip applied before extracting the Euler angles.
    R_flip = np.zeros((3, 3), dtype=np.float32)
    R_flip[0, 0] = 1.0
    R_flip[1, 1] = -1.0
    R_flip[2, 2] = -1.0

    Adict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[args["type"]])
    detector = cv2.aruco.ArucoDetector(Adict, ArucoParams)
    corners, ids, _ = cv2.aruco.detectMarkers(image, Adict, parameters=ArucoParams)

    if len(corners) > 0:
        # Pixel center of the FIRST detected marker only; every CSV row of
        # this image reuses it (original behavior, kept as-is).
        x_sum = corners[0][0][0][0] + corners[0][0][1][0] + corners[0][0][2][0] + corners[0][0][3][0]
        y_sum = corners[0][0][0][1] + corners[0][0][1][1] + corners[0][0][2][1] + corners[0][0][3][1]
        x_centerPixel = x_sum / 4
        y_centerPixel = y_sum / 4

        for i in range(0, len(ids)):
            rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                corners[i], float(args["aruco_dim"]), k, d)
            # Euclidean camera-to-marker distance (computed but not written).
            dist = math.sqrt(
                (tvec[0][0][0] * tvec[0][0][0]) + (tvec[0][0][1] * tvec[0][0][1]) + (tvec[0][0][2] * tvec[0][0][2]))
            str_dist = "{:4.2f}".format(dist)

            # Rotation matrix from rvec, flipped into the reporting convention.
            R_ct = np.matrix(cv2.Rodrigues(rvec)[0])
            R_ct = R_ct.T
            roll_marker, pitch_marker, yaw_marker = rotationMatrixToEulerAngles(R_flip * R_ct)
            str_roll = "%4.2f" % (math.degrees(roll_marker))
            str_pitch = "%4.2f" % (math.degrees(pitch_marker))
            str_yaw = "%4.2f" % (math.degrees(yaw_marker))

            # With --tag > 0 keep only that id; otherwise keep every marker.
            # (The original had two identical f.write branches; folded into one.)
            if int(args["tag"]) <= 0 or int(ids[i]) == int(args["tag"]):
                f.write(str(ids[i]) + ";" + str(x_centerPixel) + ";" + str(y_centerPixel) + ";" + str(
                    tvec[0][0][0]) + ";" + str(tvec[0][0][1]) + ";" + str(tvec[0][0][2]) + ";" + os.path.basename(img) + ";" + str_roll + ";" + str_pitch + ";" + str_yaw + "\n")



# ---- CLI definition and main processing loop ----
ap = argparse.ArgumentParser()
# Uncomment to process a single image instead of a whole folder.
#ap.add_argument("-i", "--image", required=True, help="path to input image containing ArUCo tag")
ap.add_argument("-t", "--type", type=str, default="DICT_4X4_250", help="type of ArUCo tag to detect")
ap.add_argument("-k", "--K_Matrix", type=str, default='./calibration_matrix.npy', help="Path to calibration matrix (numpy file)")
ap.add_argument("-d", "--D_Coeff", type=str, default='./distortion_coefficients.npy', help="Path to distortion coefficients (numpy file)")
# Tag size is the outer edge of the black square, not the inner cells.
ap.add_argument("-a", "--aruco_dim", default=25, type=int, help="ArUco tag dimension")
ap.add_argument("-g", "--tag", default=0, type=str, help="Select only one Id")
ap.add_argument("-p", "--path", default="./", help="Path folder immagini")

args = vars(ap.parse_args())
if ARUCO_DICT.get(args["type"], None) is None:
    print(f"ArUCo tag type '{args['type']}' is not supported")
    sys.exit(0)

# Camera intrinsics produced by a previous calibration run.
calibration_matrix_path = args["K_Matrix"]
distortion_coefficients_path = args["D_Coeff"]
k = np.load(calibration_matrix_path)
d = np.load(distortion_coefficients_path)

# Every regular file in the folder is treated as an image.
immagini = [f for f in listdir(args["path"]) if isfile(join(args["path"], f))]

with open('aruco' + str(args["tag"]) + '.csv', 'w') as f:
    # Fixed: the last column header was mistakenly "Roll" instead of "Yaw".
    f.write("Id;Xpix;Ypix;X;Y;Z;Filename;Roll;Pitch;Yaw\n")
    for i in immagini:
        print(args["path"] + i)
        estrai_parametri(args["path"] + i)
    # No explicit f.close(): the with-statement closes the file.

 

domenica 25 agosto 2024

Passato ferrarista 156/85

Ho ritrovato una foto in cui sono "alla guida" di una F1 Ferrari 156/85 (Alboreto)

In un tempo in cui era possibile farsi una foto senza spendere un patrimonio

 

Qui la macchina con il suo legittimo proprietario


ed un disegno che avevo fatto
 


 

 

Feature Matching OpenCv

Il problema e' il seguente: trovare le differenze tra le due foto. Le due immagini sono state riprese a distanza di oltre un anno ed il ...