Friday, October 11, 2024

ArUco and Image Stacking

One of the biggest problems with using ArUco tags outdoors to measure distances is that sunlight conditions vary widely, and JPEG compression completes the job by piling noise on top of noise.

To limit the problem I tried image stacking, superimposing 8 photos (one per hour, from 09:00 to 16:00) with the script at https://github.com/maitek/image_stacking and applying the program shown here


The improvement is not negligible, since the standard deviation changed as follows:

3.1 m => from 0.19% to 0.16% (0.5 cm)

5.4 m => from 0.35% to 0.28% (1.5 cm)

7.6 m => from 0.71% to 0.38% (2.8 cm)

9.6 m => from 0.5% to 0.41% (4.8 cm)
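
For reference, the percentages above are the standard deviation expressed as a fraction of the measured distance. A minimal sketch of how such a figure can be computed, assuming the repeated distance estimates for one tag pair are collected in a CSV file (the file and column names here are hypothetical):

import numpy as np
import pandas as pd

# repeated ArUco distance estimates for one tag pair (hypothetical file)
d = pd.read_csv('distanze_3m.csv', sep=';')['dist'].to_numpy()
print(100 * d.std() / d.mean())  # standard deviation as % of the measured distance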


import os
import cv2
import numpy as np
from time import time


# Align and stack images with ECC method
# Slower but more accurate
def stackImagesECC(file_list):
    M = np.eye(3, 3, dtype=np.float32)

    first_image = None
    stacked_image = None

    for file in file_list:
        image = cv2.imread(file, 1).astype(np.float32) / 255
        print(file)
        if first_image is None:
            # convert to gray scale floating point image
            first_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            stacked_image = image
        else:
            # Estimate perspective transform
            s, M = cv2.findTransformECC(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), first_image, M, cv2.MOTION_HOMOGRAPHY)
            w, h, _ = image.shape
            # Align image to first image
            image = cv2.warpPerspective(image, M, (h, w))
            stacked_image += image

    stacked_image /= len(file_list)
    stacked_image = (stacked_image * 255).astype(np.uint8)
    return stacked_image


# Align and stack images by matching ORB keypoints
# Faster but less accurate
def stackImagesKeypointMatching(file_list):

    orb = cv2.ORB_create()

    # disable OpenCL because of a bug in ORB in OpenCV 3.1
    cv2.ocl.setUseOpenCL(False)

    stacked_image = None
    first_image = None
    first_kp = None
    first_des = None
    for file in file_list:
        print(file)
        image = cv2.imread(file, 1)
        imageF = image.astype(np.float32) / 255

        # compute the descriptors with ORB
        kp = orb.detect(image, None)
        kp, des = orb.compute(image, kp)

        # create BFMatcher object
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        if first_image is None:
            # Save keypoints for first image
            stacked_image = imageF
            first_image = image
            first_kp = kp
            first_des = des
        else:
            # Find matches and sort them in the order of their distance
            matches = matcher.match(first_des, des)
            matches = sorted(matches, key=lambda x: x.distance)

            src_pts = np.float32(
                [first_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32(
                [kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

            # Estimate perspective transformation
            M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            w, h, _ = imageF.shape
            imageF = cv2.warpPerspective(imageF, M, (h, w))
            stacked_image += imageF

    stacked_image /= len(file_list)
    stacked_image = (stacked_image * 255).astype(np.uint8)
    return stacked_image


# ===== MAIN =====
# Read all files in directory
import argparse


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('input_dir', help='Input directory of images ()')
    parser.add_argument('output_image', help='Output image name')
    parser.add_argument('--method', help='Stacking method ORB (faster) or ECC (more precise)')
    parser.add_argument('--show', help='Show result image', action='store_true')
    args = parser.parse_args()

    image_folder = args.input_dir
    if not os.path.exists(image_folder):
        print("ERROR {} not found!".format(image_folder))
        exit()

    file_list = os.listdir(image_folder)
    file_list = [os.path.join(image_folder, x)
                 for x in file_list if x.endswith(('.jpg', '.png', '.bmp'))]

    if args.method is not None:
        method = str(args.method)
    else:
        method = 'ORB'  # default to the keypoint method

    tic = time()

    if method == 'ECC':
        # Stack images using ECC method
        description = "Stacking images using ECC method"
        print(description)
        stacked_image = stackImagesECC(file_list)

    elif method == 'ORB':
        # Stack images using ORB keypoint method
        description = "Stacking images using ORB method"
        print(description)
        stacked_image = stackImagesKeypointMatching(file_list)

    else:
        print("ERROR: method {} not found!".format(method))
        exit()

    print("Stacked {0} in {1} seconds".format(len(file_list), (time() - tic)))

    print("Saved {}".format(args.output_image))
    cv2.imwrite(str(args.output_image), stacked_image)

    # Show image
    if args.show:
        cv2.imshow(description, stacked_image)
        cv2.waitKey(0)
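
Assuming the eight hourly photos are in a folder named ./stack and the code above is saved as auto_stack.py (both names are mine, adjust to your local copy), a typical invocation is:

python auto_stack.py ./stack stacked.jpg --method ECC

ECC is slower than ORB, but for a static scene shot from a fixed camera the extra alignment accuracy is what matters here.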


Wednesday, October 9, 2024

Cron in Docker

A crontab can be enabled inside a Docker container by adding the following commands to the Dockerfile (note that cron must stay in the foreground with -f, otherwise the container exits as soon as the daemon forks)


RUN apt-get -y install cron

RUN crontab -l | { cat; echo "0 3 * * * bash /root/mioscript.sh"; } | crontab -

CMD ["cron", "-f"]
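
Putting it together, a minimal complete Dockerfile might look like the following sketch (the base image and the COPY of the script are my assumptions, not part of the original snippet):

FROM debian:bookworm-slim

# update the package index first, needed on slim base images
RUN apt-get update && apt-get -y install cron

COPY mioscript.sh /root/mioscript.sh

RUN crontab -l | { cat; echo "0 3 * * * bash /root/mioscript.sh"; } | crontab -

# -f keeps cron in the foreground so the container stays alive
CMD ["cron", "-f"]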

Wednesday, October 2, 2024

ArUco Tags and Kalman Filters

Using ArUco tags outdoors under sunlight, at different times of day and in different periods of the year, I found an error in the estimated distance between two tags of between 0.7% and 1.3% of the measured distance. The question was: can the error be reduced?

I tried applying a Kalman filter for static conditions (the distances between the tags did not change over time), using the code at this address


In the chart below, the blue dots are the unfiltered data, the solid blue line is the filtered data, and the red line is the true value of the distance measurement

This is the code used, with very modest changes from the example


import numpy as np
import matplotlib.pyplot as plt
import pandas as pd


def kalman_1d(x, P, measurement, R_est, Q_est):
    x_pred = x
    P_pred = P + Q_est
    K = P_pred / (P_pred + R_est)
    x_est = x_pred + K * (measurement - x_pred)
    P_est = (1 - K) * P_pred
    return x_est, P_est


def plot_1d_comparison(measurements_made, estimate, true_value, axis):
    axis.plot(measurements_made, 'k+', label='measurements', alpha=0.3)
    axis.plot(estimate, '-', label='KF estimate')
    if not isinstance(true_value, (list, tuple, np.ndarray)):
        # plot line for a constant value
        axis.axhline(true_value, color='r', label='true value', alpha=0.5)
    else:
        # for a list, tuple or array, plot the points
        axis.plot(true_value, color='r', label='true value', alpha=0.5)
    axis.legend(loc='lower right')
    axis.set_title('Estimated position vs. time step')
    axis.set_xlabel('Time')
    axis.set_ylabel('$x_t$')


def plot_1d_error(estimated_error, lower_limit, upper_limit, axis):
    # lower_limit and upper_limit are the lower and upper limits of the vertical axis
    axis.plot(estimated_error, label='KF estimate for $P$')
    axis.legend(loc='upper right')
    axis.set_title('Estimated error vs. time step')
    axis.set_xlabel('Time')
    axis.set_ylabel('$P_t$')
    plt.setp(axis, 'ylim', [lower_limit, upper_limit])


nome_file = "20_40"
df = pd.read_csv(nome_file + '.csv', header=0, sep=";")

# 11 5.38
# 15 3.39
# 25 7.46
# 40 4.26

mu = 4.26  # Actual position
R = 0.1  # Actual standard deviation of actual measurements (R)

# Measurements: read the distance series from the CSV
# n_measurements = 1000  # Change the number of points to see how the convergence changes
# Z = np.random.normal(mu, np.sqrt(R), size=n_measurements)
Z = df[df.columns[2]].to_numpy()
# Estimated covariances
Q_est = 1e-4
R_est = 2e-2

# initial guesses
x = 5.1  # initial position guess (imagine it is determined with a meter stick)
P = 0.04  # error covariance P

KF_estimate = []  # To store the position estimate at each time point
KF_error = []  # To store estimated error at each time point
for z in Z:
    x, P = kalman_1d(x, P, z, R_est, Q_est)
    KF_estimate.append(x)
    KF_error.append(P)

fig, axes = plt.subplots(1, 2, figsize=(12, 5))
plot_1d_comparison(Z, KF_estimate, mu, axes[0])
np.savetxt(nome_file + "_stima.csv", KF_estimate)
plot_1d_error(KF_error, 0, 0.015, axes[1])
plt.tight_layout()
plt.savefig(nome_file + '.png')
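
A quick way to sanity-check the filter before feeding it real data is to run it on synthetic measurements around a known mean (a sketch; the mean and noise level below are arbitrary):

# synthetic check: noisy measurements around a known distance of 4.26 m
Z_test = np.random.normal(4.26, 0.02, size=500)
x_t, P_t = 5.1, 0.04  # same initial guesses as above
for z in Z_test:
    x_t, P_t = kalman_1d(x_t, P_t, z, R_est=2e-2, Q_est=1e-4)
print(x_t)  # should end up close to 4.26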


Friday, September 27, 2024

OpenCV camera calibration in C++

Besides using a Python script, as seen here, camera calibration can also be done with the C++ program

This is the procedure for building the program from source


git clone https://github.com/opencv/opencv

git clone https://github.com/opencv/opencv_contrib


mkdir opencv/build && cd opencv/build

sudo cmake -D CMAKE_BUILD_TYPE=RELEASE \
 -D CMAKE_INSTALL_PREFIX=/usr/local \
 -D OPENCV_ENABLE_NONFREE=ON \
 -D INSTALL_C_EXAMPLES=ON \
 -D INSTALL_PYTHON_EXAMPLES=ON \
 -D OPENCV_GENERATE_PKGCONFIG=ON \
 -D OPENCV_PC_FILE_NAME=opencv.pc \
 -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
 -D PYTHON_EXECUTABLE=/usr/bin/python3 \
 -D PYTHON_DEFAULT_EXECUTABLE=$(which python3) \
 -D BUILD_EXAMPLES=ON \
 -D BUILD_opencv_apps=ON \
 -D BUILD_opencv_gapi:BOOL=OFF \
 -D BUILD_DOCS=ON \
 ..


sudo make install 

export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib


Navigate to the folder

/opencv/samples/cpp/tutorial_code/calib3d/camera_calibration 

and type the command

g++ -ggdb camera_calibration.cpp -o camera_calibration `pkg-config --cflags --libs opencv`

The program needs two XML files to run: the first with the settings and the second with the list of calibration images

camera_calibration -d parametri.xml

parametri.xml

<?xml version="1.0"?>
<opencv_storage>
<Settings>
<!-- Number of inner corners per a item row and column. (square, circle) -->
<BoardSize_Width>9</BoardSize_Width>
<BoardSize_Height>4</BoardSize_Height>
<!-- The size of a square in some user defined metric system (pixel, millimeter)-->
<Square_Size>0.115</Square_Size>
<Marker_Size>0.090</Marker_Size>
<!-- The type of input used for camera calibration. One of: CHESSBOARD CHARUCOBOARD CIRCLES_GRID ASYMMETRIC_CIRCLES_GRID -->
<Calibrate_Pattern>"CHARUCOBOARD"</Calibrate_Pattern>
<ArUco_Dict_Name>DICT_4X4_250</ArUco_Dict_Name>
<ArUco_Dict_File_Name></ArUco_Dict_File_Name>
<!-- The input to use for calibration.
To use an input camera -> give the ID of the camera, like "1"
To use an input video -> give the path of the input video, like "/tmp/x.avi"
To use an image list -> give the path to the XML or YAML file containing the list of the images, like "/tmp/circles_list.xml"
-->
<Input>"elenco_foto.xml"</Input>
<!-- If true (non-zero) we flip the input images around the horizontal axis.-->
<Input_FlipAroundHorizontalAxis>0</Input_FlipAroundHorizontalAxis>
<!-- Time delay between frames in case of camera. -->
<Input_Delay>100</Input_Delay>
<!-- How many frames to use, for calibration. -->
<Calibrate_NrOfFrameToUse>25</Calibrate_NrOfFrameToUse>
<!-- Consider only fy as a free parameter, the ratio fx/fy stays the same as in the input cameraMatrix.
Use or not setting. 0 - False Non-Zero - True-->
<Calibrate_FixAspectRatio> 1 </Calibrate_FixAspectRatio>
<!-- If true (non-zero) tangential distortion coefficients are set to zeros and stay zero.-->
<Calibrate_AssumeZeroTangentialDistortion>1</Calibrate_AssumeZeroTangentialDistortion>
<!-- If true (non-zero) the principal point is not changed during the global optimization.-->
<Calibrate_FixPrincipalPointAtTheCenter> 1 </Calibrate_FixPrincipalPointAtTheCenter>
<!-- The name of the output log file. -->
<Write_outputFileName>"parametri_camera.xml"</Write_outputFileName>
<!-- If true (non-zero) we write to the output file the feature points.-->
<Write_DetectedFeaturePoints>1</Write_DetectedFeaturePoints>
<!-- If true (non-zero) we write to the output file the extrinsic camera parameters.-->
<Write_extrinsicParameters>1</Write_extrinsicParameters>
<!-- If true (non-zero) we write to the output file the refined 3D target grid points.-->
<Write_gridPoints>1</Write_gridPoints>
<!-- If true (non-zero) we show after calibration the undistorted images.-->
<Show_UndistortedImage>1</Show_UndistortedImage>
<!-- If true (non-zero) will be used fisheye camera model.-->
<Calibrate_UseFisheyeModel>0</Calibrate_UseFisheyeModel>
<!-- If true (non-zero) distortion coefficient k1 will be equals to zero.-->
<Fix_K1>0</Fix_K1>
<!-- If true (non-zero) distortion coefficient k2 will be equals to zero.-->
<Fix_K2>0</Fix_K2>
<!-- If true (non-zero) distortion coefficient k3 will be equals to zero.-->
<Fix_K3>0</Fix_K3>
<!-- If true (non-zero) distortion coefficient k4 will be equals to zero.-->
<Fix_K4>1</Fix_K4>
<!-- If true (non-zero) distortion coefficient k5 will be equals to zero.-->
<Fix_K5>1</Fix_K5>
</Settings>
</opencv_storage>


elenco_foto.xml

<?xml version="1.0"?>
<opencv_storage>
<images>
hikvision/01.jpg
hikvision/02.jpg
hikvision/03.jpg
hikvision/04.jpg
hikvision/05.jpg
hikvision/06.jpg
hikvision/07.jpg
hikvision/08.jpg
hikvision/09.jpg
hikvision/10.jpg
hikvision/11.jpg
hikvision/12.jpg
hikvision/13.jpg
hikvision/14.jpg
hikvision/15.jpg
hikvision/16.jpg
</images>
</opencv_storage>
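
Once the calibration has run, the output file parametri_camera.xml can be read back from Python with cv2.FileStorage. A sketch, assuming the node names written by the OpenCV sample (camera_matrix, distortion_coefficients):

import cv2

fs = cv2.FileStorage('parametri_camera.xml', cv2.FILE_STORAGE_READ)
camera_matrix = fs.getNode('camera_matrix').mat()
dist_coeffs = fs.getNode('distortion_coefficients').mat()
fs.release()
print(camera_matrix)
print(dist_coeffs)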





Debugging OpenCV C++ with Visual Studio Code

How to compile the OpenCV example program with Visual Studio Code


Install the CMake, CMake Tools, and C++ IntelliSense extensions

CMakeLists.txt

# CMakeLists.txt

# Older versions of CMake are likely to work just fine but, since
# I don't know where to cut off I just use the version I'm using
cmake_minimum_required(VERSION "3.17")

# name of this example project
project(simple-demo)

# set OpenCV_DIR variable equal to the path to the cmake
# files within the previously installed opencv program
set(OpenCV_DIR /home/luca/opencv/install/lib/cmake/opencv4)

# Tell compiler to use C++ 14 features which is needed because
# Clang version is often behind in the XCode installation
set(CMAKE_CXX_STANDARD 14)

# configure the necessary common CMake environment variables
# needed to include and link the OpenCV program into this
# demo project, namely OpenCV_INCLUDE_DIRS and OpenCV_LIBS
find_package( OpenCV REQUIRED )

# tell the build to include the headers from OpenCV
include_directories( ${OpenCV_INCLUDE_DIRS} )

# specify the executable target to be built
add_executable(simple-demo main.cpp)

# tell it to link the executable target against OpenCV
target_link_libraries(simple-demo ${OpenCV_LIBS} )
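
With this CMakeLists.txt in place, the project builds with the standard CMake workflow (the CMake Tools extension does the same behind the scenes):

mkdir build && cd build
cmake ..
make
./simple-demo valeria.jpg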


.vscode/launch.json 

Command-line arguments can be set here.

If stopAtEntry is set to false, the debugger stops only at breakpoints.

{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "(gdb) Debugger",
            "type": "cppdbg",
            "request": "launch",
            "program": "${workspaceFolder}/build/simple-demo",
            "args": ["valeria.jpg"],
            "stopAtEntry": true,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": true,
            "MIMode": "gdb"
        }
    ]
}

main.cpp


#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>

using std::cout;
using std::endl;

int main(int argc, char** argv) {

    if (argc != 2) {
        cout << "Expecting an image file to be passed to program" << endl;
        return -1;
    }
    cv::Mat img = cv::imread(argv[1]);
    if (img.empty()) {
        cout << "Not a valid image file" << endl;
        return -1;
    }
    cv::namedWindow("Simple Demo", cv::WINDOW_AUTOSIZE);
    cv::imshow("Simple Demo", img);
    cv::waitKey(0);
    cv::destroyAllWindows();

    return 0;
}

Monday, September 23, 2024

Spot Robot Calibration Board (Boston Dynamics) and OpenCV

I was lucky enough to get to use a huge ChArUco board that belongs with Spot, the Boston Dynamics robot dog


Although it is not explicitly stated, it is a 4X4_100 ChArUco board with 9 columns and 4 rows

To use it to calibrate a camera, the following OpenCV script can be used

Warning: the script requires setLegacyPattern(True) to work

import os
import numpy as np
import cv2

# ------------------------------
# ENTER YOUR REQUIREMENTS HERE:
ARUCO_DICT = cv2.aruco.DICT_4X4_250
SQUARES_VERTICALLY = 9
SQUARES_HORIZONTALLY = 4
SQUARE_LENGTH = 0.115
MARKER_LENGTH = 0.09
# ...
PATH_TO_YOUR_IMAGES = './hikvision'
# ------------------------------

def calibrate_and_save_parameters():
    # Define the aruco dictionary and charuco board
    dictionary = cv2.aruco.getPredefinedDictionary(ARUCO_DICT)
    board = cv2.aruco.CharucoBoard((SQUARES_VERTICALLY, SQUARES_HORIZONTALLY), SQUARE_LENGTH, MARKER_LENGTH, dictionary)
    board.setLegacyPattern(True)  # required for the Spot board
    params = cv2.aruco.DetectorParameters()
    params.cornerRefinementMethod = 0

    image_files = [os.path.join(PATH_TO_YOUR_IMAGES, f) for f in os.listdir(PATH_TO_YOUR_IMAGES) if f.endswith(".jpg")]
    image_files.sort()

    all_charuco_corners = []
    all_charuco_ids = []

    for image_file in image_files:
        print(image_file)
        image = cv2.imread(image_file)
        marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(image, dictionary, parameters=params)

        # If at least one marker is detected (marker_ids is None when nothing is found)
        if marker_ids is not None and len(marker_ids) > 0:
            charuco_retval, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(marker_corners, marker_ids, image, board)

            if charuco_retval:
                all_charuco_corners.append(charuco_corners)
                all_charuco_ids.append(charuco_ids)

    # Calibrate camera
    retval, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(all_charuco_corners, all_charuco_ids, board, image.shape[:2], None, None)

    # Save calibration data
    np.save('hik_camera_matrix.npy', camera_matrix)
    np.save('hik_dist_coeffs.npy', dist_coeffs)

    cv2.destroyAllWindows()

calibrate_and_save_parameters()
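
The two .npy files can then be reloaded in any other script, for example to undistort an image (a minimal sketch; the input file name is just an example):

import cv2
import numpy as np

camera_matrix = np.load('hik_camera_matrix.npy')
dist_coeffs = np.load('hik_dist_coeffs.npy')

img = cv2.imread('hikvision/01.jpg')
undistorted = cv2.undistort(img, camera_matrix, dist_coeffs)
cv2.imwrite('undistorted.jpg', undistorted)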







Tuesday, September 17, 2024

RAG with Ollama, Mistral, and LangChain

Another experiment, this time using this repository: https://github.com/CallumJMac/lessons

The reference folder is lessons/1. RAG/examples/pixegami /PDF_files_langchain/rag-tutorial-v2-main

The PDF files go in the data folder

Then run populate_database.py; persistence is provided by ChromaDB

import argparse
import os
import shutil
from langchain.document_loaders.pdf import PyPDFDirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.schema.document import Document
from get_embedding_function import get_embedding_function
from langchain.vectorstores.chroma import Chroma


CHROMA_PATH = "chroma"
DATA_PATH = "data"


def main():

    # Check if the database should be cleared (using the --reset flag).
    parser = argparse.ArgumentParser()
    parser.add_argument("--reset", action="store_true", help="Reset the database.")
    args = parser.parse_args()
    if args.reset:
        print("✨ Clearing Database")
        clear_database()

    # Create (or update) the data store.
    documents = load_documents()
    chunks = split_documents(documents)
    add_to_chroma(chunks)


def load_documents():
    document_loader = PyPDFDirectoryLoader(DATA_PATH)
    return document_loader.load()


def split_documents(documents: list[Document]):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=80,
        length_function=len,
        is_separator_regex=False,
    )
    return text_splitter.split_documents(documents)


def add_to_chroma(chunks: list[Document]):
    # Load the existing database.
    db = Chroma(
        persist_directory=CHROMA_PATH,
        embedding_function=get_embedding_function()
    )

    # Calculate Page IDs.
    chunks_with_ids = calculate_chunk_ids(chunks)

    # Add or Update the documents.
    existing_items = db.get(include=[])  # IDs are always included by default
    existing_ids = set(existing_items["ids"])
    print(f"Number of existing documents in DB: {len(existing_ids)}")

    # Only add documents that don't exist in the DB.
    new_chunks = []
    for chunk in chunks_with_ids:
        if chunk.metadata["id"] not in existing_ids:
            new_chunks.append(chunk)

    if len(new_chunks):
        print(f"👉 Adding new documents: {len(new_chunks)}")
        new_chunk_ids = [chunk.metadata["id"] for chunk in new_chunks]
        db.add_documents(new_chunks, ids=new_chunk_ids)
        db.persist()
    else:
        print("✅ No new documents to add")


def calculate_chunk_ids(chunks):

    # This will create IDs like "data/monopoly.pdf:6:2"
    # Page Source : Page Number : Chunk Index

    last_page_id = None
    current_chunk_index = 0

    for chunk in chunks:
        source = chunk.metadata.get("source")
        page = chunk.metadata.get("page")
        current_page_id = f"{source}:{page}"

        # If the page ID is the same as the last one, increment the index.
        if current_page_id == last_page_id:
            current_chunk_index += 1
        else:
            current_chunk_index = 0

        # Calculate the chunk ID.
        chunk_id = f"{current_page_id}:{current_chunk_index}"
        last_page_id = current_page_id

        # Add it to the page meta-data.
        chunk.metadata["id"] = chunk_id

    return chunks


def clear_database():
    if os.path.exists(CHROMA_PATH):
        shutil.rmtree(CHROMA_PATH)


if __name__ == "__main__":
    main()
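
Both scripts import get_embedding_function from a local module; in the repository it is essentially a thin wrapper around an embedding model. A minimal version, assuming the nomic-embed-text model has already been pulled in Ollama (the model choice is my assumption):

# get_embedding_function.py (sketch)
from langchain_community.embeddings.ollama import OllamaEmbeddings

def get_embedding_function():
    # local embeddings served by Ollama; swap the model name if you use another one
    return OllamaEmbeddings(model="nomic-embed-text")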

The query can then be run from the command line, for example:

python query_data.py "what's monopoly"


import argparse
from langchain.vectorstores.chroma import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain_community.llms.ollama import Ollama

from get_embedding_function import get_embedding_function

CHROMA_PATH = "chroma"

PROMPT_TEMPLATE = """
Answer the question based only on the following context:

{context}

---

Answer the question based on the above context: {question}
"""


def main():
    # Create CLI.
    parser = argparse.ArgumentParser()
    parser.add_argument("query_text", type=str, help="The query text.")
    args = parser.parse_args()
    query_text = args.query_text
    query_rag(query_text)


def query_rag(query_text: str):
    # Prepare the DB.
    embedding_function = get_embedding_function()
    db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embedding_function)

    # Search the DB.
    results = db.similarity_search_with_score(query_text, k=5)

    context_text = "\n\n---\n\n".join([doc.page_content for doc, _score in results])
    prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
    prompt = prompt_template.format(context=context_text, question=query_text)
    # print(prompt)

    model = Ollama(model="mistral")
    response_text = model.invoke(prompt)

    sources = [doc.metadata.get("id", None) for doc, _score in results]
    formatted_response = f"Response: {response_text}\nSources: {sources}"
    print(formatted_response)
    return response_text


if __name__ == "__main__":
    main()


Just to give an idea, this is the response:

Response:  Monopoly is a property trading game from Parker Brothers designed for ages 8 and up, suitable for 2 to 8 players. The gameboard is used along with tokens, houses, hotels, Chance and Community Chest cards, Title Deed cards, play money, and a Banker's tray. Players can choose to play by the classic rules or use the Speed Die for faster gameplay. In Monopoly, the objective is to become the wealthiest player by buying, renting, and selling properties.


Using LLama3:7b, the response was:

A simple one!

According to the context, Monopoly is a "Property Trading Game" from Parker Brothers.






Alpine Linux 2024 on an IBM A31

I tried to resurrect an IBM A31 that was headed for the scrapyard. The machine has a P4 processor, 256 MB of RAM, a dead CMOS battery, and …