Provisional 2023 course notes: https://bvdp.inetdoc.net/files/iut/cours_vision_but3_2023.pdf

Module schedule (CM = lecture, TD = tutorial, TP = practical/lab session)

OML TD and TP sessions: 7, 10, 13/11

CM 6/11

TD/TP 8/11

TD/TP 9/11

CM 13/11

CM/TD 15/11

TD/TP 16/11

TD/TP 20/11

TD/TP 21/11

TD/TP 22/11

CM 27/11

TD/TP 27/11

TD/TP 29/11

TD/TP 30/11

Installing the tools

At home, install Visual Studio Code: https://code.visualstudio.com/download

Software for viewing images, an alternative to geeqie: https://nomacs.org/

Portable version to install on D:\ : https://github.com/nomacs/nomacs/releases/latest/download/nomacs-portable-win.zip

Set the language to English.

Click Panels→Toolbars→Statusbar to display information about the image at the bottom right.

To read off pixel coordinates in the images, look at the bottom left.

Installing OpenCV on the IUT Windows machines

Type the following in a command prompt:

pip install numpy matplotlib opencv-python --upgrade

Create a folder at P:\etu-new\s5aii\s5aiiX\tpvision and download the following file into it:

test1.py
import numpy as np
import cv2
import math

# Print the OpenCV version to check that the installation works
print("version openCV:" + str(cv2.__version__))

Launch Visual Studio Code:

code

Open the folder you just created.
Run the script: VS Code indicates that the Python extension is not installed.
Click Install next to Python IntelliSense (Pylance) by Microsoft.

OpenCV documentation

https://pypi.org/project/opencv-python/

Link to the OpenCV documentation: https://docs.opencv.org/4.8.0/

TD1 Getting started with the OpenCV library via Python

We will start by getting familiar with the OpenCV library using the Python language: https://opencv24-python-tutorials.readthedocs.io/en/latest/py_tutorials/py_tutorials.html

Image to download into your project folder:

bvdp.inetdoc.net_files_iut_vision_img.jpg
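Before looking at the solutions, here is a minimal starter sketch to check that the image loads and displays correctly (assumptions: the downloaded image has been saved as img.jpg in the project folder, as the solution scripts expect; the file name test_img.py is only a suggestion):

test_img.py
import cv2

# Load the downloaded image (assumed to be saved as img.jpg in the project folder)
img = cv2.imread('img.jpg', cv2.IMREAD_COLOR)
if img is None:
    raise SystemExit("img.jpg not found: check the file name and the working directory")

print("image shape: " + str(img.shape))  # (height, width, channels)

cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()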

TD1 solution

td1_aii1.py
import numpy as np
import cv2
import math
 
print("version openCV:"+str(  cv2.__version__ ))
# Load the image in grayscale
img = cv2.imread('img.jpg',0)
print("img:" +str(img)  )
 
 
# Create a black image
#img = np.zeros((512,512,3), np.uint8)
 
# Draw a diagonal line with a thickness of 5 px (value 255 on the grayscale image)
cv2.line(img,(0,0),(511,511),255,5,cv2.LINE_AA)
 
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'BUT3',(10,500), font, 4,255,2,cv2.LINE_AA)
 
cv2.imwrite('imggray.png',img)
 
# Load the image in color
img = cv2.imread('img.jpg',1)
px = img[100,100]
print("px:"+str(px))
img[200,100] = [255,0,0]
 
px = img[200,100]
print("px:"+str(px))
 
for i in range(0,255):
  for j in range(0,255):
    img[200+j,100+i]=[i,j,0]
 
 
print("img.shape:"+str(img.shape))
print("img.dtype:"+str(img.dtype))
 
xo=692
yo=252
larg=60
haut=60
x1=434
y1=218
 
ROI = img[yo:yo+haut, xo:xo+larg]
for i in range(0,3):
    img[y1+i*100:y1+haut+i*100, x1:x1+larg] = ROI
 
cv2.imwrite('imgcolor.png',img)
 
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
td1_aii2.py
import numpy as np
import cv2
import math
 
print("version openCV:"+str(  cv2.__version__ ))
 
 
 
# Load the image in grayscale
img = cv2.imread('img.jpg',0)
print(img)
 
 
# Create a black image
#img = np.zeros((512,512,3), np.uint8)
# for a color image:
#cv2.line(img,(0,0),(511,511),(255,0,0),5)
 
 
cv2.line(img,(0,0),(511,511),128,5)
 
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'BUT3',(10,500), font, 4,255,2,cv2.LINE_AA)
cv2.imwrite('imggray.png',img)
 
#---------------------------------
# Load the image in color
img = cv2.imread('img.jpg',1)
print(img)
 
 
cv2.imwrite('imgcolor.png',img)
 
px = img[100,100]
print("px:" + str(px))
blue = img[100,100,0]
print("blue:" +str(blue))
 
print("image shape: " +str(img.shape)+" , image type:" +str(img.dtype))
 
yo=257
xo=695
larg=60
haut=60
 
 
ROI=img[yo:yo+haut, xo:xo+larg]
 
x1=435
y1=220
img[y1:y1+haut, x1:x1+larg] = ROI
 
for i in range(0,3):
    x1=435+i*120
    y1=220
    img[y1:y1+haut, x1:x1+larg] = ROI
 
 
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

TD2 Color spaces

Resources: https://opencv24-python-tutorials.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#

YUV color space: https://fr.wikipedia.org/wiki/YUV

Chroma subsampling in YUV: https://www.latelierducable.com/tv-televiseur/yuv-420-ycbcr-422-rgb-444-cest-quoi-le-chroma-subsampling/ and https://wiki.videolan.org/YUV/

HSV color space (TSV in French): https://fr.wikipedia.org/wiki/Teinte_Saturation_Valeur (see the short conversion sketch below)
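To connect these color spaces with OpenCV, here is a minimal sketch (assumptions: img.jpg from TD1 is in the project folder; the file name test_couleurs.py is only a suggestion) that converts a BGR image to YUV and HSV and prints the same pixel in each representation:

test_couleurs.py
import cv2

img = cv2.imread('img.jpg', cv2.IMREAD_COLOR)  # OpenCV loads images in BGR order

# Convert to the YUV and HSV color spaces
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Compare the same pixel in the three representations
print("BGR: " + str(img[100, 100]))
print("YUV: " + str(yuv[100, 100]))
print("HSV: " + str(hsv[100, 100]))  # for 8-bit images, H is in [0,179], S and V in [0,255]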

TD2 solution

td2_aii1.py
import numpy as np
import cv2
import math
 
print("version openCV:"+str(  cv2.__version__ ))
 
 
flags = [i for i in dir(cv2) if i.startswith('COLOR_')]
print("conversions supportées:"+ str(flags))
#exit(0)
 
# Load the image in grayscale
img = cv2.imread('img.jpg',0)
print("img:" +str(img)  )
 
 
# Create a black image
#img = np.zeros((512,512,3), np.uint8)
 
# Draw a diagonal line with a thickness of 5 px (value 255 on the grayscale image)
cv2.line(img,(0,0),(511,511),255,5,cv2.LINE_AA)
 
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'BUT3',(10,500), font, 4,255,2,cv2.LINE_AA)
 
cv2.imwrite('imggray.png',img)
 
# Load the image in color
img = cv2.imread('img.jpg',1)
px = img[100,100]
print("px:"+str(px))
img[200,100] = [255,0,0]
 
px = img[200,100]
print("px:"+str(px))
 
for i in range(0,255):
  for j in range(0,255):
    img[200+j,100+i]=[i,j,0]
 
 
print("img.shape:"+str(img.shape))
print("img.dtype:"+str(img.dtype))
 
xo=692
yo=252
larg=60
haut=60
x1=434
y1=218
 
ROI = img[yo:yo+haut, xo:xo+larg]
for i in range(0,3):
    img[y1+i*100:y1+haut+i*100, x1:x1+larg] = ROI
 
 # Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
 
 # define range of blue color in HSV
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
 
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_blue, upper_blue)
 
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
 
xgazon=350
ygazon=618
print("gazon:"+str(hsv[ygazon,xgazon]))
#gazon:[ 36 171 183]
lower_gazon = np.array([26,50,50])
upper_gazon= np.array([46,255,255])
 
# Threshold the HSV image to keep only the grass-colored pixels
mask = cv2.inRange(hsv, lower_gazon, upper_gazon)
 
# Invert the mask to remove the grass instead of keeping it
mask = cv2.bitwise_not(mask)

# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
 
 
cv2.imwrite('imgres.png',res)
cv2.imshow('image res',res)
 
cv2.imwrite('imgcolor.png',img)
 
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
td2_aii2.py
import numpy as np
import cv2
import math
 
print("version openCV:"+str(  cv2.__version__ ))
 
flags = [i for i in dir(cv2) if i.startswith('COLOR_')]  
print("flags:"+str(flags))
 
# Load the image in grayscale
img = cv2.imread('img.jpg',0)
print(img)
 
 
# Create a black image
#img = np.zeros((512,512,3), np.uint8)
# for a color image:
#cv2.line(img,(0,0),(511,511),(255,0,0),5)
 
 
cv2.line(img,(0,0),(511,511),128,5)
 
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'BUT3',(10,500), font, 4,255,2,cv2.LINE_AA)
cv2.imwrite('imggray.png',img)
 
#---------------------------------
# Load the image in color
img = cv2.imread('img.jpg',1)
print(img)
 
 
cv2.imwrite('imgcolor.png',img)
 
px = img[100,100]
print("px:" + str(px))
blue = img[100,100,0]
print("blue:" +str(blue))
 
print("image shape: " +str(img.shape)+" , image type:" +str(img.dtype))
 
yo=257
xo=695
larg=60
haut=60
 
 
ROI=img[yo:yo+haut, xo:xo+larg]
 
x1=435
y1=220
img[y1:y1+haut, x1:x1+larg] = ROI
 
for i in range(0,3):
    x1=435+i*120
    y1=220
    img[y1:y1+haut, x1:x1+larg] = ROI
 
for r in range(0,256):
  for g in range(0,256):
    img[100+g,100+r]=(100,g,r)
 
# Convert BGR to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
 
xgazon=346
ygazon=641
print("gazon:"+ str(hsv[ygazon,xgazon]))
 
lower_gazon = np.array([38-10,50,50])
upper_gazon = np.array([38+10,255,255])
 
# Threshold the HSV image to keep only the grass-colored pixels (the blue range below is left for reference)
#mask = cv2.inRange(hsv, lower_blue, upper_blue)
mask = cv2.inRange(hsv, lower_gazon, upper_gazon)
 
mask=cv2.bitwise_not(mask)
 
 
# Bitwise-AND mask and original image
imgres = cv2.bitwise_and(img,img, mask= mask)
 
 
 
cv2.imwrite('imgres.png',imgres)
 
#cv2.imshow('image',img)
cv2.imshow('image',imgres)
cv2.waitKey(0)
cv2.destroyAllWindows()

Object detection and counting

Image before processing (the scripts below expect it saved as riz.jpg): bvdp.inetdoc.net_files_iut_tp_lpro_vision_riz.jpg

Intermediate solution (aii 2)

riz_aii2.py
import numpy as np
import cv2
from matplotlib import pyplot as plt
 
im = cv2.imread('riz.jpg')
 
 
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
 
#imgray=imgray*0.5
#imgray=imgray.astype(np.uint8)
 
# Display the histogram:
#https://opencv24-python-tutorials.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.html
hist,bins = np.histogram(imgray.flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()
plt.plot(cdf_normalized, color = 'b')
plt.hist(imgray.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
 
# another method for computing the histogram:
#cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]])
hist = cv2.calcHist([imgray],[0],None,[256],[0,256])
#hist = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
#plt.imshow(hist,interpolation = 'nearest')
#plt.show()
#print(str(hist))
 
# raw / manual thresholding
ret,thresh1 = cv2.threshold(imgray,150,255,cv2.THRESH_BINARY)
 
# Otsu's automatic thresholding
ret2,th2 = cv2.threshold(imgray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
print("ret2: "+str(ret2))
 
cv2.imwrite('riz_out.png',im)
cv2.imwrite('riz_gray.png',imgray)
cv2.imwrite('riz_otsu.png',th2)
 
kernel = np.ones((3,3),np.uint8)
erosion = cv2.erode(th2,kernel,iterations = 1)
cv2.imwrite('riz_otsu_erosion.png',erosion)
dilation = cv2.dilate(erosion,kernel,iterations = 1)
cv2.imwrite('riz_otsu_erosion_dilatation.png',dilation)
 
cv2.imshow('image',th2)
cv2.waitKey(0)
cv2.destroyAllWindows()

Intermediate solution (aii 1)

riz_aii1.py
import numpy as np
import cv2
from matplotlib import pyplot as plt
 
im = cv2.imread('riz.jpg')
 
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
 
hist = cv2.calcHist([imgray],[0],None,[256],[0,256])
#hist = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
#plt.imshow(hist,interpolation = 'nearest')
#plt.show()
#print(str(hist))
 
#imgray=imgray*0.5
#imgray=imgray.astype(np.uint8)
 
if False:
    #https://opencv24-python-tutorials.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.html
    hist,bins = np.histogram(imgray.flatten(),256,[0,256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * hist.max()/ cdf.max()
    plt.plot(cdf_normalized, color = 'b')
    plt.hist(imgray.flatten(),256,[0,256], color = 'r')
    plt.xlim([0,256])
    plt.legend(('cdf','histogram'), loc = 'upper left')
    plt.show()
 
 
print(im.shape)
cv2.imwrite('riz_gray.png',imgray)
 
 
 
# Otsu's thresholding
ret,thresh1 = cv2.threshold(imgray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
 
print ("ret: "+str(ret))
 
# manual thresholding
#ret,thresh1 = cv2.threshold(imgray,150,255,cv2.THRESH_BINARY)
cv2.imwrite('riz_thresh1.png',thresh1)
 
 
kernel = np.ones((3,3),np.uint8)
erosion = cv2.erode(thresh1,kernel,iterations = 2)
cv2.imwrite('riz_thresh1_erosion.png',erosion)
 
dilatation = cv2.dilate(erosion,kernel,iterations = 1)
cv2.imwrite('riz_thresh1_erosion_dilatation.png',dilatation)
 
 
cv2.imshow('image',thresh1)
cv2.waitKey(0)
cv2.destroyAllWindows()
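Both intermediate solutions stop after the morphological cleaning: the counting itself is still missing. A possible sketch of this last step using connected-component labelling (assumptions: the min_area threshold of 20 px is a value to tune on riz.jpg; the file name riz_comptage.py is only a suggestion):

riz_comptage.py
import numpy as np
import cv2

# Rebuild the cleaned binary image as in riz_aii1.py (white grains on a dark background)
im = cv2.imread('riz.jpg')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh1 = cv2.threshold(imgray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
erosion = cv2.erode(thresh1, kernel, iterations=2)
dilatation = cv2.dilate(erosion, kernel, iterations=1)

# Connected-component labelling: label 0 is the background
nb_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(dilatation)

# Keep only the components large enough to be a grain (threshold to tune)
min_area = 20
grains = [i for i in range(1, nb_labels) if stats[i, cv2.CC_STAT_AREA] >= min_area]
print("number of grains: " + str(len(grains)))

# Draw the centroid of each detected grain on the original image
for i in grains:
    cx, cy = centroids[i]
    cv2.circle(im, (int(round(cx)), int(round(cy))), 3, (0, 0, 255), -1)
cv2.imwrite('riz_comptage.png', im)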

TD Geometry for vision

Solution for the cube rendering:
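As a reminder, the script below implements the pinhole projection model. With the notation used in the code (alphau, alphav: focal lengths in pixels, (pu, pv): principal point, cRTw: world-to-camera rigid transform), a 3D point $M$ in world coordinates, written in homogeneous form $\tilde M$, projects to the pixel $(u,v)$ as

$$\tilde m = {}^{i}C_{c}\,{}^{c}RT_{w}\,\tilde M,\qquad {}^{i}C_{c}=\begin{pmatrix}\alpha_u & 0 & p_u & 0\\ 0 & \alpha_v & p_v & 0\\ 0 & 0 & 1 & 0\end{pmatrix},\qquad u=\frac{\tilde m_1}{\tilde m_3},\quad v=\frac{\tilde m_2}{\tilde m_3}$$

The focal length in pixels is derived from the horizontal field of view, $\alpha_u = 400/\tan(\mathrm{fov}_h/2)$, 400 being half the image width in pixels.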

synthese.py
import cv2
import numpy as np
import math
print("==============================================================")
taillecube=1
listeM = np.float32([[0,0,0], [taillecube,0,0],[taillecube,taillecube,0], [0,taillecube,0],
                   [0,0,taillecube], [taillecube,0,taillecube],[taillecube,taillecube,taillecube], [0,taillecube,taillecube]])
 
print("listeM:"+str(listeM))         
listeseg=[[0,1],[1,2],[2,3],[3,0],[4,5],[5,6],[6,7],[7,4],[0,4],[1,5],[2,6],[3,7]]                   
print("listeseg:"+str(listeseg))
print("==============================================================")
 
cRw=np.float32([[1,0,0],[0,-1,0],[0,0,-1]])
cTw=np.float32([[0],[0],[3]])
cRTw = np.hstack((cRw,cTw))
cRTw = np.vstack((cRTw,[0,0,0,1]))
 
print("cRTw:"+str(cRTw))
print("==============================================================")
 
eu=0.00001 # pixel size: 10 µm
ev=eu
# horizontal field of view in degrees
fovhdegres=100
demifovh=(fovhdegres/2)*math.pi/180
alphau=400/math.tan(demifovh)
print("alphau:"+str(alphau))
f=alphau*eu
print("f:"+str(f)+" m")
 
alphav=alphau
demifovv=math.atan(300/alphav)
fovvdegres=demifovv*2*180/math.pi
 
print("fovvdegres:"+str(fovvdegres))
#=======================================
pu=400
pv=300
iCc=np.float32([[alphau,0,pu,0],[0,alphav,pv,0],[0,0,1,0]])
print("iCc:"+str(iCc))
iCw=iCc@cRTw
print("iCw:"+str(iCw))
#------------------
def projeterPoint(M):
    print("M:"+str(M))
    M = np.append(M, 1)
    print("M:"+str(M))
    m=iCw@M
    # dehomogenization: divide by the third homogeneous coordinate
    mu=m[0]/m[2]
    mv=m[1]/m[2]
    # round to integer pixel positions
    mu=int(round(mu,0))
    mv=int(round(mv,0))
    print("mu:"+str(mu))
    print("mv:"+str(mv))   
    return  (mu,mv)
 
#------------------
M= np.float32([0,0,0])
m= projeterPoint(M)
 
 
# Create a black image 
img = np.zeros((pv*2,pu*2), np.uint8)
 
for seg in listeseg:
    print("seg:"+str(seg))
    m1=projeterPoint(listeM[seg[0]])
    m2=projeterPoint(listeM[seg[1]])
    print("m1:"+str(m1))
    print("m2:"+str(m2))
    cv2.line(img,m1,m2,255,1,cv2.LINE_AA)   
 
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

TP Monocular and stereo 3D reconstruction

bvdp.inetdoc.net_files_iut_vision_imaobj1_rect.jpg

bvdp.inetdoc.net_files_iut_vision_imaobj2_rect.jpg

tpstereo.py
#!/usr/bin/python3
import numpy as np
import cv2
import glob
filename1="imaobj1_rect.jpg"
filename2="imaobj2_rect.jpg"
 
img1 = cv2.imread(filename1)
if(len(img1.shape)<3):
    img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
img2 = cv2.imread(filename2)
if(len(img2.shape)<3):
    img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
 
h,  w = img1.shape[:2]
print("h:"+str(h))    
print("w:"+str(w))   
 
#------------------
def projeterPoint(M,iCw):
    #print("M:"+str(M))
    M = np.append(M, 1)
    #print("M:"+str(M))
    m=iCw@M
    # dehomogenization: divide by the third homogeneous coordinate
    mu=m[0]/m[2]
    mv=m[1]/m[2]
    # round to integer pixel positions
    mu=int(round(mu,0))
    mv=int(round(mv,0))
    #print("mu:"+str(mu))
    #print("mv:"+str(mv))   
    return  (mu,mv)
#------------------
alphau=2149.349810656287900
alphav=2146.276553276586100
 
pu=412.626457723086160 
pv=355.723365602020240 
 
 
rvec1 = np.float32([ -3.125480e-001 , -2.316874e+000 , 1.048207e-001 ])
tvec1 = 0.001 * np.float32([ [4.813190e+001]  , [-1.258122e+002] , [1.066916e+003] ])
rvec2 = np.float32([ 6.592006e-001  , -2.262313e+000 , -3.064294e-001])
tvec2 = 0.001 * np.float32([ [2.557518e+001]  , [-6.888417e+001] , [1.091155e+003] ])
 
matR1,jac1=cv2.Rodrigues(rvec1) 
print("matR1:"+str(matR1))     
matR2,jac2=cv2.Rodrigues(rvec2)    
print("matR2:"+str(matR2))     
 
 
# same construction as in the synthese (cube rendering) TD
cRT1w=np.hstack((matR1,tvec1))
cRT1w = np.vstack((cRT1w,[0,0,0,1]))
cRT2w=np.hstack((matR2,tvec2))
cRT2w = np.vstack((cRT2w,[0,0,0,1]))
 
print("cRTw: "+str(cRT1w))
#print("mtx: "+str(mtx))
iCc=np.float32([[alphau,0,pu,0],[0,alphav,pv,0],[0,0,1,0]])
print("iCc:"+str(iCc))
iC1w=iCc@cRT1w
print("iC1w:"+str(iC1w))
iC2w=iCc@cRT2w
print("iC2w:"+str(iC2w))
 
 
#---------------------------
#TODO: draw a 10 cm coordinate frame on each image
#---------------------------
 
 
 
 
#list of matched points in the 2 images, mu then mv
listem1 = np.int_([[490,96], [329,83] , [510,119] , [359,107],
                        [493,156],[339,151],[525,189],[373,180],[507,230],
                        [353,223],[534,253],[385,250],[514,302],[361,304],
                        [544,333],[393,336],[523,377],[369,383],
                        [400,252], # this point is not observed in the image
                        [254,264]])
 
listem2 = np.int_([[448,194], [290,184] , [450,233] , [297,228],
                        [420,249],[265,243],[427,297],[272,294],[391,311],
                        [241,308],[399,349],[250,354],[366,364],[212,367],
                        [371,408],[216,417],[338,416],[179,420],
                        [316,263], # this point is not observed in the image
                        [162,228]])
 
if listem2.shape[0]!=listem1.shape[0]:
    print("les 2 listes de points doivent avoir le même nombre de points")
    exit(-1)
listeM=np.zeros((listem2.shape[0],3), np.float32)
 
#default value for the reconstructed 3D point
X = np.zeros((3,1), np.float32)
Xz0 = np.zeros((3,1), np.float32)
 
for i,(mu,mv) in enumerate(listem1):   
    img1 = cv2.circle(img1,(mu,mv), 3,(255,0,255), -1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img1,str(i),(mu+10,mv), font, 1,(255,0,255),2,cv2.LINE_AA)
    #get the corresponding point in image 2
    mu2=listem2[i][0]
    mv2=listem2[i][1]
    img2 = cv2.circle(img2,(mu2,mv2), 3,(255,0,255), -1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img2,str(i),(mu2+10,mv2), font, 1,(255,0,255),2,cv2.LINE_AA)
    #---------------------------
    #TODO: fill listeM[i][:] with the 3D coordinates of point i
    #---------------------------
 
 
 
 
 
 
#composite image built from images 1 and 2
imagecomposee = np.hstack((img1,img2))
filenameout="imaobj_rect-out.jpg"
cv2.imwrite(filenameout,imagecomposee)
cv2.imshow('dst',imagecomposee)
cv2.waitKey(-1)
cv2.destroyAllWindows()
#-----------------------------------------------
#matplot 3D
#https://www.geeksforgeeks.org/three-dimensional-plotting-in-python-using-matplotlib/
#set to True to enable the 3D rendering after closing the OpenCV window
if False:
    # importing mplot3d toolkits
    from mpl_toolkits import mplot3d
    import numpy as np
    import matplotlib.pyplot as plt
 
    fig = plt.figure()
 
    # syntax for 3-D projection
    ax = plt.axes(projection ='3d')
 
    # defining axes
    x = listeM[:,0]
    y = listeM[:,1]
    z = listeM[:,2]
    ax.scatter(x, y, z)
    ax.axis('equal')
 
    # syntax for plotting
    ax.set_title('3d Scatter plot geeks for geeks')
    plt.show()
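Hint for the reconstruction TODO above (this is also the reasoning used in the solution that follows): writing the projection $\tilde m = {}^{i}C_{w}\,\tilde M$ with $u=\tilde m_1/\tilde m_3$ and $v=\tilde m_2/\tilde m_3$, each observed pixel $(u,v)$ yields two linear equations in the unknown 3D point $X=(x,y,z)^T$:

$$(u\,c_{31}-c_{11})\,x+(u\,c_{32}-c_{12})\,y+(u\,c_{33}-c_{13})\,z = c_{14}-u\,c_{34}$$
$$(v\,c_{31}-c_{21})\,x+(v\,c_{32}-c_{22})\,y+(v\,c_{33}-c_{23})\,z = c_{24}-v\,c_{34}$$

where $c_{ij}$ are the entries of ${}^{i}C_{w}$. A single view therefore needs one extra constraint to solve for $X$ (monocular case, for example $z=0$ for a point lying on the calibration target), while two views give four equations solved in the least-squares sense (stereo case). This is exactly the system $AX=B$ assembled in the solution below.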

Solution

tpstereo_solution.py
#!/usr/bin/python3
import numpy as np
import cv2
import glob
filename1="imaobj1_rect.jpg"
filename2="imaobj2_rect.jpg"
 
img1 = cv2.imread(filename1)
if(len(img1.shape)<3):
    img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)
img2 = cv2.imread(filename2)
if(len(img2.shape)<3):
    img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)
 
h,  w = img1.shape[:2]
print("h:"+str(h))    
print("w:"+str(w))   
 
#------------------
def projeterPoint(M,iCw):
    #print("M:"+str(M))
    M = np.append(M, 1)
    #print("M:"+str(M))
    m=iCw@M
    # dehomogenization: divide by the third homogeneous coordinate
    mu=m[0]/m[2]
    mv=m[1]/m[2]
    # round to integer pixel positions
    mu=int(round(mu,0))
    mv=int(round(mv,0))
    #print("mu:"+str(mu))
    #print("mv:"+str(mv))   
    return  (mu,mv)
#------------------
alphau=2149.349810656287900
alphav=2146.276553276586100
 
pu=412.626457723086160 
pv=355.723365602020240 
 
 
rvec1 = np.float32([ -3.125480e-001 , -2.316874e+000 , 1.048207e-001 ])
tvec1 = 0.001 * np.float32([ [4.813190e+001]  , [-1.258122e+002] , [1.066916e+003] ])
rvec2 = np.float32([ 6.592006e-001  , -2.262313e+000 , -3.064294e-001])
tvec2 = 0.001 * np.float32([ [2.557518e+001]  , [-6.888417e+001] , [1.091155e+003] ])
 
matR1,jac1=cv2.Rodrigues(rvec1) 
print("matR1:"+str(matR1))     
matR2,jac2=cv2.Rodrigues(rvec2)    
print("matR2:"+str(matR2))     
 
 
# same construction as in the synthese (cube rendering) TD
cRT1w=np.hstack((matR1,tvec1))
cRT1w = np.vstack((cRT1w,[0,0,0,1]))
cRT2w=np.hstack((matR2,tvec2))
cRT2w = np.vstack((cRT2w,[0,0,0,1]))
 
print("cRTw: "+str(cRT1w))
#print("mtx: "+str(mtx))
iCc=np.float32([[alphau,0,pu,0],[0,alphav,pv,0],[0,0,1,0]])
print("iCc:"+str(iCc))
iC1w=iCc@cRT1w
print("iC1w:"+str(iC1w))
iC2w=iCc@cRT2w
print("iC2w:"+str(iC2w))
 
 
#---------------------------
#TODO: draw a 10 cm coordinate frame on each image
#---------------------------
dimRepere=0.10
axis = np.float32([[0,0,0], [dimRepere,0,0], [0,dimRepere,0], [0,0,dimRepere]]).reshape(-1,3)
img1 = cv2.line(img1, projeterPoint(axis[0],iC1w),projeterPoint(axis[1],iC1w), (0,0,255), 1)
img1 = cv2.line(img1, projeterPoint(axis[0],iC1w),projeterPoint(axis[2],iC1w), (0,255,0), 1)
img1 = cv2.line(img1, projeterPoint(axis[0],iC1w),projeterPoint(axis[3],iC1w), (255,0,0), 1)
 
img2 = cv2.line(img2, projeterPoint(axis[0],iC2w),projeterPoint(axis[1],iC2w), (0,0,255), 1)
img2 = cv2.line(img2, projeterPoint(axis[0],iC2w),projeterPoint(axis[2],iC2w), (0,255,0), 1)
img2 = cv2.line(img2, projeterPoint(axis[0],iC2w),projeterPoint(axis[3],iC2w), (255,0,0), 1)
 
#list of matched points in the 2 images, mu then mv
listem1 = np.int_([[490,96], [329,83] , [510,119] , [359,107],
                        [493,156],[339,151],[525,189],[373,180],[507,230],
                        [353,223],[534,253],[385,250],[514,302],[361,304],
                        [544,333],[393,336],[523,377],[369,383],
                        [400,252], # this point is not observed in the image
                        [254,264]])
 
listem2 = np.int_([[448,194], [290,184] , [450,233] , [297,228],
                        [420,249],[265,243],[427,297],[272,294],[391,311],
                        [241,308],[399,349],[250,354],[366,364],[212,367],
                        [371,408],[216,417],[338,416],[179,420],
                        [316,263], # this point is not observed in the image
                        [162,228]])
 
if listem2.shape[0]!=listem1.shape[0]:
    print("les 2 listes de points doivent avoir le même nombre de points")
    exit(-1)
listeM=np.zeros((listem2.shape[0],3), np.float32)
 
#default value for the reconstructed 3D point
X = np.zeros((3,1), np.float32)
Xz0 = np.zeros((3,1), np.float32)
 
for i,(mu,mv) in enumerate(listem1):   
    img1 = cv2.circle(img1,(mu,mv), 3,(255,0,255), -1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img1,str(i),(mu+10,mv), font, 1,(255,0,255),2,cv2.LINE_AA)
    #get the corresponding point in image 2
    mu2=listem2[i][0]
    mv2=listem2[i][1]
    img2 = cv2.circle(img2,(mu2,mv2), 3,(255,0,255), -1)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img2,str(i),(mu2+10,mv2), font, 1,(255,0,255),2,cv2.LINE_AA)
    #---------------------------
    #TODO: fill listeM[i][:] with the 3D coordinates of point i
    #---------------------------
    monoculaire=True
    if monoculaire: # monocular case
        if i%2==0: # 3D point lying in the calibration-target plane z=0
            A = np.zeros((3,3), np.float32)
            B = np.zeros((3,1), np.float32)
 
            n=0
            A[n][0]=iC1w[2][0]*mu-iC1w[0][0]
            A[n][1]=iC1w[2][1]*mu-iC1w[0][1]
            A[n][2]=iC1w[2][2]*mu-iC1w[0][2]
            B[n]=  -iC1w[2][3]*mu+iC1w[0][3]
 
            n=1
            A[n][0]=iC1w[2][0]*mv-iC1w[1][0]
            A[n][1]=iC1w[2][1]*mv-iC1w[1][1]
            A[n][2]=iC1w[2][2]*mv-iC1w[1][2]
            B[n]=  -iC1w[2][3]*mv+iC1w[1][3]
 
            n=2
            A[n][0]=0
            A[n][1]=0
            A[n][2]=1
            B[n]=  0
 
            X=np.linalg.inv(A)@B
            Xz0=X.copy()
        else: # 3D point vertically above the previous one
            #A = np.zeros((4,3), np.float32)
            #B = np.zeros((4,1), np.float32)
            A = np.zeros((3,3), np.float32)
            B = np.zeros((3,1), np.float32)
 
            n=0
            A[n][0]=iC1w[2][0]*mu-iC1w[0][0]
            A[n][1]=iC1w[2][1]*mu-iC1w[0][1]
            A[n][2]=iC1w[2][2]*mu-iC1w[0][2]
            B[n]=  -iC1w[2][3]*mu+iC1w[0][3]
 
            n=1
            A[n][0]=iC1w[2][0]*mv-iC1w[1][0]
            A[n][1]=iC1w[2][1]*mv-iC1w[1][1]
            A[n][2]=iC1w[2][2]*mv-iC1w[1][2]
            B[n]=  -iC1w[2][3]*mv+iC1w[1][3]
 
            #Variant using a known height for the points on the top face
            #n=2
            #A[n][0]=0
            #A[n][1]=0
            #A[n][2]=1
            #B[n]=  0.1 #10cm            
 
            #Variant using the previous point (lying in the plane z=0) to fix one coordinate (x here) of the current point
            n=2
            A[n][0]=1
            A[n][1]=0
            A[n][2]=0
            B[n]=  Xz0[0]
            #or alternatively:
            #A[n][0]=0
            #A[n][1]=1
            #A[n][2]=0
            #B[n]=  Xz0[1]
 
            print("A:" +str(A))
            print("B:" +str(B))
            #X= (np.linalg.inv(A.transpose() @ A) @ A.transpose() ) @ B
            X=np.linalg.inv(A)@B
 
 
    else: #stereo
        A = np.zeros((4,3), np.float32)
        B = np.zeros((4,1), np.float32)
 
        n=0
        A[n][0]=iC1w[2][0]*mu-iC1w[0][0]
        A[n][1]=iC1w[2][1]*mu-iC1w[0][1]
        A[n][2]=iC1w[2][2]*mu-iC1w[0][2]
        B[n]=  -iC1w[2][3]*mu+iC1w[0][3]
 
        n=1
        A[n][0]=iC1w[2][0]*mv-iC1w[1][0]
        A[n][1]=iC1w[2][1]*mv-iC1w[1][1]
        A[n][2]=iC1w[2][2]*mv-iC1w[1][2]
        B[n]=  -iC1w[2][3]*mv+iC1w[1][3]
 
        n=2
        A[n][0]=iC2w[2][0]*mu2-iC2w[0][0]
        A[n][1]=iC2w[2][1]*mu2-iC2w[0][1]
        A[n][2]=iC2w[2][2]*mu2-iC2w[0][2]
        B[n]=  -iC2w[2][3]*mu2+iC2w[0][3]
 
        n=3
        A[n][0]=iC2w[2][0]*mv2-iC2w[1][0]
        A[n][1]=iC2w[2][1]*mv2-iC2w[1][1]
        A[n][2]=iC2w[2][2]*mv2-iC2w[1][2]
        B[n]=  -iC2w[2][3]*mv2+iC2w[1][3]
 
        X= (np.linalg.inv(A.transpose() @ A) @ A.transpose() ) @ B
 
    print("X( "+str(i)+" ): "+str(X))
    listeM[i][:]=X.transpose()
 
 
#composite image built from images 1 and 2
imagecomposee = np.hstack((img1,img2))
filenameout="imaobj_rect-out.jpg"
cv2.imwrite(filenameout,imagecomposee)
cv2.imshow('dst',imagecomposee)
cv2.waitKey(-1)
cv2.destroyAllWindows()
#-----------------------------------------------
#matplot 3D
#https://www.geeksforgeeks.org/three-dimensional-plotting-in-python-using-matplotlib/
#set to True to enable the 3D rendering after closing the OpenCV window
if False:
    # importing mplot3d toolkits
    from mpl_toolkits import mplot3d
    import numpy as np
    import matplotlib.pyplot as plt
 
    fig = plt.figure()
 
    # syntax for 3-D projection
    ax = plt.axes(projection ='3d')
 
    # defining axes
    x = listeM[:,0]
    y = listeM[:,1]
    z = listeM[:,2]
    ax.scatter(x, y, z)
    ax.axis('equal')
 
    # syntax for plotting
    ax.set_title('3d Scatter plot geeks for geeks')
    plt.show()