J'essaye d'écrire une fonction dans Scilab pour afficher des images. Je traite des images sous forme de listes de matrices, puis convertit la liste en hypermatrice à l'intérieur de la fonction. Cependant, le code ne semble pas fonctionner pour les images de type uint16 ou uint32. Ce est le code que je travaille sur:Affichage d'une hypermatrice de matrices uint16 en utilisant Matplot
imshow()
function[] =imshow(Image)
global TYPE_DOUBLE; //retrieving list and creating 3 dimensional matrix out of it
dimensions=size(Image)
MaxUInt8 = 2^8 - 1;
MaxGrayValue = MaxUInt8; //changed from MaximumGrayValue
if dimensions==3 then
matSize=size(Image(1));
r=matrix(Image(1),matSize(1),matSize(2));
g=matrix(Image(2),matSize(1),matSize(2));
b=matrix(Image(3),matSize(1),matSize(2));
z(:,:,1)=uint8(r); //Since Matplot is not working with uint16 and uint32, convert every image to a
z(:,:,2)=uint8(g); //8 bit palette.
z(:,:,3)=uint8(b); //Note: this will affect the color depth.
[NumberOfRows NumberOfColumns NumberOfChannels] = size(z);
NumberOfPixels = NumberOfRows * NumberOfColumns;
Sample = z(1);
//printf("\nType of Sample: ");
//printf(typeof(Sample)); //DEBUG:
//printf("\n");
if type(Sample) == 1 then //type 1 = real/complex matrix of double
ColorMap = matrix(z, NumberOfPixels, NumberOfChannels);
disp(ColorMap);
else
TypeName = typeof(Sample)
select TypeName
case 'uint8'
MaxGrayValue = 2^8 - 1;
//printf("uint8\t%d", MaxGrayValue); //DEBUG:
case 'uint16'
MaxGrayValue = 2^16 - 1;
//ColorMap = double(matrix(z, NumberOfPixels, NumberOfChannels))/MaxGrayValue;
//printf("uint16\t%d", MaxGrayValue); //DEBUG:
case 'uint32'
MaxGrayValue = 2^32 - 1;
//ColorMap = double(matrix(z, NumberOfPixels, NumberOfChannels))/MaxGrayValue;
//printf("uint32\t%d", MaxGrayValue); //DEBUG:
end;
ColorMap = double(matrix(z, NumberOfPixels, NumberOfChannels))/MaxGrayValue;
printf("\nCreated colormap with MaxGrayValue = %d\n", MaxGrayValue); //DEBUG:
end;
Img=z;
//Grayscale
elseif dimensions==1 then
matSize = size(Image(1));
Img=matrix(Image(1),matSize(1),matSize(2));
Img=Img';
select typeof(Img)
case 'uint8'
MaxGrayValue = MaxUInt8;
case 'uint16'
MaxGrayValue = max(Image(:)) ;
case 'uint32'
MaxGrayValue = max(Image(:));
end;
ColorMap = graycolormap(double(MaxGrayValue + 1)); //changed from MaximumGrayValue
end;
show(Img,ColorMap);
endfunction
show()
function[] =show(Img,ColorMap)
FigureHandle = gcf();
drawlater();
FigureHandle.color_map = ColorMap
FigureHandle.background = -2; // sets the background to white
FigureHandle.figure_name = "Title";
[NumberOfRows NumberOfColumns] = size(Img);
FigureHandle.axes_size = [NumberOfColumns NumberOfRows];
delete(gca()); // previous image is deleted
Diagram = gca();
[NumberOfRows NumberOfColumns] = size(Img);
Diagram.data_bounds = [1, 1; NumberOfColumns, NumberOfRows];
Diagram.axes_visible = ['off' 'off' 'off'];
Diagram.isoview = 'on';
Options = '082'; // Box is drawn around image.
//printf("\nGiven to Matplot: "); //DEBUG:
//printf(typeof(Img)); //DEBUG:
Matplot(Img, Options);
drawnow();
endfunction
L'erreur que je reçois est:
!--error 202
Matplot: Wrong type for input argument #1: A real or integer expected.
at line 22 of function show called by :
at line 67 of function imshow called by :
imshow(a);
Toute aide serait génial.
Veuillez ajouter votre code ici au lieu de créer un lien vers une source externe. –
Que voulez-vous dire par: "le code ne semble pas fonctionner pour les images de type uint16 ou uint32". Avez-vous des messages d'erreur spécifiques? – spoorcc
Oui, je le fais. Je vais modifier le post original. –