patterncsharpMinor
Data conversion routines for FFT
Viewed 0 times
Tags: conversion, data, fft, routines
Problem
I have written the following routines to work with digital images in various representations.
Can I optimize them for better accuracy and performance?
```
public static partial class ImageDataConverter
{
#region Bitmap ToBitmap32(int[,] image)
//Tested
///Working fine.
public static Bitmap ToBitmap32(int[,] image)
{
int Width = image.GetLength(0);
int Height = image.GetLength(1);
int i, j;
Bitmap bitmap = new Bitmap(Width, Height);
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, Width, Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
int bytesPerPixel = sizeof(int);
unsafe
{
byte address = (byte)bitmapData.Scan0;
for (i = 0; i = sizeof(int))
{
iii = BitConverter.ToInt32(temp, 0);
}
else
{
iii = (int)temp[0];
}
array2D[j, i] = iii;
address += bytesPerPixel;
}
address += paddingOffset;
}
}
bitmap.UnlockBits(bitmapData);
return array2D;
}
public static Bitmap ToBitmap(int[,] image)
{
int Width = image.GetLength(0);
int Height = image.GetLength(1);
int i, j;
Bitmap bitmap = new Bitmap(Width, Height, PixelFormat.Format8bppIndexed);
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, Width, Height),
ImageLockMode.ReadOnly, PixelFormat.Format8bppIndexed);
int bytesPerPixel = sizeof(byte);
unsafe
{
byte address = (byte)bitmapData.Scan0;
for (i = 0; i = sizeof(int))
{
iii = BitConverter.ToInt32(temp, 0);
}
else
Can I optimize them for better accuracy and performance?
```
public static partial class ImageDataConverter
{
#region Bitmap ToBitmap32(int[,] image)
//Tested
///Working fine.
public static Bitmap ToBitmap32(int[,] image)
{
int Width = image.GetLength(0);
int Height = image.GetLength(1);
int i, j;
Bitmap bitmap = new Bitmap(Width, Height);
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, Width, Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
int bytesPerPixel = sizeof(int);
unsafe
{
byte address = (byte)bitmapData.Scan0;
for (i = 0; i = sizeof(int))
{
iii = BitConverter.ToInt32(temp, 0);
}
else
{
iii = (int)temp[0];
}
array2D[j, i] = iii;
address += bytesPerPixel;
}
address += paddingOffset;
}
}
bitmap.UnlockBits(bitmapData);
return array2D;
}
public static Bitmap ToBitmap(int[,] image)
{
int Width = image.GetLength(0);
int Height = image.GetLength(1);
int i, j;
Bitmap bitmap = new Bitmap(Width, Height, PixelFormat.Format8bppIndexed);
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, Width, Height),
ImageLockMode.ReadOnly, PixelFormat.Format8bppIndexed);
int bytesPerPixel = sizeof(byte);
unsafe
{
byte address = (byte)bitmapData.Scan0;
for (i = 0; i = sizeof(int))
{
iii = BitConverter.ToInt32(temp, 0);
}
else
Solution
Loops
I do not really like this kind of loop:
I would prefer:
but according to this Stack Overflow thread, performance should not be harmed.
Operations within the loops
I notice that this operation :
Towards better accessors ?
Actually the accessor
If
Parallel processing ?
We do not have information regarding the size of the images you are working with, and parallel processing can make you waste a lot of time, especially if the number of instructions per loop is small.
But these loops :
and:
could easily be parallelized over the rows or the columns.
I do not really like this kind of loop:
for (int j = 0; j <= Height - 1; j++)
I would prefer:
for (int j = 0; j < Height; j++)
but according to this Stack Overflow thread, performance should not be harmed.
Operations within the loops
I notice that this operation :
bitmapData.Width * bytesPerPixel is repeated in some of your loops. You can expect a (small) gain if you evaluate it once, outside the loop.
Towards better accessors?
Actually the accessor
a[i,j] can be improved, especially in loops like:
for (int j = 0; j <= Height - 1; j++)
{
for (int i = 0; i <= Width - 1; i++)
{
integer[i, j] = ((int)image[i, j].Magnitude);
}
}
If integer were a one-dimensional array of size Height * Width, you could have a single loop over all the elements, which would be faster. But this would require you to change all of your code.
Parallel processing?
We do not have information regarding the size of the images you are working with, and parallel processing can make you waste a lot of time, especially if the number of instructions per loop is small.
But these loops :
for (int j = 0; j <= Height - 1; j++)
{
for (int i = 0; i <= Width - 1; i++)
{
integer[i, j] = ((int)image[i, j].Magnitude);
}
}
and:
for (int j = 0; j <= Height - 1; j++)
{
for (int i = 0; i <= Width - 1; i++)
{
Complex tempComp = new Complex((double)image[i,j], 0.0);
comp[i,j] = tempComp;
}
}
could easily be parallelized over the rows or the columns.
Code Snippets
for (int j = 0; j <= Height - 1; j++)for (int j = 0; j < Height; j++)for (int j = 0; j <= Height - 1; j++)
{
for (int i = 0; i <= Width - 1; i++)
{
integer[i, j] = ((int)image[i, j].Magnitude);
}
}for (int j = 0; j <= Height - 1; j++)
{
for (int i = 0; i <= Width - 1; i++)
{
integer[i, j] = ((int)image[i, j].Magnitude);
}
}for (int j = 0; j <= Height - 1; j++)
{
for (int i = 0; i <= Width - 1; i++)
{
Complex tempComp = new Complex((double)image[i,j], 0.0);
comp[i,j] = tempComp;
}
}Context
StackExchange Code Review Q#139861, answer score: 2
Revisions (0)
No revisions yet.