MATLAB vs Python 2D convolution performance
I prototype algorithms in MATLAB, but I have a requirement to put them on a server that runs quite a bit of Python code, so I converted the code to Python and compared the two. The MATLAB implementation runs ~1000 times faster (judged by timing the function calls, no profiling). Does anyone know off hand why the Python performance is so slow?
MATLAB
% init random data
w = 800;
h = 1200;
hmap = zeros(w,h);
npts = 250;
for i = 1:npts
    hmap(randi(w),randi(h)) = hmap(randi(w),randi(h))+1;
end

% params
disksize = 251;
nbreaks = 25;
saturation = .9;
floorthresh = .05;

% Gaussian kernel and convolution
fh = fspecial('gaussian', disksize, disksize/7);
hmap = conv2(hmap, fh, 'same');

% scaling, partitioning etc
hmap = hmap/(max(max(hmap)));
hmap(hmap<floorthresh) = 0;
hmap = round(nbreaks * hmap)/nbreaks;
hmap = hmap * (1/saturation);

% show image
imshow(hmap, [0,1])
colormap('jet')
Python
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d as conv2

# test data parameters
w = 800
h = 1200
npts = 250

# generate data
xvals = np.random.randint(w, size=npts)
yvals = np.random.randint(h, size=npts)

# heatmap parameters
gaussiansize = 250
nbreaks = 25

# preliminary function definitions
def populatemat(w, h, xvals, yvals):
    container = np.zeros((w, h))
    for idx in range(0, xvals.size):
        x = xvals[idx]
        y = yvals[idx]
        container[x, y] += 1
    return container

def makegaussian(size, fwhm):
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)

# create data matrix
dmat = populatemat(w, h, xvals, yvals)

# build the 2D Gaussian kernel (note: this reuses the name h)
h = makegaussian(gaussiansize, fwhm=gaussiansize/2)

# convolve
dmat2 = conv2(dmat, h, mode='same')

# scaling etc
dmat2 = dmat2 / dmat2.max()
dmat2 = np.round(nbreaks*dmat2)/nbreaks

# show
plt.imshow(dmat2)
plt.show()
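For context on why the straight port is so slow: scipy.signal.convolve2d does a direct 2D convolution, which costs on the order of w*h*k^2 multiply-adds. With an 800x1200 image and a 250x250 kernel that is roughly 6e10 operations, while MATLAB's conv2 dispatches to optimised compiled code. The snippet below is not part of the original post; it is a rough timing sketch with made-up point data of the same shape, comparing the direct call against scipy.signal.fftconvolve (an FFT-based routine that computes the same full-kernel result) just to confirm that the direct convolution itself is the bottleneck.

import time
import numpy as np
from scipy.signal import convolve2d, fftconvolve

# Illustrative sizes mirroring the post (image w x h, kernel k x k); the point
# data here is made up purely for timing (duplicate indices just overwrite).
w, h, k = 800, 1200, 250
data = np.zeros((w, h))
data[np.random.randint(w, size=250), np.random.randint(h, size=250)] = 1.0

# 1D Gaussian with sigma = k/7, mirroring fspecial('gaussian', 251, 251/7)
g1 = np.exp(-0.5 * ((np.arange(k) - k // 2) / (k / 7.0)) ** 2)
kernel = np.outer(g1, g1)   # full 2D Gaussian kernel

t0 = time.perf_counter()
direct = convolve2d(data, kernel, mode='same')    # direct convolution: very slow for k = 250
t1 = time.perf_counter()
viafft = fftconvolve(data, kernel, mode='same')   # FFT-based: same result, much cheaper
t2 = time.perf_counter()

print('convolve2d : %.1f s' % (t1 - t0))
print('fftconvolve: %.1f s' % (t2 - t1))
print('max abs diff: %g' % np.abs(direct - viafft).max())

If the direct call dominates the runtime, the fix is to stop doing a full 250x250 multiply-add per output pixel, which is exactly what the separable filter in the accepted fix below achieves.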
OK, problem solved for me thanks to the suggestion in @Yves Daust's comments:
the scipy.ndimage.filters.gaussian_filter function
utilises the separability of the kernel and reduces the running time to within a single order of magnitude of the MATLAB implementation.
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter as gaussian

# test data parameters
w = 800
h = 1200
npts = 250

# generate data
xvals = np.random.randint(w, size=npts)
yvals = np.random.randint(h, size=npts)

# heatmap parameters
gaussiansize = 250
nbreaks = 25

# preliminary function definitions
def populatemat(w, h, xvals, yvals):
    container = np.zeros((w, h))
    for idx in range(0, xvals.size):
        x = xvals[idx]
        y = yvals[idx]
        container[x, y] += 1
    return container

# create data matrix
dmat = populatemat(w, h, xvals, yvals)

# convolve with a separable Gaussian (sigma matched to disksize/7 in the MATLAB code)
dmat2 = gaussian(dmat, gaussiansize/7)

# scaling etc
dmat2 = dmat2 / dmat2.max()
dmat2 = np.round(nbreaks*dmat2)/nbreaks

# show
plt.imshow(dmat2)
plt.show()
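The reason gaussian_filter is so much cheaper: a 2D Gaussian kernel is the outer product of two 1D Gaussians, so the filter can run a 1D pass along each axis (roughly 2k multiply-adds per pixel) instead of a k^2-tap 2D convolution; gaussian_filter does this per-axis decomposition internally. The check below is not from the original post and uses small, made-up array and kernel sizes purely to show that the separable and full 2D versions agree.

import numpy as np
from scipy.ndimage import convolve1d, convolve

# Small illustrative sizes so the full 2D reference convolution runs quickly.
k = 25
sigma = k / 7.0
g1 = np.exp(-0.5 * ((np.arange(k) - k // 2) / sigma) ** 2)
g1 /= g1.sum()              # normalised 1D Gaussian
g2 = np.outer(g1, g1)       # equivalent full 2D Gaussian kernel

a = np.random.rand(200, 300)

# Two 1D passes (rows, then columns) versus one full 2D convolution.
separable = convolve1d(convolve1d(a, g1, axis=0), g1, axis=1)
full2d = convolve(a, g2)

print(np.allclose(separable, full2d))   # True: same output, far fewer operations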