Module: #include "diplib/nonlinear.h"
Nonlinear filters Nonlinear filters for noise reduction, detection, etc., excluding morphological filters.
Contents
 Reference
Functions

void dip::
PercentileFilter (dip::Image const& in, dip::Image& out, dip::dfloat percentile, dip::Kernel const& kernel = {}, dip::StringArray const& boundaryCondition = {})  Applies a percentile filter to
in
. 
void dip::
MedianFilter (dip::Image const& in, dip::Image& out, dip::Kernel const& kernel = {}, dip::StringArray const& boundaryCondition = {})  The median filter, a nonlinear smoothing filter.

void dip::
VarianceFilter (dip::Image const& in, dip::Image& out, dip::Kernel const& kernel = {}, dip::StringArray const& boundaryCondition = {})  Computes, for each pixel, the sample variance within a filter window around the pixel.

void dip::
SelectionFilter (dip::Image const& in, dip::Image const& control, dip::Image& out, dip::Kernel const& kernel = {}, dip::dfloat threshold = 0.0, dip::String const& mode = S::MINIMUM, dip::StringArray const& boundaryCondition = {})  Selects, for each pixel, a value from within the filter window, where a control image is minimal or maximal.

void dip::
Kuwahara (dip::Image const& in, dip::Image& out, dip::Kernel kernel = {}, dip::dfloat threshold = 0.0, dip::StringArray const& boundaryCondition = {})  The Kuwahara-Nagao operator, a nonlinear edge-preserving smoothing filter.

void dip::
NonMaximumSuppression (dip::Image const& gradmag, dip::Image const& gradient, dip::Image const& mask, dip::Image& out, dip::String const& mode = S::INTERPOLATE)  Non-maximum suppression, as used in the Canny edge detector.

void dip::
MoveToLocalMinimum (dip::Image const& bin, dip::Image const& weights, dip::Image& out)  Given a sparse binary image
bin
, moves each set pixel to the pixel in the 3x3 neighborhood with lowest weight
. 
void dip::
PeronaMalikDiffusion (dip::Image const& in, dip::Image& out, dip::uint iterations = 5, dip::dfloat K = 10, dip::dfloat lambda = 0.25, dip::String const& g = "Gauss")  Applies Perona-Malik anisotropic diffusion

void dip::
GaussianAnisotropicDiffusion (dip::Image const& in, dip::Image& out, dip::uint iterations = 5, dip::dfloat K = 10, dip::dfloat lambda = 0.25, dip::String const& g = "Gauss")  Applies iterative generic anisotropic diffusion using Gaussian derivatives

void dip::
RobustAnisotropicDiffusion (dip::Image const& in, dip::Image& out, dip::uint iterations = 5, dip::dfloat sigma = 10, dip::dfloat lambda = 0.25)  Applies iterative robust anisotropic diffusion

void dip::
CoherenceEnhancingDiffusion (dip::Image const& in, dip::Image& out, dip::dfloat derivativeSigma = 1, dip::dfloat regularizationSigma = 3, dip::uint iterations = 5, dip::StringSet const& flags = {})  Applies iterative coherence enhancing (anisotropic) diffusion

void dip::
AdaptiveGauss (dip::Image const& in, dip::ImageConstRefArray const& params, dip::Image& out, dip::FloatArray const& sigmas = {5.0,1.0}, dip::UnsignedArray const& orders = {0}, dip::dfloat truncation = 2.0, dip::UnsignedArray const& exponents = {0}, dip::String const& interpolationMethod = S::LINEAR, dip::String const& boundaryCondition = S::SYMMETRIC_MIRROR)  Adaptive Gaussian filtering.

void dip::
AdaptiveBanana (dip::Image const& in, dip::ImageConstRefArray const& params, dip::Image& out, dip::FloatArray const& sigmas = {5.0,1.0}, dip::UnsignedArray const& orders = {0}, dip::dfloat truncation = 2.0, dip::UnsignedArray const& exponents = {0}, dip::String const& interpolationMethod = S::LINEAR, dip::String const& boundaryCondition = S::SYMMETRIC_MIRROR)  Adaptive Gaussian filtering using curvature.

void dip::
FullBilateralFilter (dip::Image const& in, dip::Image const& estimate, dip::Image& out, dip::FloatArray spatialSigmas = {2.0}, dip::dfloat tonalSigma = 30.0, dip::dfloat truncation = 2.0, dip::StringArray const& boundaryCondition = {})  Bilateral filter, brute-force full kernel implementation

void dip::
QuantizedBilateralFilter (dip::Image const& in, dip::Image const& estimate, dip::Image& out, dip::FloatArray spatialSigmas = {2.0}, dip::dfloat tonalSigma = 30.0, dip::FloatArray tonalBins = {}, dip::dfloat truncation = 2.0, dip::StringArray const& boundaryCondition = {})  Quantized (piecewise linear) bilateral filter

void dip::
SeparableBilateralFilter (dip::Image const& in, dip::Image const& estimate, dip::Image& out, dip::BooleanArray const& process = {}, dip::FloatArray spatialSigmas = {2.0}, dip::dfloat tonalSigma = 30.0, dip::dfloat truncation = 2.0, dip::StringArray const& boundaryCondition = {})  Separable bilateral filter, a very fast approximation

void dip::
BilateralFilter (dip::Image const& in, dip::Image const& estimate, dip::Image& out, dip::FloatArray const& spatialSigmas = {2.0}, dip::dfloat tonalSigma = 30.0, dip::dfloat truncation = 2.0, dip::String const& method = "xysep", dip::StringArray const& boundaryCondition = {})  Bilateral filter, convenience function that allows selecting an implementation
Function documentation
void
dip::PercentileFilter (dip::Image const& in,
dip::Image& out,
dip::dfloat percentile,
dip::Kernel const& kernel = {},
dip::StringArray const& boundaryCondition = {})
Applies a percentile filter to in
.
Determines the percentile
percentile within the filter window, and assigns that value to the output pixel.
See also dip::RankFilter
, which does the same thing but uses a rank instead of a percentile as input argument.
The size and shape of the filter window is given by kernel
, which you can define through a default
shape with corresponding sizes, or through a binary image. See dip::Kernel
.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
void
dip::MedianFilter (dip::Image const& in,
dip::Image& out,
dip::Kernel const& kernel = {},
dip::StringArray const& boundaryCondition = {})
The median filter, a nonlinear smoothing filter.
The size and shape of the filter window is given by kernel
, which you can define through a default
shape with corresponding sizes, or through a binary image. See dip::Kernel
.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
Calls dip::PercentileFilter
with the percentile
parameter set to 50.
void
dip::VarianceFilter (dip::Image const& in,
dip::Image& out,
dip::Kernel const& kernel = {},
dip::StringArray const& boundaryCondition = {})
Computes, for each pixel, the sample variance within a filter window around the pixel.
The size and shape of the filter window is given by kernel
, which you can define through a default
shape with corresponding sizes, or through a binary image. See dip::Kernel
.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
Uses dip::FastVarianceAccumulator
for the computation.
void
dip::SelectionFilter (dip::Image const& in,
dip::Image const& control,
dip::Image& out,
dip::Kernel const& kernel = {},
dip::dfloat threshold = 0.0,
dip::String const& mode = S::MINIMUM,
dip::StringArray const& boundaryCondition = {})
Selects, for each pixel, a value from within the filter window, where a control image is minimal or maximal.
For each pixel, within the filter window, looks for the pixel with the lowest value (mode
is "minimum"
) or
highest value (mode
is "maximum"
), and takes the value from in
at that location as the output value. To
prevent a staircase effect in the output, where many pixels use the same input value, a threshold
can be
specified. If it is a positive value, then the lowest (or highest) value found must be threshold
lower (or
higher) than the central pixel, otherwise the central pixel is used.
Ties are solved by picking the value closest to the central pixel. Multiple control pixels with the same value and at the same distance to the central pixel are solved arbitrarily (in the current implementation, the first of these pixels encountered is used).
The KuwaharaNagao operator (see dip::Kuwahara
) is implemented in terms of the SelectionFilter
:
Image value = dip::Uniform( in, kernel ); Image control = dip::VarianceFilter( in, kernel ); kernel.Mirror(); Image out = dip::SelectionFilter( value, control, kernel );
Note that the following reproduces the result of the erosion (albeit in a very costly manner):
Image out = dip::SelectionFilter( in, in, kernel );
Nonetheless, this can be used to implement color morphology, for example (note there are much better approaches to
build the control
image):
// Image in is a color image Image control = dip::SumTensorElements( in ); Image out = dip::SelectionFilter( in, control, kernel, 0.0, "maximum" );
The size and shape of the filter window is given by kernel
, which you can define through a default
shape with corresponding sizes, or through a binary image. See dip::Kernel
.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
control
must be a real-valued scalar image. in
can be of any data type and tensor size. out
will be of
the same size, tensor size, and data type as in
.
void
dip::Kuwahara (dip::Image const& in,
dip::Image& out,
dip::Kernel kernel = {},
dip::dfloat threshold = 0.0,
dip::StringArray const& boundaryCondition = {})
The Kuwahara-Nagao operator, a nonlinear edge-preserving smoothing filter.
For each pixel, shifts the filtering window such that the variance within the window is minimal, then computes the average value as the output. The shift of the window is always such that the pixel under consideration stays within the window.
In the two original papers describing the method (Kuwahara et al., 1980; Nagao and Matsuyama, 1979), a limited number of subwindows within the filtering window were examined (4 and 8, respectively). This function implements a generalized version that allows as many different shifts as there are pixels in the filtering window (Bakker et al., 1999).
As described by Bakker (2002), this operator produces artificial boundaries in flat regions. This is because,
due to noise, one position of the filtering window will have the lowest variance in its neighborhood, and therefore
that position will be selected for all output pixels in the neighborhood. The solution we implement here is
requiring that the variance at the minimum be lower than the variance when the window is not shifted. The parameter
threshold
controls how much lower the minimum must be. If the neighborhood is uniform w.r.t. this threshold
parameter, then the filtering window is not shifted.
The size and shape of the filter window is given by kernel
, which you can define through a default
shape with corresponding sizes, or through a binary image. See dip::Kernel
.
If in
is non-scalar (e.g. a color image), then the variance is computed per-channel, and the maximum variance
at each pixel (i.e. the maximum across tensor elements) is used to direct the filtering for all channels.
If the Kuwahara filter were applied to each channel independently, false colors would appear.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
void
dip::NonMaximumSuppression (dip::Image const& gradmag,
dip::Image const& gradient,
dip::Image const& mask,
dip::Image& out,
dip::String const& mode = S::INTERPOLATE)
Non-maximum suppression, as used in the Canny edge detector.
out
contains the value of gradmag
where gradmag
is a local maximum in the orientation
specified by the vector image gradient
. Note that gradmag
does not need to be the magnitude
of gradient
, and that only the direction of the vectors (or orientation) is used.
gradmag
and gradient
must be of the same floating-point type (i.e. they are either
dip::DT_SFLOAT
or dip::DT_DFLOAT
). gradmag
must be scalar, and gradient
must have as
many tensor elements as spatial dimensions. In the 1D case, gradient
is not used.
If gradmag
is not forged, the magnitude (dip::Norm
) of gradient
is used instead.
mask
, if forged, must be a binary scalar image. Only those pixels are evaluated that are set in mask
.
All three input images (if forged) must have the same spatial dimensions.
mode
can be “interpolate” or “round”. The interpolating mode is only valid in 2D; the gradient magnitude
is interpolated to take into account all information present in the direction of the gradient. The rounding
mode rounds the angles to point to the nearest neighbor.
For higher-dimensional images, gradients are always rounded.
void
dip::MoveToLocalMinimum (dip::Image const& bin,
dip::Image const& weights,
dip::Image& out)
Given a sparse binary image bin
, moves each set pixel to the pixel in the 3x3 neighborhood with
lowest weight
.
The neighborhood used is 3x3 in 2D, or 3x3x3 in 3D.
In other words, the connectivity is equal to bin.Dimensionality()
.
Note that the output doesn’t necessarily have the same number of set pixels as the bin
input. However,
it will not have more. To move pixels over a larger distance, call this function repeatedly.
out
will have the same properties as bin
. bin
must be binary, scalar, and have at least one dimension.
weights
must be real-valued, scalar, and of the same sizes as bin
. No singleton expansion is applied.
void
dip::PeronaMalikDiffusion (dip::Image const& in,
dip::Image& out,
dip::uint iterations = 5,
dip::dfloat K = 10,
dip::dfloat lambda = 0.25,
dip::String const& g = "Gauss")
Applies Perona-Malik anisotropic diffusion
Applies iterations
steps of the anisotropic diffusion as proposed by Perona and Malik:
$I^{t+1} = I^t + \lambda \sum_d g\left( \left| \Delta_d I^t \right| \right) \, \Delta_d I^t$
where $\lambda$ is set with the lambda
parameter, $d$ are each of the cardinal directions,
$\Delta_d$ is the finite difference in direction $d$,
and $g$ is a monotonically decreasing function, selected with the g
parameter, and modulated
by the K
parameter:
"Gauss"
: $g(x) = \exp\left( -(x/K)^2 \right)$, "quadratic"
: $g(x) = 1 / \left( 1 + (x/K)^2 \right)$, "exponential"
: $g(x) = \exp\left( -x/K \right)$
The diffusion is generalized to any image dimensionality. in
must be scalar and real-valued.
void
dip::GaussianAnisotropicDiffusion (dip::Image const& in,
dip::Image& out,
dip::uint iterations = 5,
dip::dfloat K = 10,
dip::dfloat lambda = 0.25,
dip::String const& g = "Gauss")
Applies iterative generic anisotropic diffusion using Gaussian derivatives
Applies iterations
steps of the generic anisotropic diffusion equation:
$I^{t+1} = I^t + \lambda \, \mathrm{div}\left( g\left( \| \nabla I^t \| \right) \nabla I^t \right)$
where $\lambda$ is set with the lambda
parameter, and $\nabla$ and $\mathrm{div}$ are computed using
Gaussian gradients (dip::Gradient
and dip::Divergence
),
and $g$ is a monotonically decreasing function, selected with the g
parameter, and modulated
by the K
parameter:
"Gauss"
: $g(x) = \exp\left( -(x/K)^2 \right)$, "quadratic"
: $g(x) = 1 / \left( 1 + (x/K)^2 \right)$, "exponential"
: $g(x) = \exp\left( -x/K \right)$
Note that the parameters here are identical to those in dip::PeronaMalikDiffusion
. The Perona-Malik diffusion
is a discrete differences approximation to the generic anisotropic diffusion equation. This function uses Gaussian
gradients as a discretization strategy.
The diffusion is generalized to any image dimensionality. in
must be scalar and real-valued.
void
dip::RobustAnisotropicDiffusion (dip::Image const& in,
dip::Image& out,
dip::uint iterations = 5,
dip::dfloat sigma = 10,
dip::dfloat lambda = 0.25)
Applies iterative robust anisotropic diffusion
Applies iterations
steps of the robust anisotropic diffusion using Tukey's biweight (Black et al., 1998):
$I^{t+1} = I^t + \lambda \sum_d \psi\left( \Delta_d I^t, \sigma \right)$
where $\lambda$ is set with the lambda
parameter, $d$ are each of the cardinal directions,
$\Delta_d$ is the finite difference in direction $d$, and $\psi(x,\sigma) = \begin{cases} x \left[ 1 - (x/\sigma)^2 \right]^2, & |x| < \sigma \\ 0, & \text{otherwise} \end{cases}$, where $\sigma$
is set by the sigma
The diffusion is generalized to any image dimensionality. in
must be scalar and real-valued.
void
dip::CoherenceEnhancingDiffusion (dip::Image const& in,
dip::Image& out,
dip::dfloat derivativeSigma = 1,
dip::dfloat regularizationSigma = 3,
dip::uint iterations = 5,
dip::StringSet const& flags = {})
Applies iterative coherence enhancing (anisotropic) diffusion
Applies iterations
steps of the coherence enhancing diffusion:
where is set with the lambda
parameter, and is the diffusion tensor, derived from
the structure tensor (see dip::StructureTensor
). derivativeSigma
and regularizationSigma
are the sigmas for the Gaussian derivatives and smoothing in the structure tensor. The gradient and
divergence are computed using Gaussian derivatives also, using a sigma of 0.5.
flags
allows the selection of different computational options:

"const"
: is taken as constant, simplifying the computation from to , reducing the number of filters to apply from 4 to 3. The opposite is "variable"
, which is the default. 
"all"
: is obtained in a simple manner from the structure tensor, where all eigenvalues of are adjusted. The opposite is "first"
, which is the default. See below for more information. 
"resample"
: the output is twice the size of the input. Computations are always done on the larger image, this flag returns the larger image instead of the subsampled one.
This function can be applied to images with two or more dimensions. in
must be scalar and real-valued.
The "first"
flag is only supported for 2D images, if in
has more dimensions, the "first"
flag is
ignored and "all"
is assumed.
In "all"
mode, is composed from the eigen decomposition of the structure tensor :
with
In "first"
mode, is composed similarly, but the two eigenvalues of , , are determined
from the eigenvalues of (with ) as follows:
is a magic number set to 0.01, and is set to the median of all values across the image (as proposed by Lucas van Vliet).
void
dip::AdaptiveGauss (dip::Image const& in,
dip::ImageConstRefArray const& params,
dip::Image& out,
dip::FloatArray const& sigmas = {5.0,1.0},
dip::UnsignedArray const& orders = {0},
dip::dfloat truncation = 2.0,
dip::UnsignedArray const& exponents = {0},
dip::String const& interpolationMethod = S::LINEAR,
dip::String const& boundaryCondition = S::SYMMETRIC_MIRROR)
Adaptive Gaussian filtering.
One or more parameter images control the adaptivity. The meaning of the parameter images depend on the dimensionality of the input image. The current implementation only supports 2D and 3D images.

2D:
params[0]
is the angle of the orientationparams[1]
(optional) is a tensor image with the local kernel scale

3D (with 1D structures):
params[0]
is the polar coordinate phi of the first orientationparams[1]
is the polar coordinate theta of the first orientationparams[2]
(optional) is a tensor image with the local kernel scale

3D (with 2D structures):
params[0]
is the polar coordinate phi of the first orientationparams[1]
is the polar coordinate theta of the first orientationparams[2]
is the polar coordinate phi of the second orientationparams[3]
is the polar coordinate theta of the second orientationparams[4]
(optional) is a tensor image with the local kernel scale
For intrinsic 1D structures, pass one set of polar coordinates. For intrinsic 2D structures, pass two.
The kernel scale parameter image is interpreted as follows. Each input tensor element corresponds to a tensor row in the scale image. Each tensor column in the scale image corresponds to a convolution kernel dimension. As an example, consider a 2D RGB image. The scale tensor is then interpreted as:
The kernel is first scaled and then rotated before it is applied. The scale parameter image is automatically expanded if the image or the tensor are too small. If the scale tensor has one element, it is expanded to all input tensor elements and kernel dimensions. If the scale tensor has a single column, each element is expanded to all kernel dimensions. For more information on scaling, also see “Structureadaptive applicability function” in Pham et al. (2006).
The sigma for each kernel dimension is passed by sigmas
.
For intrinsic 1D structures, the first value is along the contour, the second perpendicular to it.
For intrinsic 2D structures, the first two are in the plane, whereas the other is perpendicular to them.
If a value is zero, no convolution is done in this direction.
Together with sigmas
, the orders
, truncation
and exponents
parameters define the Gaussian kernel.
interpolationMethod
can be "linear"
(default) or "zero order"
(faster).
As of yet, boundaryCondition
can only be “mirror” or “add zeros”.
Example:
dip::Image in = dip::ImageReadTIFF( "erika.tif" ); // Defined in "diplib/file_io.h" dip::Image st = dip::StructureTensor( in, {}, 1, 3 ); // Defined in "diplib/analysis.h" dip::ImageArray params = dip::StructureTensorAnalysis( st, { "orientation" } ); dip::Image out = dip::AdaptiveGauss( in, dip::CreateImageConstRefArray( params ), { 2, 0 } );
void
dip::AdaptiveBanana (dip::Image const& in,
dip::ImageConstRefArray const& params,
dip::Image& out,
dip::FloatArray const& sigmas = {5.0,1.0},
dip::UnsignedArray const& orders = {0},
dip::dfloat truncation = 2.0,
dip::UnsignedArray const& exponents = {0},
dip::String const& interpolationMethod = S::LINEAR,
dip::String const& boundaryCondition = S::SYMMETRIC_MIRROR)
Adaptive Gaussian filtering using curvature.
The parameter images control the adaptivity. The current implementation only supports 2D images:
params[0]
is the angle of the orientationparams[1]
is the curvatureparams[2]
(optional) is a tensor image with the local kernel scale
The kernel scale parameter image is interpreted as follows. Each input tensor element corresponds to a tensor row in the scale image. Each tensor column in the scale image corresponds to a convolution kernel dimension. As an example, consider a 2D RGB image. The scale tensor is then interpreted as:
The kernel is first scaled and then rotated before it is applied. The scale parameter image is automatically expanded if the image or the tensor are too small. If the scale tensor has one element, it is expanded to all input tensor elements and kernel dimensions. If the scale tensor has a single column, each element is expanded to all kernel dimensions. For more information on scaling, also see “Structureadaptive applicability function” in Pham et al. (2006).
The sigma for each kernel dimension is passed by sigmas
. The first value is along the contour,
the second perpendicular to it. If a value is zero, no convolution is done in this direction.
Together with sigmas
, the orders
, truncation
and exponents
parameters define the Gaussian kernel.
interpolationMethod
can be "linear"
(default) or "zero order"
(faster).
As of yet, boundaryCondition
can only be “mirror” or “add zeros”.
Example:
dip::Image in = dip::ImageReadTIFF( "erika.tif" ); // Defined in "diplib/file_io.h" dip::Image st = dip::StructureTensor( in, {}, {1}, {3} ); // Defined in "diplib/analysis.h" dip::ImageArray params = dip::StructureTensorAnalysis( st, { "orientation", "curvature" } ); dip::Image out = dip::AdaptiveBanana( in, dip::CreateImageConstRefArray( params ), { 2, 0 } );
void
dip::FullBilateralFilter (dip::Image const& in,
dip::Image const& estimate,
dip::Image& out,
dip::FloatArray spatialSigmas = {2.0},
dip::dfloat tonalSigma = 30.0,
dip::dfloat truncation = 2.0,
dip::StringArray const& boundaryCondition = {})
Bilateral filter, brute-force full kernel implementation
The bilateral filter is a nonlinear edge-preserving smoothing filter. It locally averages input pixels,
weighting them with both the spatial distance to the origin as well as the intensity difference with the
pixel at the origin. The weights are Gaussian, and therefore there are two sigmas as parameters. The
spatial sigma can be defined differently for each image dimension in spatialSigmas
. tonalSigma
determines
what similar intensities are. truncation
applies to the spatial dimension only, and determines, together
with spatialSigmas
, the size of the neighborhood and thus its computational cost.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
If in
is not scalar, each tensor element will be filtered independently. For color images, this leads to
false colors at edges.
The optional image estimate
, if forged, is used as the tonal center when computing the kernel at each pixel.
That is, each point in the kernel is computed based on the distance of the corresponding pixel value in in
to the value of the pixel at the origin of the kernel in estimate
. If not forged, in
is used for estimate
.
estimate
must be real-valued and have the same sizes and number of tensor elements as in
.
void
dip::QuantizedBilateralFilter (dip::Image const& in,
dip::Image const& estimate,
dip::Image& out,
dip::FloatArray spatialSigmas = {2.0},
dip::dfloat tonalSigma = 30.0,
dip::FloatArray tonalBins = {},
dip::dfloat truncation = 2.0,
dip::StringArray const& boundaryCondition = {})
Quantized (piecewise linear) bilateral filter
The bilateral filter is a nonlinear edge-preserving smoothing filter. It locally averages input pixels,
weighting them with both the spatial distance to the origin as well as the intensity difference with the
pixel at the origin. The weights are Gaussian, and therefore there are two sigmas as parameters. The
spatial sigma can be defined differently for each image dimension in spatialSigmas
. tonalSigma
determines
what similar intensities are. truncation
applies to the spatial dimension only, and determines, together
with spatialSigmas
, the size of the neighborhood and thus its computational cost.
This version of the filter applies a piecewise linear approximation as described by Durand and Dorsey, but without subsampling. This requires a significant amount of memory, and is efficient only for larger spatial sigmas.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
in
must be scalar and real-valued.
The optional image estimate
, if forged, is used as the tonal center when computing the kernel at each pixel.
That is, each point in the kernel is computed based on the distance of the corresponding pixel value in in
to the value of the pixel at the origin of the kernel in estimate
. If not forged, in
is used for estimate
.
estimate
must be real-valued and have the same sizes and number of tensor elements as in
.
void
dip::SeparableBilateralFilter (dip::Image const& in,
dip::Image const& estimate,
dip::Image& out,
dip::BooleanArray const& process = {},
dip::FloatArray spatialSigmas = {2.0},
dip::dfloat tonalSigma = 30.0,
dip::dfloat truncation = 2.0,
dip::StringArray const& boundaryCondition = {})
Separable bilateral filter, a very fast approximation
The bilateral filter is a nonlinear edge-preserving smoothing filter. It locally averages input pixels,
weighting them with both the spatial distance to the origin as well as the intensity difference with the
pixel at the origin. The weights are Gaussian, and therefore there are two sigmas as parameters. The
spatial sigma can be defined differently for each image dimension in spatialSigmas
. tonalSigma
determines
what similar intensities are. truncation
applies to the spatial dimension only, and determines, together
with spatialSigmas
, the size of the neighborhood and thus its computational cost.
This version of the filter applies a 1D bilateral filter along each of the image dimensions, approximating the result of the bilateral filter with a much reduced computational cost.
boundaryCondition
indicates how the boundary should be expanded in each dimension. See dip::BoundaryCondition
.
If in
is not scalar, each tensor element will be filtered independently. For color images, this leads to
false colors at edges.
The optional image estimate
, if forged, is used as the tonal center when computing the kernel at each pixel.
That is, each point in the kernel is computed based on the distance of the corresponding pixel value in in
to the value of the pixel at the origin of the kernel in estimate
. If not forged, in
is used for estimate
.
estimate
must be real-valued and have the same sizes and number of tensor elements as in
.
void
dip::BilateralFilter (dip::Image const& in,
dip::Image const& estimate,
dip::Image& out,
dip::FloatArray const& spatialSigmas = {2.0},
dip::dfloat tonalSigma = 30.0,
dip::dfloat truncation = 2.0,
dip::String const& method = "xysep",
dip::StringArray const& boundaryCondition = {})
Bilateral filter, convenience function that allows selecting an implementation
The method
can be set to one of the following:
"full"
: the brute-force implementation, using the full kernel, calls dip::FullBilateralFilter
."xysep"
(default): xy-separable approximation, calls dip::SeparableBilateralFilter
."pwlinear"
: piecewise linear approximation (quantized), calls dip::QuantizedBilateralFilter
. The bins are automatically computed.
See the linked functions for details on the other parameters.