# Copyright (c) 2018 The Harmonica Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Equivalent sources for generic harmonic functions in Cartesian coordinates
"""
import warnings
import numpy as np
import verde as vd
import verde.base as vdb
from numba import jit
from sklearn.utils.validation import check_is_fitted
from .._forward.utils import distance_cartesian
from .utils import (
cast_fit_input,
jacobian_numba_parallel,
jacobian_numba_serial,
pop_extra_coords,
predict_numba_parallel,
predict_numba_serial,
)
class EquivalentSources(vdb.BaseGridder):
r"""
Equivalent sources for generic harmonic functions (gravity, magnetics).
These equivalent sources can be used for:
* Cartesian coordinates (geographic coordinates must be projected before use)
* Gravity and magnetic data (including derivatives)
* Single data types
* Interpolation
* Upward continuation
* Finite-difference based derivative calculations
They cannot be used for:
* Regional or global data where Earth's curvature must be taken into
account
* Joint inversion of multiple data types (e.g., gravity + gravity
gradients)
* Reduction to the pole of magnetic total field anomaly data
* Analytical derivative calculations
By default, the point sources are located beneath the observed
potential-field measurement points [Cooper2000]_ that are passed as
arguments to the :meth:`EquivalentSources.fit` method, producing the same
number of sources as data points.
Alternatively, we can reduce the number of sources by using block-averaged
sources [Soler2021]_: we divide the data region into blocks of equal size and
compute the median location of the observation points that fall inside each
block. Then, we locate one point source beneath each one of these
locations. The size of the blocks, which indirectly controls how many
sources will be created, can be specified through the ``block_size``
argument.
We recommend choosing a ``block_size`` no larger than the resolution of the
grid where interpolations will be carried out.
The depth of the sources can be controlled by the ``depth`` argument.
Each source is located beneath each data point or block-averaged location
at a depth equal to its elevation minus the value of the ``depth``
argument.
In both cases a positive value of ``depth`` locates sources *beneath* the
data points or the block-averaged locations, while a negative ``depth`` will
put the sources *above* them.
Custom source locations can be chosen by specifying the ``points``
argument, in which case the ``block_size`` and ``depth`` arguments will be
ignored.
The corresponding coefficient for each point source is estimated through
linear least-squares with damping (Tikhonov 0th order) regularization.
The Green's function used for each point source is the inverse of the
Euclidean distance between the observation point and the source:
.. math::
\phi(\bar{x}, \bar{x}') = \frac{1}{||\bar{x} - \bar{x}'||}
where :math:`\bar{x}` and :math:`\bar{x}'` are the coordinate vectors of
the observation point and the source, respectively.
Parameters
----------
damping : None or float
The positive damping regularization parameter. Controls how much
smoothness is imposed on the estimated coefficients.
If None, no regularization is used.
points : None or list of arrays (optional)
List containing the coordinates of the equivalent point sources.
Coordinates are assumed to be in the following order:
(``easting``, ``northing``, ``upward``).
If None, will place one point source beneath each observation point at
a fixed relative depth [Cooper2000]_.
Defaults to None.
depth : float
Parameter used to control the depth at which the point sources will be
located.
Each source is located beneath each data point (or block-averaged
location) at a depth equal to its elevation minus the ``depth`` value.
This parameter is ignored if *points* is specified.
Defaults to 500.
block_size : float, tuple = (s_north, s_east) or None
Size of the blocks used on block-averaged equivalent sources.
If a single value is passed, the blocks will have a square shape.
Alternatively, the dimensions of the blocks in the South-North and
West-East directions can be specified by passing a tuple.
If None, no block-averaging is applied.
This parameter is ignored if *points* is specified.
Defaults to None.
parallel : bool
If True, predictions and Jacobian building are carried out in
parallel through Numba's ``prange``, reducing the computation time.
If False, these tasks will be run on a single CPU. Defaults to True.
dtype : data-type
The desired data-type for the predictions and the Jacobian matrix.
Default to ``"float64"``.
Attributes
----------
points_ : 2d-array
Coordinates of the equivalent point sources.
coefs_ : array
Estimated coefficients of every point source.
region_ : tuple
The boundaries (``[W, E, S, N]``) of the data used to fit the
interpolator. Used as the default region for the
:meth:`~harmonica.EquivalentSources.grid` method.
References
----------
[Soler2021]_
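Examples
--------
A minimal usage sketch with synthetic data (the coordinate and data
values are illustrative only):

>>> import numpy as np
>>> import verde as vd
>>> # Synthetic observations on a small grid at 50 m height
>>> coordinates = vd.grid_coordinates(
...     region=(0, 1000, 0, 1000), spacing=100, extra_coords=50
... )
>>> data = np.ones_like(coordinates[0])
>>> # Fit block-averaged sources 300 m below the observation points
>>> eqs = EquivalentSources(depth=300, damping=1e-2, block_size=200)
>>> eqs = eqs.fit(coordinates, data)
>>> # Predict the field 100 m above the observations (upward continuation)
>>> upward_continued = eqs.predict(
...     (coordinates[0], coordinates[1], coordinates[2] + 100)
... )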
"""
# Set the default dimension names for generated outputs
# as xr.Dataset.
dims = ("northing", "easting")
# Overwrite the default name for the upward coordinate.
extra_coords_name = "upward"
# Define dispatcher for Numba functions with or without parallelization
_predict_kernel = {False: predict_numba_serial, True: predict_numba_parallel}
_jacobian_kernel = {False: jacobian_numba_serial, True: jacobian_numba_parallel}
def __init__(
self,
damping=None,
points=None,
depth=500,
block_size=None,
parallel=True,
dtype="float64",
):
self.damping = damping
self.points = points
self.depth = depth
self.block_size = block_size
self.parallel = parallel
self.dtype = dtype
# Define Green's function for Cartesian coordinates
self.greens_function = greens_func_cartesian
def fit(self, coordinates, data, weights=None):
"""
Fit the coefficients of the equivalent sources.
The data region is captured and used as default for the
:meth:`~harmonica.EquivalentSources.grid` method.
All input arrays must have the same shape.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``easting``, ``northing``, ``upward``, ...).
Only ``easting``, ``northing``, and ``upward`` will be used, all
subsequent coordinates will be ignored.
data : array
The data values of each data point.
weights : None or array
If not None, then the weights assigned to each data point.
Typically, this should be 1 over the data uncertainty squared.
Returns
-------
self
Returns this estimator instance for chaining operations.
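Examples
--------
A minimal sketch on synthetic data (illustrative coordinate and data
values), showing that one source is created per data point by default:

>>> import numpy as np
>>> import verde as vd
>>> coordinates = vd.grid_coordinates(
...     region=(0, 1000, 0, 1000), spacing=250, extra_coords=20
... )
>>> data = np.ones_like(coordinates[0])
>>> eqs = EquivalentSources(depth=300, damping=1e-2).fit(coordinates, data)
>>> eqs.coefs_.size
25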
"""
coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)
coordinates, data, weights = cast_fit_input(
coordinates, data, weights, self.dtype
)
# Capture the data region to use as a default when gridding.
self.region_ = vd.get_region(coordinates[:2])
coordinates = vdb.n_1d_arrays(coordinates, 3)
if self.points is None:
self.points_ = self._build_points(coordinates)
else:
self.points_ = tuple(
p.astype(self.dtype) for p in vdb.n_1d_arrays(self.points, 3)
)
jacobian = self.jacobian(coordinates, self.points_)
self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)
return self
def _build_points(self, coordinates):
"""
Generate coordinates of point sources based on the data points
Locate the point sources using a relative depth strategy
and apply block-averaging if ``block_size`` is not None.
The point sources will be placed beneath the (averaged) observation
points at a depth calculated as the elevation of the data point minus
the ``depth``.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``easting``, ``northing``, ``upward``, ...).
Only ``easting``, ``northing``, and ``upward`` will be used, all
subsequent coordinates will be ignored.
Returns
-------
points : tuple of arrays
Tuple containing the coordinates of the equivalent point sources,
in the following order: (``easting``, ``northing``, ``upward``).
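Examples
--------
A small sketch of the relative depth rule with a single observation
point (illustrative values, assuming ``block_size=None``):

>>> import numpy as np
>>> eqs = EquivalentSources(depth=500)
>>> easting, northing, upward = (
...     np.array([10.0]),
...     np.array([20.0]),
...     np.array([200.0]),
... )
>>> points = eqs._build_points((easting, northing, upward))
>>> points[2]
array([-300.])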
"""
if self.block_size is not None:
coordinates = self._block_average_coordinates(coordinates)
return (
coordinates[0],
coordinates[1],
coordinates[2] - self.depth,
)
def _block_average_coordinates(self, coordinates):
"""
Run a block-averaging process on observation points
Apply a median as the reduction function. The blocks will have the size
specified through the ``block_size`` argument on the constructor.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``easting``, ``northing``, ``upward``, ...).
Returns
-------
blocked_coords : tuple of arrays
Tuple containing the coordinates of the block-averaged observation
points.
"""
reducer = vd.BlockReduce(
spacing=self.block_size, reduction=np.median, drop_coords=False
)
# Must pass a dummy data array to BlockReduce.filter(), we choose an
# array full of zeros. We will ignore the returned reduced dummy array.
blocked_coords, _ = reducer.filter(coordinates, np.zeros_like(coordinates[0]))
return blocked_coords
def predict(self, coordinates):
"""
Evaluate the estimated equivalent sources on the given set of points.
Requires a fitted estimator (see
:meth:`~harmonica.EquivalentSources.fit`).
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``easting``, ``northing``, ``upward``, ...). Only
``easting``, ``northing`` and ``upward`` will be used, all
subsequent coordinates will be ignored.
Returns
-------
data : array
The data values evaluated on the given points.
"""
# We know the gridder has been fitted if it has the coefs_
check_is_fitted(self, ["coefs_"])
shape = np.broadcast(*coordinates[:3]).shape
size = np.broadcast(*coordinates[:3]).size
coordinates = tuple(
np.atleast_1d(i.astype(self.dtype)).ravel() for i in coordinates[:3]
)
data = np.zeros(size, dtype=self.dtype)
self._predict_kernel[self.parallel](
coordinates, self.points_, self.coefs_, data, self.greens_function
)
return data.reshape(shape)
def jacobian(self, coordinates, points):
"""
Make the Jacobian matrix for the equivalent sources.
Each column of the Jacobian is the Green's function for a single point
source evaluated on all observation points.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (``easting``, ``northing``, ``upward``).
Each array must be 1D.
points : tuple of arrays
Tuple of arrays containing the coordinates of the equivalent point
sources in the following order:
(``easting``, ``northing``, ``upward``).
Each array must be 1D.
Returns
-------
jacobian : 2D array
The (n_data, n_points) Jacobian matrix.
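Examples
--------
A quick shape check with a few synthetic observation points and two
point sources (illustrative values only):

>>> import numpy as np
>>> coordinates = (
...     np.array([0.0, 10.0, 20.0]),
...     np.array([0.0, 0.0, 0.0]),
...     np.array([100.0, 100.0, 100.0]),
... )
>>> points = (
...     np.array([0.0, 10.0]),
...     np.array([0.0, 0.0]),
...     np.array([-400.0, -400.0]),
... )
>>> EquivalentSources().jacobian(coordinates, points).shape
(3, 2)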
"""
# Compute Jacobian matrix
n_data = coordinates[0].size
n_points = points[0].size
jac = np.zeros((n_data, n_points), dtype=self.dtype)
self._jacobian_kernel[self.parallel](
coordinates, points, jac, self.greens_function
)
return jac
def grid(
self,
coordinates,
dims=None,
data_names=None,
projection=None,
**kwargs,
):
"""
Interpolate the data onto a regular grid.
The coordinates of the regular grid must be passed through the
``coordinates`` argument as a tuple containing three arrays in the
following order: ``(easting, northing, upward)``. They can be easily
created through the :func:`verde.grid_coordinates` function. If all
grid points should be at the same height, that height can be specified
through the ``extra_coords`` argument of :func:`verde.grid_coordinates`.
Use the *dims* and *data_names* arguments to set custom names for the
dimensions and the data field(s) in the output :class:`xarray.Dataset`.
Default names will be provided if none are given.
Parameters
----------
coordinates : tuple of arrays
Tuple of arrays containing the coordinates of the grid in the
following order: (easting, northing, upward).
The easting and northing arrays can be 1d or 2d; if they are 2d,
they must be part of a meshgrid.
The upward array should be a 2d array with the same shape as
easting and northing (if they are 2d arrays) or with a shape of
``(northing.size, easting.size)`` (if they are 1d arrays).
dims : list or None
The names of the northing and easting data dimensions,
respectively, in the output grid. Default is determined from the
``dims`` attribute of the class. Must be defined in the following
order: northing dimension, easting dimension.
**NOTE: This is an exception to the "easting" then
"northing" pattern but is required for compatibility with xarray.**
data_names : list or None
The name(s) of the data variables in the output grid. Defaults to
``['scalars']``.
projection : callable or None
If not None, then should be a callable object
``projection(easting, northing) -> (proj_easting, proj_northing)``
that takes in easting and northing coordinate arrays and returns
projected easting and northing coordinate arrays. This function
will be used to project the generated grid coordinates before
passing them into ``predict``. For example, you can use this to
generate a geographic grid from a Cartesian gridder.
Returns
-------
grid : xarray.Dataset
The interpolated grid. Metadata about the interpolator is written
to the ``attrs`` attribute.
See also
--------
:func:`verde.grid_coordinates`
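Examples
--------
A minimal sketch of gridding at a constant height after fitting on
synthetic data (illustrative values only):

>>> import numpy as np
>>> import verde as vd
>>> coordinates = vd.grid_coordinates(
...     region=(0, 1000, 0, 1000), spacing=250, extra_coords=20
... )
>>> data = np.ones_like(coordinates[0])
>>> eqs = EquivalentSources(depth=300, damping=1e-2).fit(coordinates, data)
>>> # Target grid coordinates at a constant height of 100 m
>>> grid_coords = vd.grid_coordinates(
...     region=(0, 1000, 0, 1000), spacing=100, extra_coords=100
... )
>>> grid = eqs.grid(grid_coords)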
"""
# We override the grid method from BaseGridder to change the docstring
# and to make it work only with the `coordinates` argument (no region,
# shape or spacing)
# Raise ValueError if any deprecated argument has been passed
deprecated_args = (
"upward" in kwargs,
"shape" in kwargs,
"region" in kwargs,
"spacing" in kwargs,
)
if any(deprecated_args):
raise ValueError(
"The 'upward', 'region', 'shape' and 'spacing' arguments have been "
+ "deprecated. "
+ "Please pass the coordinates of the target grid through the "
+ "'coordinates' argument."
)
# Raise warning if any kwargs has been passed
if kwargs:
args = "'" + "', '".join(list(kwargs.keys())) + "'"
warnings.warn(
f"The {args} arguments are being ignored. The 'grid' method "
+ "will not take any keyword arguments in the next Harmonica release",
FutureWarning,
)
# Grid data
grid = super().grid(
coordinates=coordinates,
dims=dims,
data_names=data_names,
projection=projection,
)
return grid
def scatter(
self,
region=None, # noqa: U100
size=300, # noqa: U100
random_state=0, # noqa: U100
dims=None, # noqa: U100
data_names=None, # noqa: U100
projection=None, # noqa: U100
**kwargs, # noqa: U100
):
"""
.. warning ::
Not implemented method. The scatter method will be deprecated on
Verde v2.0.0.
"""
raise NotImplementedError
def profile(
self,
point1,
point2,
upward,
size,
dims=None,
data_names=None,
projection=None,
**kwargs,
):
"""
Interpolate data along a profile between two points.
Generates the profile along a straight line assuming Cartesian
distances and the same upward coordinate for all points. Point
coordinates are generated by :func:`verde.profile_coordinates`. Other
arguments for this function can be passed as extra keyword arguments
(``kwargs``) to this method.
Use the *dims* and *data_names* arguments to set custom names for the
dimensions and the data field(s) in the output
:class:`pandas.DataFrame`. Default names are provided.
Includes the calculated Cartesian distance from *point1* for each data
point in the profile.
To specify *point1* and *point2* in a coordinate system that would
require projection to Cartesian (geographic longitude and latitude, for
example), use the ``projection`` argument. With this option, the input
points will be projected using the given projection function prior to
computations. The generated Cartesian profile coordinates will be
projected back to the original coordinate system. **Note that the
profile points are evenly spaced in projected coordinates, not the
original system (e.g., geographic)**.
Parameters
----------
point1 : tuple
The easting and northing coordinates, respectively, of the first
point.
point2 : tuple
The easting and northing coordinates, respectively, of the second
point.
upward : float
Upward coordinate of the profile points.
size : int
The number of points to generate.
dims : list or None
The names of the northing and easting data dimensions,
respectively, in the output dataframe. Default is determined from
the ``dims`` attribute of the class. Must be defined in the
following order: northing dimension, easting dimension.
**NOTE: This is an exception to the "easting" then
"northing" pattern but is required for compatibility with xarray.**
data_names : list or None
The name(s) of the data variables in the output dataframe. Defaults
to ``['scalars']`` for scalar data,
``['east_component', 'north_component']`` for 2D vector data, and
``['east_component', 'north_component', 'vertical_component']`` for
3D vector data.
projection : callable or None
If not None, then should be a callable object ``projection(easting,
northing, inverse=False) -> (proj_easting, proj_northing)`` that
takes in easting and northing coordinate arrays and returns
projected easting and northing coordinate arrays. Should also take
an optional keyword argument ``inverse`` (default to False) that if
True will calculate the inverse transform instead. This function
will be used to project the profile end points before generating
coordinates and passing them into ``predict``. It will also be used
to undo the projection of the coordinates before returning the
results.
Returns
-------
table : pandas.DataFrame
The interpolated values along the profile.
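Examples
--------
A minimal sketch of extracting a profile at a constant height after
fitting on synthetic data (illustrative values only):

>>> import numpy as np
>>> import verde as vd
>>> coordinates = vd.grid_coordinates(
...     region=(0, 1000, 0, 1000), spacing=250, extra_coords=20
... )
>>> data = np.ones_like(coordinates[0])
>>> eqs = EquivalentSources(depth=300, damping=1e-2).fit(coordinates, data)
>>> # 50 profile points along the diagonal of the region, at 100 m height
>>> table = eqs.profile((0, 0), (1000, 1000), upward=100, size=50)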
"""
# We override the profile method from BaseGridder so it takes the
# upward coordinate as a positional argument.
# Ignore extra_coords if passed
pop_extra_coords(kwargs)
# Create profile points and predict
table = super().profile(
point1,
point2,
size,
dims=dims,
data_names=data_names,
projection=projection,
extra_coords=upward,
**kwargs,
)
return table
@jit(nopython=True)
def greens_func_cartesian(east, north, upward, point_east, point_north, point_upward):
"""
Green's function for the equivalent sources in Cartesian coordinates
Uses Numba to speed things up.
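Examples
--------
A quick sanity check (illustrative values): a source located 100 m
directly below the observation point yields a value of 1 / 100.

>>> greens_func_cartesian(0.0, 0.0, 0.0, 0.0, 0.0, -100.0)
0.01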
"""
distance = distance_cartesian(
(east, north, upward), (point_east, point_north, point_upward)
)
return 1 / distance