diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index f790ce819..8823d07b0 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -3,18 +3,18 @@ on: name: release-please jobs: continuous-integration: - name: Continuous integration ${{ matrix.os }} python ${{ matrix.python-version }} + name: Continuous integration ${{ matrix.os }} python ${{ matrix.python-version }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - os: ["ubuntu-latest", "windows-latest"] #"macos-latest", - python-version: ["3.8","3.9","3.10"] + os: ${{ fromJSON(vars.BUILD_OS)}} + python-version: ${{ fromJSON(vars.PYTHON_VERSIONS)}} steps: - uses: actions/checkout@v2 - uses: conda-incubator/setup-miniconda@v2 with: - python-version: ${{ matrix.python }} + python-version: ${{ matrix.python-version }} - name: Installing dependencies shell: bash -l {0} run: | @@ -22,11 +22,11 @@ jobs: - name: Checking formatting of code shell: bash -l {0} run: | - # stop the build if there are Python syntax errors or undefined names - flake8 LoopStructural --count --select=E9,F63,F7,F82 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 LoopStructural --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Building and install + # stop the build if there are Python syntax errors or undefined names + flake8 LoopStructural --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 LoopStructural --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Building and install shell: bash -l {0} run: | python setup.py install build_ext --inplace @@ -78,109 +78,88 @@ jobs: with: branch: gh-pages # The branch the action should deploy to. folder: docs/build/html # The folder the action should deploy.
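Note on the matrix change above: the OS and Python lists now come from repository-level Actions variables rather than hard-coded arrays, and fromJSON() requires those variables to contain valid JSON arrays. A minimal sketch of plausible contents for BUILD_OS and PYTHON_VERSIONS (the exact values are an assumption, mirroring the lists this diff removes), checked here with Python's json module:

    import json

    # Hypothetical contents of the repository Actions variables read by
    # fromJSON() in the matrix above; the values themselves are not part of this diff.
    BUILD_OS = '["ubuntu-latest", "windows-latest"]'
    PYTHON_VERSIONS = '["3.8", "3.9", "3.10"]'

    # fromJSON() fails the workflow on malformed JSON, so both must parse cleanly
    assert json.loads(BUILD_OS) == ["ubuntu-latest", "windows-latest"]
    assert json.loads(PYTHON_VERSIONS) == ["3.8", "3.9", "3.10"]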
- + conda-deploy: name: Uploading to Loop3d for python ${{ matrix.os }}) needs: ["documentation-test", "continuous-integration"] - runs-on: ${{ matrix.os }} + runs-on: ${{matrix.os}} strategy: fail-fast: false matrix: - os: ["ubuntu-latest", "windows-latest"] - python-version: ["3.10","3.9","3.8"] + os: ["ubuntu-latest"] + python-version: ${{ fromJSON(vars.PYTHON_VERSIONS)}} steps: - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true python-version: ${{ matrix.python-version }} - + - uses: actions/checkout@v2 - name: update submodules -# shell: bash -l {0} + # shell: bash -l {0} run: | - git submodule update --init --recursive - - name: Add msbuild to PATH - if: matrix.os == 'windows-latest' - uses: microsoft/setup-msbuild@v1.0.2 - - name: Conda build' + git submodule update --init --recursive + - name: Conda build env: ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} shell: bash -l {0} run: | - conda install -c conda-forge conda-build scikit-build numpy cython anaconda-client -y - conda build -c anaconda -c conda-forge -c loop3d --output-folder conda conda --numpy 1.21 - conda install anaconda-client -y + conda install -c conda-forge conda-build scikit-build numpy cython anaconda-client -y + conda build -c anaconda -c conda-forge -c loop3d --output-folder conda conda + conda convert -p all conda/linux-64/*.tar.bz2 -f -o conda + conda install anaconda-client -y - name: upload artifacts uses: actions/upload-artifact@v3 with: name: conda path: conda - make_sdist: needs: ["documentation-test", "continuous-integration"] name: Make SDist runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - - name: Build SDist - run: | - pip install numpy cython - python setup.py sdist + - uses: actions/checkout@v3 - - uses: actions/upload-artifact@v3 - with: - name: dist - path: dist/*.tar.gz - - build_wheels: - needs: ["documentation-test", "continuous-integration"] - name: Build wheels - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v2 + - name: Build SDist + run: | + pip install numpy + python setup.py sdist + pip wheel -w wheelhouse . 
- - name: Build wheels - uses: pypa/cibuildwheel@v2.12.0 - env: - CIBW_ARCHS_MACOS: x86_64 universal2 - CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-* cp310*" - CIBW_BEFORE_BUILD: "pip install numpy cython" #make sure numpy is the same version as required by LS - uses: actions/upload-artifact@v3 with: name: dist - path: ./wheelhouse/*.whl - + path: dist/*.tar.gz + - uses: actions/upload-artifact@v3 + with: + name: dist + path: wheelhouse/*.whl + upload_to_pypi: - needs: ["release-please","build_wheels","make_sdist","conda-deploy"] + needs: ["release-please", "make_sdist", "conda-deploy"] runs-on: ubuntu-latest if: ${{ needs.release-please.outputs.release_created }} steps: - - uses: actions/download-artifact@v3 - with: + - uses: actions/download-artifact@v3 + with: name: dist path: dist - - uses: actions/download-artifact@v3 - with: + - uses: actions/download-artifact@v3 + with: name: conda path: conda - - uses: pypa/gh-action-pypi-publish@v1.6.4 - with: - skip_existing: true - verbose: true - user: ${{ secrets.PYPI_USERNAME }} - password: ${{ secrets.PYPI_PASSWORD }} - - uses: conda-incubator/setup-miniconda@v2 - - name: upload all files to conda-forge - shell: bash -l {0} - env: - ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} - run: | - conda install -c anaconda anaconda-client -y - anaconda upload --label main conda/*/*.tar.bz2 - + - uses: pypa/gh-action-pypi-publish@v1.6.4 + with: + skip_existing: true + verbose: true + user: ${{ secrets.PYPI_USERNAME }} + password: ${{ secrets.PYPI_PASSWORD }} + - uses: conda-incubator/setup-miniconda@v2 + - name: upload all files to conda-forge + shell: bash -l {0} + env: + ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} + run: | + conda install -c anaconda anaconda-client -y + anaconda upload --label main conda/*/*.tar.bz2 diff --git a/.gitignore b/.gitignore index a542281ae..fc3a44841 100644 --- a/.gitignore +++ b/.gitignore @@ -132,3 +132,7 @@ examples/*.png dev/scalar_field dev/unconf docs/source/sg_execution_times.rst +conda/index.html +conda/channeldata.json +conda/noarch +conda/win-64 \ No newline at end of file diff --git a/LoopStructural/__init__.py b/LoopStructural/__init__.py index 57594fff4..a0a09d45f 100644 --- a/LoopStructural/__init__.py +++ b/LoopStructural/__init__.py @@ -1,6 +1,6 @@ """ -LoopStructural API -======================= +LoopStructural +============== """ diff --git a/LoopStructural/api/__init__.py b/LoopStructural/api/__init__.py index e69de29bb..7aab14e9b 100644 --- a/LoopStructural/api/__init__.py +++ b/LoopStructural/api/__init__.py @@ -0,0 +1,2 @@ +from ._interpolate import LoopInterpolator +from ._surface import LoopIsosurfacer diff --git a/LoopStructural/api/_interpolate.py b/LoopStructural/api/_interpolate.py index 0fe8fb9c2..a92968ae9 100644 --- a/LoopStructural/api/_interpolate.py +++ b/LoopStructural/api/_interpolate.py @@ -6,6 +6,7 @@ from LoopStructural.interpolators import ( GeologicalInterpolator, InterpolatorFactory, + InterpolatorType, ) from LoopStructural.utils import BoundingBox from LoopStructural.utils import getLogger @@ -18,7 +19,7 @@ def __init__( self, bounding_box: BoundingBox, dimensions: int = 3, - type: str = "FDI", + type=InterpolatorType.FINITE_DIFFERENCE, nelements: int = 1000, ): """Scikitlearn like interface for LoopStructural interpolators diff --git a/LoopStructural/api/_surface.py b/LoopStructural/api/_surface.py index 4e6ace9e3..6f7acc21c 100644 --- a/LoopStructural/api/_surface.py +++ b/LoopStructural/api/_surface.py @@ -1,5 +1,6 @@ -from typing import Optional, Union +from typing 
import Optional, Union +from typing import Optional, Union, Callable import numpy as np +import numpy.typing as npt from LoopStructural.utils import getLogger logger = getLogger(__name__) @@ -13,23 +14,81 @@ from LoopStructural.utils import BoundingBox from LoopStructural.datatypes import Surface -surface_list = dict[str, tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]] +surface_list = dict[ + str, tuple[npt.ArrayLike, npt.ArrayLike, npt.ArrayLike, npt.ArrayLike] +] class LoopIsosurfacer: - def __init__(self, bounding_box: BoundingBox, interpolator: GeologicalInterpolator): + def __init__( + self, + bounding_box: BoundingBox, + interpolator: Optional[GeologicalInterpolator] = None, + callable: Optional[Callable[[npt.ArrayLike], npt.ArrayLike]] = None, + ): + """Extract isosurfaces from a geological interpolator or a callable function. + + + Parameters + ---------- + bounding_box : BoundingBox + bounding box defining the volume to extract surfaces from + interpolator : Optional[GeologicalInterpolator], optional + interpolator object, by default None + callable : Optional[Callable[[npt.ArrayLike], npt.ArrayLike]], optional + callable object, by default None + + Raises + ------ + ValueError + if neither interpolator nor callable is specified + ValueError + if both interpolator and callable are specified + ValueError + if no callable could be resolved from the arguments + """ self.bounding_box = bounding_box - self.interpolator = interpolator + self.callable = callable + if interpolator is None and callable is None: + raise ValueError("Must specify either interpolator or callable") + if interpolator is not None and self.callable is not None: + raise ValueError("Specify only one of interpolator or callable, not both") + + if interpolator is not None: + self.callable = interpolator.evaluate_value + if self.callable is None: + raise ValueError("Could not resolve a callable from the arguments") def fit(self, values: Union[list, int, float]) -> surface_list: + """Extract isosurfaces from the interpolator + + Parameters + ---------- + values : Union[list, int, float] + Either a list of values to extract isosurfaces for, or a single value + to extract a single isosurface for, or an integer to extract that many + isosurfaces evenly spaced between the minimum and maximum values of the + interpolator.
+ + Returns + ------- + surface_list + a dictionary containing the extracted isosurfaces + """ + if not callable(self.callable): + raise ValueError("No interpolator or callable function set") surfaces = {} - all_values = self.interpolator.evaluate_value(self.bounding_box.regular_grid()) + all_values = self.callable(self.bounding_box.regular_grid()) if isinstance(values, list): isovalues = values elif isinstance(values, float): isovalues = [values] elif isinstance(values, int): - isovalues = np.linspace(np.min(all_values), np.max(all_values), values) + isovalues = np.linspace( + np.min(all_values) + np.finfo(float).eps, + np.max(all_values) - np.finfo(float).eps, + values, + ) for isovalue in isovalues: verts, faces, normals, values = marching_cubes( all_values.reshape(self.bounding_box.nsteps, order="C"), @@ -38,7 +97,7 @@ def fit(self, values: Union[list, int, float]) -> surface_list: ) values = np.zeros(verts.shape[0]) + isovalue surfaces[f"surface_{isovalue}"] = Surface( - vertices=verts, + vertices=verts + self.bounding_box.origin, triangles=faces, normals=normals, name=f"surface_{isovalue}", diff --git a/LoopStructural/api/model.py b/LoopStructural/api/model.py deleted file mode 100644 index d5fe30272..000000000 --- a/LoopStructural/api/model.py +++ /dev/null @@ -1,6 +0,0 @@ -def CreateModelWithSingleScalarField(): - pass - - -def AddFaultToModel(): - pass diff --git a/LoopStructural/datasets/__init__.py b/LoopStructural/datasets/__init__.py index dbf51819c..231123cfd 100644 --- a/LoopStructural/datasets/__init__.py +++ b/LoopStructural/datasets/__init__.py @@ -19,3 +19,4 @@ from ._base import load_tabular_intrusion from ._base import load_geological_map_data from ._base import load_fault_trace +from ._base import load_horizontal diff --git a/LoopStructural/datasets/_base.py b/LoopStructural/datasets/_base.py index a729be64b..c09737efe 100644 --- a/LoopStructural/datasets/_base.py +++ b/LoopStructural/datasets/_base.py @@ -1,9 +1,46 @@ from os.path import dirname, join from pathlib import Path +from typing import Tuple import numpy as np import pandas as pd +def load_horizontal() -> Tuple[pd.DataFrame, np.ndarray]: + """Synthetic model for horizontal layers + + Returns + ------- + Tuple[pd.DataFrame, np.ndarray] + dataframe with feature_name 'strati', bounding box array + """ + bb = np.array([[0, 0, 0], [10, 10, 10]]) + xy = np.mgrid[0:10, 0:10].reshape(2, -1).T + data = pd.DataFrame( + np.vstack( + [ + np.hstack( + [ + xy, + np.zeros(xy.shape[0])[:, None] + 2, + np.zeros(xy.shape[0])[:, None] + 2, + ] + ), + np.hstack( + [ + xy, + np.zeros(xy.shape[0])[:, None] + 3, + np.zeros(xy.shape[0])[:, None] + 3, + ] + ), + ] + ), + columns=["X", "Y", "Z", "val"], + ) + + data["feature_name"] = "strati" + return data, bb + + def load_claudius(): """Model dataset sampled from 3D seismic data diff --git a/LoopStructural/datatypes/_bounding_box.py b/LoopStructural/datatypes/_bounding_box.py index 0e335e7bc..3c84f8845 100644 --- a/LoopStructural/datatypes/_bounding_box.py +++ b/LoopStructural/datatypes/_bounding_box.py @@ -12,6 +12,20 @@ def __init__( maximum: Optional[np.ndarray] = None, nsteps: Optional[np.ndarray] = None, ): + """A bounding box for a model, defined by the + origin, maximum and number of steps in each direction + + Parameters + ---------- + dimensions : int, optional + number of dimensions of the bounding box, by default 3 + origin : Optional[np.ndarray], optional + minimum corner of the bounding box, by default None + maximum : Optional[np.ndarray], optional + maximum corner of the bounding box, by default None + nsteps : Optional[np.ndarray], 
optional + number of steps in each direction, by default None + """ self._origin = np.array(origin) self._maximum = np.array(maximum) self.dimensions = dimensions @@ -82,16 +96,42 @@ def nelements(self, nelements): nsteps = np.ceil((self.maximum - self.origin) / step_vector).astype(int) self.nsteps = nsteps + @property + def corners(self): + """Returns the corners of the bounding box + + + Returns + ------- + np.ndarray + (8, 3) array of the corner coordinates + """ + return np.array( + [ + self.origin.tolist(), + [self.maximum[0], self.origin[1], self.origin[2]], + [self.maximum[0], self.maximum[1], self.origin[2]], + [self.origin[0], self.maximum[1], self.origin[2]], + [self.origin[0], self.origin[1], self.maximum[2]], + [self.maximum[0], self.origin[1], self.maximum[2]], + self.maximum.tolist(), + [self.origin[0], self.maximum[1], self.maximum[2]], + ] + ) + @property def step_vector(self): return (self.maximum - self.origin) / self.nsteps + @property + def length(self): + return self.maximum - self.origin + def fit(self, locations: np.ndarray): if locations.shape[1] != self.dimensions: raise LoopValueError( f"locations array is {locations.shape[1]}D but bounding box is {self.dimensions}" ) - print("fitting") self.origin = locations.min(axis=0) self.maximum = locations.max(axis=0) return self @@ -122,6 +162,7 @@ def __getitem__(self, name): return self.get_value(name) def is_inside(self, xyz): + xyz = np.array(xyz) if len(xyz.shape) == 1: xyz = xyz.reshape((1, -1)) if xyz.shape[1] != 3: diff --git a/LoopStructural/datatypes/_surface.py b/LoopStructural/datatypes/_surface.py index 8c495e96c..426ed20f8 100644 --- a/LoopStructural/datatypes/_surface.py +++ b/LoopStructural/datatypes/_surface.py @@ -12,7 +12,7 @@ class Surface: values: Optional[np.ndarray] = None @property - def pyvista(self): + def vtk(self): import pyvista as pv surface = pv.PolyData.from_regular_faces(self.vertices, self.triangles) diff --git a/LoopStructural/interpolators/__init__.py b/LoopStructural/interpolators/__init__.py index bcae9b2e9..e94a88009 100644 --- a/LoopStructural/interpolators/__init__.py +++ b/LoopStructural/interpolators/__init__.py @@ -23,7 +23,6 @@ from enum import IntEnum from ..utils import getLogger -import LoopStructural logger = getLogger(__name__) @@ -46,6 +45,13 @@ class InterpolatorType(IntEnum): SURFE = 11 +interpolator_string_map = { + "FDI": InterpolatorType.FINITE_DIFFERENCE, + "PLI": InterpolatorType.PIECEWISE_LINEAR, + "P2": InterpolatorType.PIECEWISE_QUADRATIC, + "P1": InterpolatorType.PIECEWISE_LINEAR, + "DFI": InterpolatorType.DISCRETE_FOLD, +} from ..interpolators._geological_interpolator import GeologicalInterpolator from ..interpolators._discrete_interpolator import DiscreteInterpolator from ..interpolators.supports import ( @@ -56,14 +62,15 @@ class InterpolatorType(IntEnum): P2Unstructured2d, StructuredGrid2D, P2UnstructuredTetMesh, + SupportType, ) from ..interpolators._finite_difference_interpolator import ( FiniteDifferenceInterpolator, ) -from ..interpolators.piecewiselinear_interpolator import ( - PiecewiseLinearInterpolator, +from ..interpolators._p1interpolator import ( + P1Interpolator as PiecewiseLinearInterpolator, ) from ..interpolators._discrete_fold_interpolator import ( DiscreteFoldInterpolator, @@ -74,10 +81,8 @@ class InterpolatorType(IntEnum): except ImportError: logger.warning('Can\'t import surfepy - to install "pip install surfe"') -logger.warning("Using experimental interpolators: P1Interpolator and P2Interpolator") from ._p1interpolator import P1Interpolator from ._p2interpolator import P2Interpolator
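Taken together with the api hunks earlier in this diff, these changes define a small scikit-learn-style surface, with string names such as "FDI" now resolved through interpolator_string_map. A minimal usage sketch of the LoopIsosurfacer callable path added above; it assumes BoundingBox.regular_grid(), which fit() calls but this diff does not show, samples the box on its nsteps grid:

    import numpy as np
    from LoopStructural.datatypes import BoundingBox
    from LoopStructural.api import LoopIsosurfacer

    # box matching the synthetic load_horizontal() dataset: a 10 x 10 x 10 volume
    bbox = BoundingBox(
        origin=np.array([0.0, 0.0, 0.0]),
        maximum=np.array([10.0, 10.0, 10.0]),
        nsteps=np.array([20, 20, 20]),
    )

    # any callable mapping (N, 3) locations to N scalar values can be surfaced;
    # value = z reproduces the horizontal layers of the load_horizontal() example
    surfacer = LoopIsosurfacer(bbox, callable=lambda xyz: np.asarray(xyz)[:, 2])

    # one Surface per isovalue, keyed "surface_2.0" and "surface_3.0"
    surfaces = surfacer.fit([2.0, 3.0])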
-from ._builders import get_interpolator interpolator_map = { InterpolatorType.BASE: GeologicalInterpolator, @@ -89,4 +94,13 @@ class InterpolatorType(IntEnum): InterpolatorType.BASE_DATA_SUPPORTED: GeologicalInterpolator, # InterpolatorType.SURFE: SurfeRBFInterpolator, } + +support_interpolator_map = { + InterpolatorType.FINITE_DIFFERENCE: SupportType.StructuredGrid, + InterpolatorType.DISCRETE_FOLD: SupportType.TetMesh, + InterpolatorType.PIECEWISE_LINEAR: SupportType.TetMesh, + InterpolatorType.PIECEWISE_QUADRATIC: SupportType.P2UnstructuredTetMesh, +} + + from ._interpolator_factory import InterpolatorFactory diff --git a/LoopStructural/interpolators/_builders.py b/LoopStructural/interpolators/_builders.py index a33401621..1b3b12fb8 100644 --- a/LoopStructural/interpolators/_builders.py +++ b/LoopStructural/interpolators/_builders.py @@ -3,6 +3,7 @@ from typing import Optional, Union from LoopStructural.interpolators import ( PiecewiseLinearInterpolator, + P1Interpolator, P2Interpolator, FiniteDifferenceInterpolator, GeologicalInterpolator, @@ -61,7 +62,7 @@ def get_interpolator( "for modelling using PLI" % (support.ntetra) ) - return PiecewiseLinearInterpolator(support) + return P1Interpolator(support) if interpolatortype == "P2": if support is not None: logger.info( diff --git a/LoopStructural/interpolators/_discrete_fold_interpolator.py b/LoopStructural/interpolators/_discrete_fold_interpolator.py index 1013c94a8..2b065a281 100644 --- a/LoopStructural/interpolators/_discrete_fold_interpolator.py +++ b/LoopStructural/interpolators/_discrete_fold_interpolator.py @@ -2,26 +2,21 @@ Piecewise linear interpolator using folds """ import logging +from typing import Optional import numpy as np from ..interpolators import PiecewiseLinearInterpolator, InterpolatorType - +from ..modelling.features.fold import FoldEvent from ..utils import getLogger logger = getLogger(__name__) -try: - from ._cython.dsi_helper import fold_cg -except: - from ._python.dsi_helper import fold_cg - - logger.warning("Cython compiled code not found, using python version") class DiscreteFoldInterpolator(PiecewiseLinearInterpolator): """ """ - def __init__(self, support, fold): + def __init__(self, support, fold: Optional[FoldEvent] = None): """ A piecewise linear interpolator that can also use fold constraints defined in Laurent et al., 2016 @@ -37,40 +32,6 @@ def __init__(self, support, fold): self.type = InterpolatorType.DISCRETE_FOLD self.fold = fold - @classmethod - def from_piecewise_linear_and_fold(cls, pli, fold): - """ - Constructor from an existing piecewise linear interpolation object and a fold object - copies data from the PLI to the DFI - - Parameters - ---------- - pli : PiecewiseLinearInterpolator - existing interpolator - fold : FoldEvent - a fold event with a valid - - Returns - ------- - DiscreteFoldInterpolator - - """ - # create a blank fold interpolator - interpolator = cls(pli.support, fold) - - # copy the data and stuff from the existing interpolator - interpolator.region = pli.region - interpolator.shape = pli.shape - interpolator.region_map = pli.region_map - interpolator.p_i = pli.p_i - interpolator.p_g = pli.p_g - interpolator.p_t = pli.p_t - interpolator.n_i = pli.n_i - interpolator.n_g = pli.n_g - interpolator.n_t = pli.n_t - interpolator.propertyname = pli.propertyname - return interpolator - def update_fold(self, fold): """ @@ -88,6 +49,15 @@ def update_fold(self, fold): ) self.fold = fold + def setup_interpolator(self, **kwargs): + if self.fold is None: + raise Exception("No fold event 
specified") + fold_weights = kwargs.get("fold_weights", {}) + super().setup_interpolator(**kwargs) + self.add_fold_constraints(**fold_weights) + + return + def add_fold_constraints( self, fold_orientation=10.0, @@ -210,45 +180,14 @@ def add_fold_constraints( logger.info( f"Adding fold regularisation constraint to w = {fold_regularisation[0]} {fold_regularisation[1]} {fold_regularisation[2]}" ) - - idc, c, ncons = fold_cg( - eg, - dgz, - self.support.get_neighbours(), - self.support.get_elements(), - self.support.nodes, + self.minimise_edge_jumps( + w=fold_regularisation[0], vector=dgz, name="fold regularisation 1" ) - A = np.array(c[:ncons, :]) - B = np.zeros(A.shape[0]) - idc = np.array(idc[:ncons, :]) - self.add_constraints_to_least_squares( - A, B, idc, fold_regularisation[0], name="fold regularisation 1" + self.minimise_edge_jumps( + w=fold_regularisation[1], + vector=deformed_orientation, + name="fold regularisation 2", ) - - idc, c, ncons = fold_cg( - eg, - deformed_orientation, - self.support.get_neighbours(), - self.support.get_elements(), - self.support.nodes, - ) - A = np.array(c[:ncons, :]) - B = np.zeros(A.shape[0]) - idc = np.array(idc[:ncons, :]) - self.add_constraints_to_least_squares( - A, B, idc, fold_regularisation[1], name="fold regularisation 2" - ) - - idc, c, ncons = fold_cg( - eg, - fold_axis, - self.support.get_neighbours(), - self.support.get_elements(), - self.support.nodes, - ) - A = np.array(c[:ncons, :]) - B = np.zeros(A.shape[0]) - idc = np.array(idc[:ncons, :]) - self.add_constraints_to_least_squares( - A, B, idc, fold_regularisation[2], name="fold regularisation 3" + self.minimise_edge_jumps( + w=fold_regularisation[2], vector=fold_axis, name="fold regularisation 3" ) diff --git a/LoopStructural/interpolators/_discrete_interpolator.py b/LoopStructural/interpolators/_discrete_interpolator.py index 347c32b56..ae50447a6 100644 --- a/LoopStructural/interpolators/_discrete_interpolator.py +++ b/LoopStructural/interpolators/_discrete_interpolator.py @@ -69,6 +69,7 @@ def __init__(self, support, data={}, c=None, up_to_date=False): "Creating discrete interpolator with {} degrees of freedom".format(self.nx) ) self.type = InterpolatorType.BASE_DISCRETE + self.c = np.zeros(self.support.n_nodes) @property def nx(self) -> int: @@ -144,6 +145,8 @@ def reset(self): Reset the interpolation constraints """ + self.constraints = {} + self.c_ = 0 logger.debug("Resetting interpolation constraints") def add_constraints_to_least_squares(self, A, B, idc, w=1.0, name="undefined"): @@ -426,7 +429,7 @@ def build_matrix(self, square=True, damp=0.0, ie=False): b = [] rows = [] cols = [] - for c in self.constraints.values(): + for name, c in self.constraints.items(): if len(c["w"]) == 0: continue aa = (c["A"] * c["w"][:, None] / max_weight).flatten() @@ -800,7 +803,6 @@ def update(self): bool """ - if self.solver is None: logging.debug("Cannot rerun interpolator") return False @@ -808,7 +810,7 @@ def update(self): self.setup_interpolator() return self.solve_system(self.solver) - def evaluate_value(self, evaluation_points: np.ndarray) -> np.ndarray: + def evaluate_value(self, locations: np.ndarray) -> np.ndarray: """Evaluate the value of the interpolator at location Parameters @@ -822,7 +824,7 @@ def evaluate_value(self, evaluation_points: np.ndarray) -> np.ndarray: value of the interpolator """ self.update() - evaluation_points = np.array(evaluation_points) + evaluation_points = np.array(locations) evaluated = np.zeros(evaluation_points.shape[0]) mask = np.any(evaluation_points == 
np.nan, axis=1) @@ -832,7 +834,7 @@ def evaluate_value(self, evaluation_points: np.ndarray) -> np.ndarray: ) return evaluated - def evaluate_gradient(self, evaluation_points: np.ndarray) -> np.ndarray: + def evaluate_gradient(self, locations: np.ndarray) -> np.ndarray: """ Evaluate the gradient of the scalar field at the evaluation points Parameters @@ -845,8 +847,8 @@ def evaluate_gradient(self, evaluation_points: np.ndarray) -> np.ndarray: """ self.update() - if evaluation_points.shape[0] > 0: - return self.support.evaluate_gradient(evaluation_points, self.c) + if locations.shape[0] > 0: + return self.support.evaluate_gradient(locations, self.c) return np.zeros((0, 3)) def to_dict(self): diff --git a/LoopStructural/interpolators/_finite_difference_interpolator.py b/LoopStructural/interpolators/_finite_difference_interpolator.py index 4426656a2..3798f4b4d 100644 --- a/LoopStructural/interpolators/_finite_difference_interpolator.py +++ b/LoopStructural/interpolators/_finite_difference_interpolator.py @@ -5,7 +5,7 @@ import numpy as np -from ..utils.helper import get_vectors +from ..utils import get_vectors from ._discrete_interpolator import DiscreteInterpolator from ..interpolators import InterpolatorType @@ -79,6 +79,7 @@ def setup_interpolator(self, **kwargs): ------- """ + self.reset() for key in kwargs: self.up_to_date = False if "regularisation" in kwargs: @@ -478,6 +479,8 @@ def add_regularisation(self, operator, w=0.1): self.assemble_inner(operator) # self.assemble_borders() + # def assemble_borders(self, operator, w): + def assemble_inner(self, operator, w): """ diff --git a/LoopStructural/interpolators/_geological_interpolator.py b/LoopStructural/interpolators/_geological_interpolator.py index 817daba82..2d889ac6e 100644 --- a/LoopStructural/interpolators/_geological_interpolator.py +++ b/LoopStructural/interpolators/_geological_interpolator.py @@ -68,6 +68,25 @@ def check_array(self, array: np.ndarray): except Exception as e: raise LoopTypeError(str(e)) + def to_json(self): + """ + Returns a json representation of the geological interpolator + + Returns + ------- + json : dict + json representation of the geological interpolator + """ + json = {} + json["type"] = self.type + # json["name"] = self.propertyname + json["constraints"] = self.constraints + json["data"] = self.data + json["type"] = self.type + # json["dof"] = self.nx + json["up_to_date"] = self.up_to_date + return json + @abstractmethod def set_region(self, **kwargs): pass @@ -250,6 +269,10 @@ def evaluate_value(self, locations: np.ndarray): def evaluate_gradient(self, locations: np.ndarray): raise NotImplementedError("evaluate_gradient not implemented") + @abstractmethod + def reset(self): + pass + def to_dict(self): return { "type": self.type, diff --git a/LoopStructural/interpolators/_interpolator_factor.py b/LoopStructural/interpolators/_interpolator_factor.py deleted file mode 100644 index 7b889b895..000000000 --- a/LoopStructural/interpolators/_interpolator_factor.py +++ /dev/null @@ -1,27 +0,0 @@ -from LoopStructural.interpolators import interpolator_map, InterpolatorType -from LoopStructural.interpolators.supports._support_factory import SupportFactory - - -class InterpolatorFactory: - @staticmethod - def create_interpolator(interpolator_type, support, data, c, up_to_date, **kwargs): - if interpolator_type is None: - raise ValueError("No interpolator type specified") - if type(interpolator_type) == str: - interpolator_type = InterpolatorType._member_map_[ - interpolator_type - ].numerator - # TODO add a 
warning for all kwargs that are not used - return interpolator_map[interpolator_type](support, data, c, up_to_date) - - @staticmethod - def from_dict(d): - d = d.copy() - interpolator_type = d.pop("type", None) - if interpolator_type is None: - raise ValueError("No interpolator type specified") - support = d.pop("support", None) - if support is None: - raise ValueError("No support specified") - support = SupportFactory.from_dict(support) - return InterpolatorFactory.create_interpolator(interpolator_type, support, **d) diff --git a/LoopStructural/interpolators/_interpolator_factory.py b/LoopStructural/interpolators/_interpolator_factory.py index c5964d59d..35bb9da8b 100644 --- a/LoopStructural/interpolators/_interpolator_factory.py +++ b/LoopStructural/interpolators/_interpolator_factory.py @@ -1,6 +1,11 @@ -from typing import Optional - -from . import interpolator_map, InterpolatorType # , support_interpolator_map +from typing import Optional, Union +from .supports import support_map, SupportFactory +from . import ( + interpolator_map, + InterpolatorType, + support_interpolator_map, + interpolator_string_map, +) from LoopStructural.utils import BoundingBox from typing import Optional import numpy as np @@ -11,11 +16,12 @@ class InterpolatorFactory: @staticmethod def create_interpolator( - interpolatortype: str, - boundingbox: BoundingBox, - nelements: int, + interpolatortype: Union[str, InterpolatorType] = None, + boundingbox: Optional[BoundingBox] = None, + nelements: Optional[int] = None, element_volume: Optional[float] = None, support=None, + buffer: float = 0.2, ): if interpolatortype == None: raise ValueError("No interpolator type specified") @@ -23,17 +29,18 @@ def create_interpolator( raise ValueError("No bounding box specified") if nelements == None: raise ValueError("No number of elements specified") - if type(interpolatortype) == str: - interpolatortype = InterpolatorType._member_map_[interpolatortype].numerator + if isinstance(interpolatortype, str): + interpolatortype = interpolator_string_map[interpolatortype] if support is None: - raise Exception("Support must be specified") - # supporttype = support_interpolator_map[interpolatortype] - # support = SupportFactory.create_support_from_bbox( - # supporttype, - # bounding_box=boundingbox, - # nelements=nelements, - # element_volume=element_volume, - # ) + # raise Exception("Support must be specified") + supporttype = support_interpolator_map[interpolatortype] + support = SupportFactory.create_support_from_bbox( + supporttype, + bounding_box=boundingbox, + nelements=nelements, + element_volume=element_volume, + buffer=buffer, + ) return interpolator_map[interpolatortype](support) @staticmethod diff --git a/LoopStructural/interpolators/_p1interpolator.py b/LoopStructural/interpolators/_p1interpolator.py index d6efe6516..9cb25446e 100644 --- a/LoopStructural/interpolators/_p1interpolator.py +++ b/LoopStructural/interpolators/_p1interpolator.py @@ -6,7 +6,7 @@ import numpy as np from ._discrete_interpolator import DiscreteInterpolator -from ..utils.helper import get_vectors +from ..utils import get_vectors logger = logging.getLogger(__name__) @@ -47,24 +47,23 @@ def add_norm_ctr_pts(self, w=1.0): grad, elements, inside = self.support.evaluate_shape_derivatives( points[:, :3] ) - size = self.support.element_size[inside] + size = self.support.element_size[elements[inside]] wt = np.ones(size.shape[0]) wt *= w * size - # print(grad[inside,:,:].shape) - # print(self.support.elements[elements[inside]].shape) elements = 
np.tile(self.support.elements[elements[inside]], (3, 1, 1)) elements = elements.swapaxes(0, 1) - # elements = elements.swapaxes(0,2) - grad = grad.swapaxes(1, 2) + # elements = elements.swapaxes(0, 2) + # grad = grad.swapaxes(1, 2) + # elements = elements.swapaxes(1, 2) self.add_constraints_to_least_squares( grad[inside, :, :] * wt[:, None, None], - points[inside, 3:5] * wt[:, None], + points[inside, 3:6] * wt[:, None], elements, name="norm", ) - + self.up_to_date = False pass def add_ctr_pts(self, w=1.0): @@ -80,10 +79,12 @@ def add_ctr_pts(self, w=1.0): self.support.elements[elements[inside], :], name="value", ) + self.up_to_date = False - def minimize_edge_jumps( - self, w=0.1, vector_func=None - ): # NOTE: imposes \phi_T1(xi)-\phi_T2(xi) dot n =0 + def minimise_edge_jumps( + self, w=0.1, vector_func=None, vector=None, name="edge jump" + ): + # NOTE: imposes \phi_T1(xi)-\phi_T2(xi) dot n =0 # iterate over all triangles # flag inidicate which triangles have had all their relationships added v1 = self.support.nodes[self.support.shared_elements][:, 0, :] @@ -96,14 +97,16 @@ def minimize_edge_jumps( # evaluate normal if using vector func for cp2 if vector_func: norm = vector_func((v1 + v2) / 2) + if vector is not None: + if bc_t1.shape[0] == vector.shape[0]: + norm = vector # evaluate the shape function for the edges for each neighbouring triangle - Dt, tri1 = self.support.evaluate_shape_derivatives( + Dt, tri1, inside = self.support.evaluate_shape_derivatives( bc_t1, elements=self.support.shared_element_relationships[:, 0] ) - Dn, tri2 = self.support.evaluate_shape_derivatives( + Dn, tri2, inside = self.support.evaluate_shape_derivatives( bc_t2, elements=self.support.shared_element_relationships[:, 1] ) - # constraint for each cp is triangle - neighbour create a Nx12 matrix const_t = np.einsum("ij,ijk->ik", norm, Dt) const_n = -np.einsum("ij,ijk->ik", norm, Dn) @@ -120,6 +123,102 @@ def minimize_edge_jumps( const * shared_element_size[:, None] * w, np.zeros(const.shape[0]), tri_cp1, - name="edge jump", + name=name, ) + self.up_to_date = False # p2.add_constraints_to_least_squares(const_cp2*e_len[:,None]*w,np.zeros(const_cp1.shape[0]),tri_cp2, name='edge jump cp2') + + def setup_interpolator(self, **kwargs): + """ + Searches through kwargs for any interpolation weights and updates + the dictionary. 
+ + Then adds the constraints to the linear system using the + interpolation weights values + Parameters + ---------- + kwargs - + interpolation weights + + Returns + ------- + + """ + # reset the system; fold constraints are re-added afterwards by DiscreteFoldInterpolator.setup_interpolator + self.reset() + for key in kwargs: + if "regularisation" in kwargs: + self.interpolation_weights["cgw"] = 0.1 * kwargs["regularisation"] + self.up_to_date = False + self.interpolation_weights[key] = kwargs[key] + if self.interpolation_weights["cgw"] > 0.0: + self.up_to_date = False + self.minimise_edge_jumps(self.interpolation_weights["cgw"]) + # direction_feature=kwargs.get("direction_feature", None), + # direction_vector=kwargs.get("direction_vector", None), + # ) + # self.minimise_grad_steepness( + # w=self.interpolation_weights.get("steepness_weight", 0.01), + # wtfunc=self.interpolation_weights.get("steepness_wtfunc", None), + # ) + logger.info( + "Using constant gradient regularisation w = %f" + % self.interpolation_weights["cgw"] + ) + + logger.info( + "Added %i gradient constraints, %i normal constraints," + "%i tangent constraints and %i value constraints" + % (self.n_g, self.n_n, self.n_t, self.n_i) + ) + self.add_gradient_ctr_pts(self.interpolation_weights["gpw"]) + self.add_norm_ctr_pts(self.interpolation_weights["npw"]) + self.add_ctr_pts(self.interpolation_weights["cpw"]) + self.add_tangent_constraints(self.interpolation_weights["tpw"]) + # self.add_interface_constraints(self.interpolation_weights["ipw"]) + + def add_gradient_orthogonal_constraints( + self, points: np.ndarray, vector: np.ndarray, w: float = 1.0, B: float = 0 + ): + """ + constrains the scalar field to be orthogonal to a given vector + + Parameters + ---------- + points : np.ndarray + location to add gradient orthogonal constraint + vector : np.ndarray + vector to be orthogonal to, should be the same shape as points + w : double + weighting of the constraint, by default 1.0 + B : np.array + right-hand side of the constraint, by default 0 + + Returns + ------- + + """ + if points.shape[0] > 0: + grad, elements, inside = self.support.evaluate_shape_derivatives( + points[:, :3] + ) + size = self.support.element_size[elements[inside]] + wt = np.ones(size.shape[0]) + wt *= w * size + elements = self.support.elements[elements[inside], :] + # elements = np.tile(self.support.elements[elements[inside]], (3, 1, 1)) + + # elements = elements.swapaxes(0, 1) + # elements = elements.swapaxes(0, 2) + # grad = grad.swapaxes(1, 2) + # elements = elements.swapaxes(1, 2) + norm = np.linalg.norm(vector, axis=1) + vector[norm > 0, :] /= norm[norm > 0, None] + A = np.einsum("ij,ijk->ik", vector[inside, :3], grad[inside, :, :]) + B = np.zeros(points[inside, :].shape[0]) + B + self.add_constraints_to_least_squares( + A, B, elements, w=wt, name="gradient orthogonal" + ) + if np.sum(inside) <= 0: + logger.warning( + f"{np.sum(~inside)} \ + gradient constraints not added: outside of model bounding box" + ) + self.up_to_date = False diff --git a/LoopStructural/interpolators/_p2interpolator.py b/LoopStructural/interpolators/_p2interpolator.py index 1efc39fc1..fb3c288f8 100644 --- a/LoopStructural/interpolators/_p2interpolator.py +++ b/LoopStructural/interpolators/_p2interpolator.py @@ -7,7 +7,7 @@ import numpy as np from ..interpolators import DiscreteInterpolator -from ..utils.helper import get_vectors +from ..utils import get_vectors logger = logging.getLogger(__name__) diff --git a/LoopStructural/interpolators/_python/__init__.py b/LoopStructural/interpolators/_python/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/LoopStructural/interpolators/_python/dsi_helper.py 
b/LoopStructural/interpolators/_python/dsi_helper.py deleted file mode 100644 index b8c17cfa0..000000000 --- a/LoopStructural/interpolators/_python/dsi_helper.py +++ /dev/null @@ -1,187 +0,0 @@ -import numpy as np - - -def cg(EG: np.ndarray, neighbours: np.ndarray, elements: np.ndarray, nodes, region): - if EG.shape[0] != neighbours.shape[0] and EG.shape[0] != elements.shape[0]: - raise ValueError("EG and neighbours must have the same number of elements") - if EG.shape[2] != 4: - raise ValueError("EG must have 4 columns") - Nc = 5 # numer of constraints shared nodes + independent - Na = 4 # number of nodes - Ns = Na - 1 - ne = len(neighbours) - ncons = 0 - flag = np.zeros(ne, dtype=np.int32) - c = np.zeros((len(neighbours) * 4, Nc)) - areas = np.zeros((len(neighbours) * 4)) - idc = np.zeros((ne * 4, 5), dtype=np.int64) - common = np.zeros((3), dtype=np.int64) - norm = np.zeros((3)) - shared_pts = np.zeros((3, 3)) - v1 = np.zeros(3) - v2 = np.zeros(3) - area = 0 - length = 0 - idl = np.zeros(4, dtype=np.int64) - idr = np.zeros(4, dtype=np.int64) - for e in range(ne): - idl = elements[e, :] - e1 = EG[e, :, :] - flag[e] = 1 - # if not in region then skip this tetra - if ( - region[idl[0]] == 0 - or region[idl[1]] == 0 - or region[idl[2]] == 0 - or region[idl[3]] == 0 - ): - continue - for n in range(4): - neigh = neighbours[e, n] - idr = elements[neigh, :] - if neigh < 0: - continue - if flag[neigh] == 1: - continue - - # if not in region then skip this tetra - if ( - region[idr[0]] == 0 - or region[idr[1]] == 0 - or region[idr[2]] == 0 - or region[idr[3]] == 0 - ): - continue - - e2 = EG[neigh, :, :] - - for i in range(Nc): - idc[ncons, i] = -1 - - i = 0 - for itr_right in range(Na): - for itr_left in range(Na): - if idl[itr_left] == idr[itr_right]: - common[i] = idl[itr_left] - i += 1 - for j in range(3): - for k in range(3): - shared_pts[j][k] = nodes[common[j]][k] # common - for i in range(3): - v1[i] = shared_pts[0, i] - shared_pts[1, i] - v2[i] = shared_pts[2, i] - shared_pts[1, i] - norm[0] = v2[2] * v1[1] - v1[2] * v2[1] - norm[1] = v1[2] * v2[0] - v1[0] * v2[2] - norm[2] = v1[0] * v2[1] - v1[1] * v2[0] - - length = np.linalg.norm(norm) - # we want to weight the cg by the area of the shared face - # area of triangle is half area of parallelogram - # https://math.stackexchange.com/questions/128991/how-to-calculate-the-area-of-a-3d-triangle - area = ( - 0.5 * length - ) # sqrt(norm[0]*norm[0]+norm[1]*norm[1]+norm[2]*norm[2])#np.linalg.norm(norm) - for i in range(3): - norm[i] /= length - for itr_left in range(Na): - idc[ncons, itr_left] = idl[itr_left] - for i in range(3): - c[ncons, itr_left] += norm[i] * e1[i][itr_left] - next_available_position = Na - for itr_right in range(Na): - common_index = -1 - for itr_left in range(Na): - if idc[ncons, itr_left] == idr[itr_right]: - common_index = itr_left - - position_to_write = 0 - if common_index != -1: - position_to_write = common_index - else: - position_to_write = 4 # next_available_position - next_available_position += 1 - idc[ncons, position_to_write] = idr[itr_right] - for i in range(3): - c[ncons, position_to_write] -= norm[i] * e2[i][itr_right] - areas[ncons] = area - ncons += 1 - return idc, c, ncons, areas - - -def fold_cg(EG, X, neighbours, elements, nodes): - Nc = 5 # numer of constraints shared nodes + independent - Na = 4 # number of nodes - Ns = Na - 1 - ne = len(neighbours) - ncons = 0 - flag = np.zeros(ne, dtype=np.int32) - c = np.zeros((len(neighbours) * 4, Nc)) - idc = np.zeros((ne * 4, 5), dtype=np.int64) - common = 
np.zeros((3), dtype=np.int64) - norm = np.zeros((3)) - shared_pts = np.zeros((3, 3)) - v1 = np.zeros(3) - v2 = np.zeros(3) - - idl = np.zeros(4, dtype=np.int64) - idr = np.zeros(4, dtype=np.int64) - for e in range(ne): - idl = elements[e, :] - e1 = EG[e, :, :] - flag[e] = 1 - Xl = X[e, :] - for n in range(4): - neigh = neighbours[e, n] - idr = elements[neigh, :] - if neigh == -1: - continue - if flag[neigh] == 1: - continue - e2 = EG[neigh, :, :] - Xr = X[neigh, :] - - for i in range(Nc): - idc[ncons, i] = -1 - i = 0 - for itr_right in range(Na): - for itr_left in range(Na): - if idl[itr_left] == idr[itr_right]: - common[i] = idl[itr_left] - i += 1 - for j in range(3): - for k in range(3): - - shared_pts[j][k] = nodes[common[j]][k] # common - for i in range(3): - v1[i] = shared_pts[0, i] - shared_pts[1, i] - v2[i] = shared_pts[2, i] - shared_pts[1, i] - norm[0] = v2[2] * v1[1] - v1[2] * v2[1] - norm[1] = v1[2] * v2[0] - v1[0] * v2[2] - norm[2] = v1[0] * v2[1] - v1[1] * v2[0] - area = 0.5 * np.sqrt( - norm[0] * norm[0] + norm[1] * norm[1] + norm[2] * norm[2] - ) # np.linalg.norm(norm) - - i = 0 - for itr_left in range(Na): - idc[ncons, itr_left] = idl[itr_left] - for i in range(3): - c[ncons, itr_left] += Xl[i] * e1[i][itr_left] * area - next_available_position = Na - for itr_right in range(Na): - common_index = -1 - for itr_left in range(Na): - if idc[ncons, itr_left] == idr[itr_right]: - common_index = itr_left - position_to_write = 0 - if common_index != -1: - position_to_write = common_index - else: - position_to_write = 4 # next_available_position - next_available_position += 1 - - idc[ncons, position_to_write] = idr[itr_right] - for i in range(3): - c[ncons, position_to_write] -= Xr[i] * e2[i][itr_right] * area - ncons += 1 - return idc, c, ncons diff --git a/LoopStructural/interpolators/piecewiselinear_interpolator.py b/LoopStructural/interpolators/piecewiselinear_interpolator.py deleted file mode 100644 index 602ec2fbb..000000000 --- a/LoopStructural/interpolators/piecewiselinear_interpolator.py +++ /dev/null @@ -1,475 +0,0 @@ -""" -Piecewise linear interpolator -""" -import numpy as np -from ..utils import getLogger - -logger = getLogger(__name__) -try: - from ..interpolators._cython.dsi_helper import cg, fold_cg -except: - from ..interpolators._python.dsi_helper import cg, fold_cg - - logger.warning("Using python code not compiled cython code, this will be slower") -from ..interpolators import DiscreteInterpolator -from ..interpolators import InterpolatorType - -from ..utils.helper import get_vectors - - -class PiecewiseLinearInterpolator(DiscreteInterpolator): - def __init__(self, support): - """ - Piecewise Linear Interpolator - Approximates scalar field by finding coefficients to a piecewise linear - equation on a tetrahedral mesh. Uses constant gradient regularisation. - - Parameters - ---------- - support - TetMesh - interpolation support - """ - - self.shape = "rectangular" - DiscreteInterpolator.__init__(self, support) - # whether to assemble a rectangular matrix or a square matrix - self.interpolator_type = InterpolatorType.PIECEWISE_LINEAR - self.support = support - - self.interpolation_weights = { - "cgw": 0.1, - "cpw": 1.0, - "npw": 1.0, - "gpw": 1.0, - "tpw": 1.0, - "ipw": 1.0, - } - self.__str = "Piecewise Linear Interpolator with %i unknowns. 
\n" % self.nx - self.type = InterpolatorType.PIECEWISE_LINEAR - - def __str__(self): - return self.__str - - def copy(self): - return PiecewiseLinearInterpolator(self.support) - - def setup_interpolator(self, **kwargs): - """ - Searches through kwargs for any interpolation weights and updates - the dictionary. - Then adds the constraints to the linear system using the - interpolation weights values - Parameters - ---------- - kwargs - - interpolation weights - - Returns - ------- - - """ - # can't reset here, clears fold constraints - # self.reset() - for key in kwargs: - if "regularisation" in kwargs: - self.interpolation_weights["cgw"] = 0.1 * kwargs["regularisation"] - self.up_to_date = False - self.interpolation_weights[key] = kwargs[key] - if self.interpolation_weights["cgw"] > 0.0: - self.up_to_date = False - self.add_constant_gradient( - self.interpolation_weights["cgw"], - direction_feature=kwargs.get("direction_feature", None), - direction_vector=kwargs.get("direction_vector", None), - ) - logger.info( - "Using constant gradient regularisation w = %f" - % self.interpolation_weights["cgw"] - ) - logger.info( - "Added %i gradient constraints, %i normal constraints," - "%i tangent constraints and %i value constraints" - % (self.n_g, self.n_n, self.n_t, self.n_i) - ) - self.add_gradient_constraints(self.interpolation_weights["gpw"]) - self.add_norm_constraints(self.interpolation_weights["npw"]) - self.add_value_constraints(self.interpolation_weights["cpw"]) - self.add_tangent_constraints(self.interpolation_weights["tpw"]) - self.add_interface_constraints(self.interpolation_weights["ipw"]) - - def add_constant_gradient( - self, w=0.1, direction_vector=None, direction_feature=None - ): - """ - Add the constant gradient regularisation to the system - - Parameters - ---------- - w (double) - weighting of the cg parameter - - Returns - ------- - - """ - if direction_feature is not None: - direction_vector = direction_feature.evaluate_gradient( - self.support.barycentre - ) - if direction_vector is not None: - if direction_vector.shape[0] == 1: - # if using a constant direction, tile array so it works for cg calc - direction_vector = np.tile( - direction_vector, (self.support.barycentre.shape[0], 1) - ) - if direction_vector is not None: - logger.info("Running constant gradient") - elements_gradients = self.support.get_element_gradients( - np.arange(self.support.ntetra) - ) - if elements_gradients.shape[0] != direction_vector.shape[0]: - logger.error( - "Cannot add directional CG, vector field is not the correct length" - ) - return - region = self.region.astype("int64") - - neighbours = self.support.get_neighbours() - elements = self.support.get_elements() - idc, c, ncons = fold_cg( - elements_gradients, - direction_vector, - neighbours.astype("int64"), - elements.astype("int64"), - self.support.nodes, - ) - - idc = np.array(idc[:ncons, :]) - A = np.array(c[:ncons, :]) - B = np.zeros(c[:ncons, :].shape[0]) - gi = np.zeros(self.support.n_nodes) - gi[:] = -1 - gi[self.region] = np.arange(0, self.nx) - idc = gi[idc] - outside = ~np.any(idc == -1, axis=1) - # w/=A.shape[0] - self.add_constraints_to_least_squares( - A[outside, :], - B[outside], - idc[outside, :], - w=w, - name="direction_regularisation", - ) - else: - logger.info("Running constant gradient") - elements_gradients = self.support.get_element_gradients( - np.arange(self.support.ntetra) - ) - region = self.region.astype("int64") - - neighbours = self.support.get_neighbours() - elements = self.support.get_elements() - idc, c, ncons, 
area = cg( - elements_gradients, - neighbours.astype("int64"), - elements.astype("int64"), - self.support.nodes, - region.astype("int64"), - ) - - idc = np.array(idc[:ncons, :]) - A = np.array(c[:ncons, :]) - area = np.array(area[:ncons]) - B = np.zeros(A.shape[0]) - gi = np.zeros(self.support.n_nodes) - gi[:] = -1 - gi[self.region] = np.arange(0, self.nx) - idc = gi[idc] - - outside = ~np.any(idc == -1, axis=1) - - # w/=A.shape[0] - self.add_constraints_to_least_squares( - A[outside, :], B[outside], idc[outside, :], w=w, name="regularisation" - ) - - def add_gradient_constraints(self, w=1.0): - """ - Adds gradient constraints to the least squares system with a weight - defined by w - Parameters - ---------- - w : either numpy array of length number of - - Returns - ------- - Notes - ----- - Gradient constraints add a constraint that the gradient of the - implicit function should - be orthogonal to the strike vector and the dip vector defined by the - normal. - This does not control the direction of the gradient and therefore - requires at least two other - value constraints OR a norm constraint for the interpolant to solve. - """ - - points = self.get_gradient_constraints() - if points.shape[0] > 0: - ( - vertices, - element_gradients, - tetras, - inside, - ) = self.support.get_element_gradient_for_location(points[:, :3]) - # e, inside = self.support.elements_for_array(points[:, :3]) - # nodes = self.support.nodes[self.support.elements[e]] - vecs = vertices[:, 1:, :] - vertices[:, 0, None, :] - norm = np.linalg.norm(points[:, 3:6], axis=1) - points[:, 3:6] /= norm[:, None] - element_gradients /= norm[:, None, None] - # d_t *= vol[:,None,None] - strike_vector, dip_vector = get_vectors(points[:, 3:6]) - A = np.einsum("ji,ijk->ik", strike_vector, element_gradients) - gi = np.zeros(self.support.n_nodes).astype(int) - gi[:] = -1 - gi[self.region] = np.arange(0, self.nx).astype(int) - # w /= 3 - idc = gi[tetras] - B = np.zeros(idc.shape[0]) - outside = ~np.any(idc == -1, axis=1) - w *= points[:, 6] - self.add_constraints_to_least_squares( - A[outside, :], - B[outside], - idc[outside, :], - w=w[outside], - name="gradient strike", - ) - A = np.einsum("ji,ijk->ik", dip_vector, element_gradients) - # A *= vol[:, None] - self.add_constraints_to_least_squares( - A[outside, :], - B[outside], - idc[outside, :], - w=w[outside], - name="gradient dip", - ) - - def add_norm_constraints(self, w=1.0): - """ - Extracts the norm vectors from the interpolators p_n list and adds - these to the implicit - system - - Parameters - ---------- - w : double - weighting of the norm constraints in a least squares system - - Returns - ------- - Notes - ----- - Controls the direction and magnitude of the norm of the scalar field - gradient. - This constraint can conflict with value constraints if the magnitude - of the vector doesn't - match with the value constraints added to the implicit system. 
- """ - - points = self.get_norm_constraints() - if points.shape[0] > 0: - ( - vertices, - element_gradients, - tetras, - inside, - ) = self.support.get_element_gradient_for_location(points[:, :3]) - # e, inside = self.support.elements_for_array(points[:, :3]) - # nodes = self.support.nodes[self.support.elements[e]] - vol = np.zeros(element_gradients.shape[0]) - vecs = vertices[:, 1:, :] - vertices[:, 0, None, :] - vol = np.abs(np.linalg.det(vecs)) / 6 - d_t = element_gradients - d_t[inside, :, :] *= vol[inside, None, None] - # add in the element gradient matrix into the inte - idc = np.tile(tetras[:, :], (3, 1, 1)) - idc = idc.swapaxes(0, 1) - # idc = self.support.elements[e] - gi = np.zeros(self.support.n_nodes).astype(int) - gi[:] = -1 - gi[self.region] = np.arange(0, self.nx).astype(int) - idc = gi[idc] - outside = ~np.any(idc == -1, axis=2) - outside = outside[:, 0] - w = points[:, 6] * w - points[inside, 3:6] *= vol[inside, None] - # w /= 3 - - self.add_constraints_to_least_squares( - d_t[outside, :, :], - points[outside, 3:6], - idc[outside], - w=w[outside], - name="norm", - ) - - def add_value_constraints(self, w=1.0): # for now weight all value points the same - """ - Adds value constraints to the least squares system - - Parameters - ---------- - w : double - weight - - Returns - ------- - - """ - - # get elements for points - points = self.get_value_constraints() - if points.shape[0] > 1: - vertices, c, tetras, inside = self.support.get_element_for_location( - points[:, :3] - ) - # calculate volume of tetras - vecs = vertices[inside, 1:, :] - vertices[inside, 0, None, :] - vol = np.abs(np.linalg.det(vecs)) / 6 - A = c[inside] - A *= vol[:, None] - idc = tetras[inside, :] - # now map the index from global to region create array size of mesh - # initialise as np.nan, then map points inside region to 0->nx - gi = np.zeros(self.support.n_nodes).astype(int) - gi[:] = -1 - - gi[self.region] = np.arange(0, self.nx) - idc = gi[idc] - outside = ~np.any(idc == -1, axis=1) - w *= points[inside, 4] - self.add_constraints_to_least_squares( - A[outside, :], - points[inside, :][outside, 3] * vol[outside], - idc[outside, :], - w=w[outside], - name="value", - ) - - def add_interface_constraints( - self, w=1.0 - ): # for now weight all value points the same - """ - Adds a constraint that defines all points with the same - 'id' to be the same value - Sets all P1-P2 = 0 for all pairs of points - - Parameters - ---------- - w : double - weight - - Returns - ------- - - """ - points = self.get_interface_constraints() - if points.shape[0] > 1: - vertices, c, tetras, inside = self.support.get_element_for_location( - points[:, :3] - ) - # calculate volume of tetras - vecs = vertices[inside, 1:, :] - vertices[inside, 0, None, :] - vol = np.abs(np.linalg.det(vecs)) / 6 - A = c[inside] - A *= vol[:, None] - idc = tetras[inside, :] - for unique_id in np.unique( - points[np.logical_and(~np.isnan(points[:, 3]), inside), 3] - ): - mask = points[inside, 3] == unique_id - ij = np.array( - np.meshgrid( - np.arange(0, A[mask, :].shape[0]), - np.arange(0, A[mask, :].shape[0]), - ) - ).T.reshape(-1, 2) - interface_A = np.hstack( - [A[mask, :][ij[:, 0], :], -A[mask, :][ij[:, 1], :]] - ) - interface_idc = np.hstack( - [idc[mask, :][ij[:, 0], :], idc[mask, :][ij[:, 1], :]] - ) - - # now map the index from global to region create array size of mesh - # initialise as np.nan, then map points inside region to 0->nx - gi = np.zeros(self.support.n_nodes).astype(int) - gi[:] = -1 - - gi[self.region] = np.arange(0, self.nx) - 
interface_idc = gi[interface_idc] - outside = ~np.any(interface_idc == -1, axis=1) - self.add_constraints_to_least_squares( - interface_A[outside, :], - np.zeros(interface_A[outside, :].shape[0]), - interface_idc[outside, :], - w=w, - name="interface_{}".format(unique_id), - ) - - def add_gradient_orthogonal_constraints( - self, points: np.ndarray, vector: np.ndarray, w: float = 1.0, B: float = 0 - ): - """ - constraints scalar field to be orthogonal to a given vector - - Parameters - ---------- - points : np.ndarray - location of constraints - vector : np.ndarray - vector to be orthogonal to - w : float, default 1.0 - weight of constraint in interpolator - B : float, default 0 - vector dot gradient = B, orthogonal = 0 - - Returns - ------- - - """ - if points.shape[0] > 0: - ( - vertices, - element_gradients, - tetras, - inside, - ) = self.support.get_element_gradient_for_location(points[:, :3]) - # e, inside = self.support.elements_for_array(points[:, :3]) - # nodes = self.support.nodes[self.support.elements[e]] - norm = np.linalg.norm(vector, axis=1) - mask = np.isnan(norm) - mask = ~np.logical_or(mask, norm == 0) - vector[mask, :] /= norm[mask, None] - vecs = vertices[:, 1:, :] - vertices[:, 0, None, :] - # vol = np.abs(np.linalg.det(vecs)) / 6 - element_gradients[mask, :, :] /= norm[mask, None, None] - - A = np.einsum("ij,ijk->ik", vector, element_gradients) - - # A *= vol[:, None] - - gi = np.zeros(self.support.n_nodes).astype(int) - gi[:] = -1 - gi[self.region] = np.arange(0, self.nx).astype(int) - idc = gi[tetras] - B = np.zeros(idc.shape[0]) + B - outside = ~np.any(idc == -1, axis=1) - self.add_constraints_to_least_squares( - A[outside, :], - B[outside], - idc[outside, :], - w=w, - name="gradient orthogonal", - ) diff --git a/LoopStructural/interpolators/supports/_3d_base_structured.py b/LoopStructural/interpolators/supports/_3d_base_structured.py index fdaaf0ea3..6b9b5f043 100644 --- a/LoopStructural/interpolators/supports/_3d_base_structured.py +++ b/LoopStructural/interpolators/supports/_3d_base_structured.py @@ -1,4 +1,5 @@ from LoopStructural.utils.exceptions import LoopException +from abc import ABCMeta, abstractmethod import numpy as np from LoopStructural.utils import getLogger from . 
import SupportType @@ -6,7 +7,7 @@ logger = getLogger(__name__) -class BaseStructuredSupport: +class BaseStructuredSupport(metaclass=ABCMeta): """ """ def __init__( @@ -45,6 +46,7 @@ def __init__( self._rotation_xy[1, 1] = 1 self._rotation_xy[2, 2] = 1 self.rotation_xy = rotation_xy + self.interpolator = None def to_dict(self): return { @@ -54,6 +56,14 @@ def to_dict(self): "rotation_xy": self.rotation_xy, } + @abstractmethod + def onGeometryChange(self): + """Function to be called when the geometry of the support changes""" + pass + + def associateInterpolator(self, interpolator): + self.interpolator = interpolator + @property def nsteps(self): return self._nsteps @@ -64,6 +74,7 @@ def nsteps(self, nsteps): change_factor = nsteps / self.nsteps self._step_vector /= change_factor self._nsteps = nsteps + self.onGeometryChange() @property def nsteps_cells(self): @@ -110,6 +121,7 @@ def step_vector(self, step_vector): newsteps = self._nsteps / change_factor self._nsteps = np.ceil(newsteps).astype(int) self._step_vector = step_vector + self.onGeometryChange() @property def origin(self): @@ -122,6 +134,7 @@ def origin(self, origin): length /= self.step_vector self._nsteps = np.ceil(length).astype(int) self._origin = origin + self.onGeometryChange() @property def maximum(self): @@ -136,6 +149,7 @@ def maximum(self, maximum): length = maximum - self.origin length /= self.step_vector self._nsteps = np.ceil(length).astype(int) + 1 + self.onGeometryChange() @property def n_nodes(self): @@ -145,6 +159,13 @@ def n_nodes(self): def n_elements(self): return np.prod(self.nsteps_cells) + @property + def elements(self): + global_index = np.arange(self.n_elements) + cell_indexes = self.global_index_to_cell_index(global_index) + + return self.global_node_indices(self.cell_corner_indexes(cell_indexes)) + def __str__(self): return ( "LoopStructural interpolation support: {} \n" @@ -275,34 +296,8 @@ def _global_indicies(self, indexes: np.ndarray, nsteps: np.ndarray) -> np.ndarra + nsteps[None, 0] * nsteps[None, 1] * indexes[:, 2] ) return gi.reshape(original_shape[:-1]) - # if len(indexes.shape) == 2: - # if indexes.shape[1] != 3 and indexes.shape[0] == 3: - # indexes = indexes.swapaxes(0, 1) - # if indexes.shape[1] != 3: - # logger.error("Indexes shape {}".format(indexes.shape)) - # raise ValueError("Cell indexes needs to be Nx3") - # return ( - # indexes[:, 0] - # + nsteps[None, 0] * indexes[:, 1] - # + nsteps[None, 0] * nsteps[None, 1] * indexes[:, 2] - # ) - # if len(indexes.shape) == 3: - # # if indexes.shape[2] != 3 and indexes.shape[1] == 3: - # # indexes = indexes.swapaxes(1, 2) - # # if indexes.shape[2] != 3 and indexes.shape[0] == 3: - # # indexes = indexes.swapaxes(0, 2) - # if indexes.shape[2] != 3: - # logger.error("Indexes shape {}".format(indexes.shape)) - # raise ValueError("Cell indexes needs to be NxNx3") - # return ( - # indexes[:, :, 0] - # + nsteps[None, None, 0] * indexes[:, :, 1] - # + nsteps[None, None, 0] * nsteps[None, None, 1] * indexes[:, :, 2] - # ) - # else: - # raise ValueError("Cell indexes need to be a 2 or 3d numpy array") - - def cell_corner_indexes(self, cell_indexes): + + def cell_corner_indexes(self, cell_indexes: np.ndarray) -> np.ndarray: """ Returns the indexes of the corners of a cell given its location xi, yi, zi @@ -453,3 +448,18 @@ def global_cell_indices(self, indexes) -> np.ndarray: """ return self._global_indicies(indexes, self.nsteps_cells) + + def pyvista(self): + try: + import pyvista as pv + except ImportError: + raise ImportError("pyvista is required for vtk 
support") + + from pyvista import CellType + + celltype = np.full(self.n_elements, CellType.VOXEL, dtype=np.uint8) + elements = np.hstack( + [np.zeros(self.elements.shape[0], dtype=int)[:, None] + 8, self.elements] + ) + elements = elements.flatten() + return pv.UnstructuredGrid(elements, celltype, self.nodes) diff --git a/LoopStructural/interpolators/supports/_3d_structured_grid.py b/LoopStructural/interpolators/supports/_3d_structured_grid.py index 25f02c635..990221f85 100644 --- a/LoopStructural/interpolators/supports/_3d_structured_grid.py +++ b/LoopStructural/interpolators/supports/_3d_structured_grid.py @@ -40,6 +40,9 @@ def __init__( self.regions = {} self.regions["everywhere"] = np.ones(self.n_nodes).astype(bool) + def onGeometryChange(self): + pass + @property def barycentre(self): return self.cell_centres(np.arange(self.n_elements)) @@ -395,7 +398,7 @@ def evaluate_gradient(self, evaluation_points, property_array): ] ).T - def get_element_gradient_for_location(self, pos): + def get_element_gradient_for_location(self, pos: np.ndarray): """ Get the gradient of the element at the locations. @@ -420,6 +423,7 @@ def get_element_gradient_for_location(self, pos): # xindex, yindex, zindex = self.position_to_cell_index(pos) # cellx, celly, cellz = self.cell_corner_indexes(xindex, yindex,zindex) # x, y, z = self.node_indexes_to_position(cellx, celly, cellz) + pos = np.asarray(pos) T = np.zeros((pos.shape[0], 3, 8)) local_coords = self.position_to_local_coordinates(pos) vertices, inside = self.position_to_cell_vertices(pos) @@ -454,7 +458,7 @@ def get_element_gradient_for_location(self, pos): return vertices, T, elements, inside - def get_element_for_location(self, pos): + def get_element_for_location(self, pos: np.ndarray): """Calculate the shape function of elements for a location diff --git a/LoopStructural/interpolators/supports/_3d_structured_tetra.py b/LoopStructural/interpolators/supports/_3d_structured_tetra.py index a5e8c15c4..f11c6fcb7 100644 --- a/LoopStructural/interpolators/supports/_3d_structured_tetra.py +++ b/LoopStructural/interpolators/supports/_3d_structured_tetra.py @@ -1,12 +1,11 @@ """ Tetmesh based on cartesian grid for piecewise linear interpolation """ -import logging import numpy as np from ._3d_base_structured import BaseStructuredSupport from . 
import SupportType
-
+from scipy.sparse import coo_matrix, tril
 from LoopStructural.utils import getLogger

 logger = getLogger(__name__)
@@ -27,8 +26,32 @@ def __init__(
         self.tetra_mask = np.array(
             [[0, 6, 5, 3], [7, 3, 5, 6], [4, 0, 5, 6], [2, 0, 3, 6], [1, 0, 3, 5]]
         )
-
+        self.shared_element_relationships = np.zeros(
+            (self.neighbours[self.neighbours >= 0].flatten().shape[0], 2), dtype=int
+        )
+        self.shared_elements = np.zeros(
+            (self.neighbours[self.neighbours >= 0].flatten().shape[0], 3), dtype=int
+        )
         self.cg = None
+        self._elements = None
+
+        self._init_face_table()
+
+    def onGeometryChange(self):
+        self._elements = None
+        self.shared_element_relationships = np.zeros(
+            (self.neighbours[self.neighbours >= 0].flatten().shape[0], 2), dtype=int
+        )
+        self.shared_elements = np.zeros(
+            (self.neighbours[self.neighbours >= 0].flatten().shape[0], 3), dtype=int
+        )
+        self._init_face_table()
+        if self.interpolator is not None:
+            self.interpolator.reset()
+
+    @property
+    def neighbours(self):
+        return self.get_neighbours()

     @property
     def ntetra(self) -> int:
@@ -42,6 +65,28 @@ def n_elements(self) -> int:
     def n_cells(self) -> int:
         return np.prod(self.nsteps_cells)

+    @property
+    def elements(self):
+        if self._elements is None:
+            self._elements = self.get_elements()
+        return self._elements
+
+    @property
+    def element_size(self):
+        """Calculate the volume of each tetrahedron from its 4 corners:
+        volume = abs(det(A)) / 6, where A is the Jacobian of the corner vectors
+
+        Returns
+        -------
+        np.ndarray
+            volume of every tetrahedron in the mesh
+        """
+        vecs = (
+            self.nodes[self.elements[:, :4], :][:, 1:, :]
+            - self.nodes[self.elements[:, :4], :][:, 0, None, :]
+        )
+        return np.abs(np.linalg.det(vecs)) / 6
+
     @property
     def barycentre(self) -> np.ndarray:
         """
@@ -54,10 +99,98 @@
         barycentres of all tetrahedrons

         """
-        tetra = self.get_elements()
+        tetra = self.elements
        barycentre = np.sum(self.nodes[tetra][:, :, :], axis=1) / 4.0
         return barycentre

+    def _init_face_table(self):
+        """
+        Fill table containing elements that share a face, and another
+        table that contains the nodes for a face.
+        """
+        # need to identify the shared nodes for pairs of elements
+        # we do this by creating a sparse matrix that has N rows (number of elements)
+        # and M columns (number of nodes).
+        # We then fill the location where a node is in an element with true
+        # Then we create a table for the pairs of elements in the mesh
+        # we have the neighbour relationships, which are the 4 neighbours for each element
+        # create a new table that shows the element index repeated four times
+        # flatten both of these arrays so we effectively have a table with pairs of neighbours
+        # discard the negative neighbours because these are border neighbours
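# A minimal, self-contained sketch of the incidence-matrix idea described in
# the comments above, on a hypothetical two-tetrahedron mesh (numpy and scipy
# only; the mesh and the names below are illustrative, not part of the patch):

import numpy as np
from scipy.sparse import coo_matrix

elements = np.array([[0, 1, 2, 3], [1, 2, 3, 4]])  # two tetra sharing face (1, 2, 3)
n_elements, n_nodes = 2, 5
# element index repeated once per corner, paired with that corner's node id
rows = np.tile(np.arange(n_elements)[:, None], (1, 4))
element_nodes = coo_matrix(
    (np.ones(elements.size), (rows.ravel(), elements.ravel())),
    shape=(n_elements, n_nodes),
    dtype=bool,
).tocsr()
# multiplying two rows keeps only the nodes common to both elements;
# three common nodes means the pair shares a triangular face
shared = element_nodes[0, :].multiply(element_nodes[1, :])
assert shared.sum() == 3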
+        rows = np.tile(np.arange(self.n_elements)[:, None], (1, 4))
+        elements = self.elements
+        neighbours = self.get_neighbours()
+        # add array of bool to the location where there are elements for each node
+
+        # use this to determine shared faces
+
+        element_nodes = coo_matrix(
+            (np.ones(elements.shape[0] * 4), (rows.ravel(), elements.ravel())),
+            shape=(self.n_elements, self.n_nodes),
+            dtype=bool,
+        ).tocsr()
+        n1 = np.tile(np.arange(neighbours.shape[0], dtype=int)[:, None], (1, 4))
+        n1 = n1.flatten()
+        n2 = neighbours.flatten()
+        n1 = n1[n2 >= 0]
+        n2 = n2[n2 >= 0]
+        el_rel = np.zeros((self.neighbours.flatten().shape[0], 2), dtype=int)
+        el_rel[:] = -1
+        el_rel[np.arange(n1.shape[0]), 0] = n1
+        el_rel[np.arange(n1.shape[0]), 1] = n2
+        el_rel = el_rel[el_rel[:, 0] >= 0, :]
+
+        # el_rel2 = np.zeros((self.neighbours.flatten().shape[0], 2), dtype=int)
+        self.shared_element_relationships[:] = -1
+        el_pairs = coo_matrix(
+            (np.ones(el_rel.shape[0]), (el_rel[:, 0], el_rel[:, 1]))
+        ).tocsr()
+        i, j = tril(el_pairs).nonzero()
+
+        self.shared_element_relationships[: len(i), 0] = i
+        self.shared_element_relationships[: len(i), 1] = j
+
+        self.shared_element_relationships = self.shared_element_relationships[
+            self.shared_element_relationships[:, 0] >= 0, :
+        ]
+
+        faces = element_nodes[self.shared_element_relationships[:, 0], :].multiply(
+            element_nodes[self.shared_element_relationships[:, 1], :]
+        )
+        shared_faces = faces[np.array(np.sum(faces, axis=1) == 3).flatten(), :]
+        row, col = shared_faces.nonzero()
+        row = row[row.argsort()]
+        col = col[row.argsort()]
+        shared_face_index = np.zeros((shared_faces.shape[0], 3), dtype=int)
+        shared_face_index[:] = -1
+        shared_face_index[row.reshape(-1, 3)[:, 0], :] = col.reshape(-1, 3)
+
+        self.shared_elements[
+            np.arange(self.shared_element_relationships.shape[0]), :
+        ] = shared_face_index
+        # resize
+        self.shared_elements = self.shared_elements[
+            : len(self.shared_element_relationships), :
+        ]
+
+    @property
+    def shared_element_norm(self):
+        """
+        Get the normal to all of the shared elements
+        """
+        elements = self.shared_elements
+        v1 = self.nodes[elements[:, 1], :] - self.nodes[elements[:, 0], :]
+        v2 = self.nodes[elements[:, 2], :] - self.nodes[elements[:, 0], :]
+        return np.cross(v1, v2, axisa=1, axisb=1)
+
+    @property
+    def shared_element_size(self):
+        """
+        Get the area of the shared triangle
+        """
+        norm = self.shared_element_norm
+        return 0.5 * np.linalg.norm(norm, axis=1)
+
     def evaluate_value(self, pos: np.ndarray, property_array: np.ndarray) -> np.ndarray:
         """
         Evaluate value of interpolant
@@ -77,7 +210,7 @@ def evaluate_value(self, pos: np.ndarray, property_array: np.ndarray) -> np.ndarray:
         values[:] = np.nan
         vertices, c, tetras, inside = self.get_element_for_location(pos)
         values[inside] = np.sum(
-            c[inside, :] * property_array[tetras[inside, :]], axis=1
+            c[inside, :] * property_array[self.elements[tetras[inside]]], axis=1
         )
         return values

@@ -109,7 +242,8 @@
         ) = self.get_element_gradient_for_location(pos)
         # grads = np.zeros(tetras.shape)
         values[inside, :] = (
-
element_gradients[inside, :, :] * property_array[tetras[inside, None, :]] + element_gradients[inside, :, :] + * property_array[self.elements[tetras[inside]][:, None, :]] ).sum(2) length = np.sum(values[inside, :], axis=1) # values[inside,:] /= length[:,None] @@ -186,7 +320,13 @@ def get_element_for_location(self, pos: np.ndarray): c[:, :, 3] = vd / v # if all coords are +ve then point is inside cell - mask = np.all(c > 0, axis=2) + mask = np.all(c >= 0, axis=2) + i, j = np.where(mask) + ## find any cases where the point belongs to two cells + ## just use the second cell + pairs = {ii: jj for ii, jj in zip(i, j)} + mask[:] = False + mask[list(pairs.keys()), list(pairs.values())] = True inside = np.logical_and(inside, np.any(mask, axis=1)) # get cell corners @@ -208,11 +348,25 @@ def get_element_for_location(self, pos: np.ndarray): c_return = np.zeros((pos.shape[0], 4)) c_return[:] = np.nan c_return[inside] = c[mask] - tetra_return = np.zeros((pos.shape[0], 4)).astype(int) + tetra_return = np.zeros((pos.shape[0])).astype(int) tetra_return[:] = -1 - tetra_return[inside, :] = tetras[mask, :] + local_tetra_index = np.tile(np.arange(0, 5)[None, :], (mask.shape[0], 1)) + local_tetra_index = local_tetra_index[mask] + tetra_global_index = self.tetra_global_index( + cell_indexes[inside, :], local_tetra_index + ) + tetra_return[inside] = tetra_global_index return vertices_return, c_return, tetra_return, inside + def evaluate_shape(self, locations): + """ + Convenience function returning barycentric coords + + """ + locations = np.array(locations) + verts, c, elements, inside = self.get_element_for_location(locations) + return c, elements, inside + def get_elements(self): """ Get a numpy array of all of the elements in the mesh @@ -226,8 +380,9 @@ def get_elements(self): x = np.arange(0, self.nsteps_cells[0]) y = np.arange(0, self.nsteps_cells[1]) z = np.arange(0, self.nsteps_cells[2]) - - cell_indexes = np.array(np.meshgrid(x, y, z, indexing="ij")).reshape(3, -1).T + ## reverse x and z so that indexing is + zz, yy, xx = np.meshgrid(z, y, x, indexing="ij") + cell_indexes = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T # get cell corners cell_corners = self.cell_corner_indexes(cell_indexes) even_mask = np.sum(cell_indexes, axis=1) % 2 == 0 @@ -238,6 +393,26 @@ def get_elements(self): return tetras.reshape((tetras.shape[0] * tetras.shape[1], tetras.shape[2])) + def tetra_global_index(self, indices, tetra_index): + """ + Get the global index of a tetra from the cell index and the local tetra index + + Parameters + ---------- + indices + tetra_index + + Returns + ------- + + """ + return ( + tetra_index + + indices[:, 0] * 5 + + self.nsteps_cells[0] * indices[:, 1] * 5 + + self.nsteps_cells[0] * self.nsteps_cells[1] * indices[:, 2] * 5 + ) + def get_element_gradients(self, elements=None): """ Get the gradients of all tetras @@ -256,7 +431,8 @@ def get_element_gradients(self, elements=None): y = np.arange(0, self.nsteps_cells[1]) z = np.arange(0, self.nsteps_cells[2]) - cell_indexes = np.array(np.meshgrid(x, y, z, indexing="ij")).reshape(3, -1).T + zz, yy, xx = np.meshgrid(z, y, x, indexing="ij") + cell_indexes = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T # c_xi = c_xi.flatten(order="F") # c_yi = c_yi.flatten(order="F") # c_zi = c_zi.flatten(order="F") @@ -310,6 +486,20 @@ def get_element_gradients(self, elements=None): return element_gradients[elements, :, :] + def evaluate_shape_derivatives(self, pos, elements=None): + inside = None + if elements is not None: + inside = 
np.ones(elements.shape[0], dtype=bool) + if elements is None: + verts, c, elements, inside = self.get_element_for_location(pos) + # np.arange(0, self.n_elements, dtype=int) + + return ( + self.get_element_gradients(elements), + elements, + inside, + ) + def get_element_gradient_for_location(self, pos: np.ndarray): """ Get the gradient of the tetra for a location @@ -397,26 +587,6 @@ def global_cell_indicies(self, indexes: np.ndarray): * indexes[:, :, 2] ) - # def position_to_cell_corners(self, pos: np.ndarray) -> np.ndarray: - # """ - # Find the nodes that belong to a cell which contains a point - - # Parameters - # ---------- - # pos - - # Returns - # ------- - - # """ - # inside = self.inside(pos) - # ix, iy, iz = self.position_to_cell_index(pos) - # cornersx, cornersy, cornersz = self.cell_corner_indexes(ix, iy, iz) - # globalidx = self.global_cell_indicies( - # np.dstack([cornersx, cornersy, cornersz]).T - # ) - # return globalidx, inside - def global_index_to_node_index(self, global_index: np.ndarray): """ Convert from global indexes to xi,yi,zi @@ -498,57 +668,57 @@ def get_neighbours(self) -> np.ndarray: odd_mask = ( np.sum(self.global_index_to_cell_index(tetra_index // 5), axis=0) % 2 == 1 ) - odd_mask = ~odd_mask.astype(bool) + odd_mask = odd_mask.astype(bool) # apply masks to masks = [] masks.append( [ np.logical_and(one_mask, odd_mask), - np.array([[-1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3]]), + np.array([[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 4]]), ] ) masks.append( [ np.logical_and(two_mask, odd_mask), - np.array([[1, 0, 0, 2], [0, -1, 0, 1], [0, 0, 1, 4]]), + np.array([[-1, 0, 0, 2], [0, -1, 0, 1], [0, 0, 1, 3]]), ] ) masks.append( [ np.logical_and(three_mask, odd_mask), - np.array([[-1, 0, 0, 4], [0, -1, 0, 3], [0, 0, -1, 2]]), + np.array([[-1, 0, 0, 4], [0, 1, 0, 3], [0, 0, -1, 1]]), ] ) masks.append( [ np.logical_and(four_mask, odd_mask), - np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, -1, 1]]), + np.array([[1, 0, 0, 3], [0, -1, 0, 4], [0, 0, -1, 2]]), ] ) masks.append( [ np.logical_and(one_mask, ~odd_mask), - np.array([[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 4]]), + np.array([[-1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3]]), ] ) masks.append( [ np.logical_and(two_mask, ~odd_mask), - np.array([[-1, 0, 0, 2], [0, -1, 0, 1], [0, 0, 1, 3]]), + np.array([[1, 0, 0, 2], [0, -1, 0, 1], [0, 0, 1, 4]]), ] ) masks.append( [ np.logical_and(three_mask, ~odd_mask), - np.array([[-1, 0, 0, 4], [0, 1, 0, 3], [0, 0, -1, 1]]), + np.array([[-1, 0, 0, 4], [0, -1, 0, 3], [0, 0, -1, 2]]), ] ) masks.append( [ np.logical_and(four_mask, ~odd_mask), - np.array([[1, 0, 0, 3], [0, -1, 0, 4], [0, 0, -1, 2]]), + np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, -1, 1]]), ] ) @@ -575,8 +745,23 @@ def get_neighbours(self) -> np.ndarray: + neigh_cell[:, :, 1] * self.nsteps_cells[0] + neigh_cell[:, :, 2] * self.nsteps_cells[0] * self.nsteps_cells[1] ) * 5 + mask[:, 3] - global_neighbour_idx[~inside] = -1 neighbours[logic, 1:] = global_neighbour_idx return neighbours + + @property + def vtk(self): + try: + import pyvista as pv + except ImportError: + raise ImportError("pyvista is required for vtk support") + + from pyvista import CellType + + celltype = np.full(self.elements.shape[0], CellType.TETRA, dtype=np.uint8) + elements = np.hstack( + [np.zeros(self.elements.shape[0], dtype=int)[:, None] + 4, self.elements] + ) + elements = elements.flatten() + return pv.UnstructuredGrid(elements, celltype, self.nodes) diff --git a/LoopStructural/interpolators/supports/_3d_unstructured_tetra.py 
b/LoopStructural/interpolators/supports/_3d_unstructured_tetra.py index 78d8dee8f..9f9176706 100644 --- a/LoopStructural/interpolators/supports/_3d_unstructured_tetra.py +++ b/LoopStructural/interpolators/supports/_3d_unstructured_tetra.py @@ -1,12 +1,13 @@ """ Tetmesh based on cartesian grid for piecewise linear interpolation """ +from ast import Tuple import logging import numpy as np -from scipy.sparse import csr_matrix +from scipy.sparse import csr_matrix, coo_matrix, tril -from ._3d_base_structured import BaseStructuredSupport +from . import StructuredGrid from LoopStructural.utils import getLogger from . import SupportType @@ -73,7 +74,7 @@ def __init__( aabb_nsteps[aabb_nsteps < 2] = 2 aabb_nsteps = np.array(aabb_nsteps, dtype=int) step_vector = (self.maximum - self.minimum) / (aabb_nsteps - 1) - self.aabb_grid = BaseStructuredSupport( + self.aabb_grid = StructuredGrid( self.minimum, nsteps=aabb_nsteps, step_vector=step_vector ) # make a big table to store which tetra are in which element. @@ -83,9 +84,11 @@ def __init__( (self.aabb_grid.n_elements, len(self.elements)), dtype=bool ) self.shared_element_relationships = np.zeros( - (self.elements.shape[0] * 3, 2), dtype=int + (self.neighbours[self.neighbours >= 0].flatten().shape[0], 2), dtype=int + ) + self.shared_elements = np.zeros( + (self.neighbours[self.neighbours >= 0].flatten().shape[0], 3), dtype=int ) - self.shared_elements = np.zeros((self.elements.shape[0] * 3, 3), dtype=int) self._init_face_table() self._initialise_aabb() @@ -94,28 +97,91 @@ def _init_face_table(self): Fill table containing elements that share a face, and another table that contains the nodes for a face. """ - flag = np.zeros(self.elements.shape[0]) - face_index = 0 - for i, t in enumerate(self.elements): - flag[i] = True - for n in self.neighbours[i]: - if n < 0: - continue - if flag[n]: - continue - face_node_index = 0 - self.shared_element_relationships[face_index, 0] = i - self.shared_element_relationships[face_index, 1] = n - for v in t: - if v in self.elements[n, :4]: - self.shared_elements[face_index, face_node_index] = v - face_node_index += 1 - - face_index += 1 - self.shared_elements = self.shared_elements[:face_index, :] + # need to identify the shared nodes for pairs of elements + # we do this by creating a sparse matrix that has N rows (number of elements) + # and M columns (number of nodes). 
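# The same incidence-matrix construction is used here as in the structured
# tetmesh; the step worth a small illustration is the de-duplication of
# neighbour pairs. Each shared face is recorded twice, as (i, j) and (j, i),
# so keeping only the lower triangle of the pair matrix keeps each face once.
# A toy example (numpy and scipy only; the data is hypothetical):

import numpy as np
from scipy.sparse import coo_matrix, tril

el_rel = np.array([[0, 1], [1, 0]])  # one shared face, recorded both ways
el_pairs = coo_matrix(
    (np.ones(el_rel.shape[0]), (el_rel[:, 0], el_rel[:, 1]))
).tocsr()
i, j = tril(el_pairs).nonzero()
assert (i.tolist(), j.tolist()) == ([1], [0])  # only the (1, 0) entry survives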
+        # We then fill the location where a node is in an element with true
+        # Then we create a table for the pairs of elements in the mesh
+        # we have the neighbour relationships, which are the 4 neighbours for each element
+        # create a new table that shows the element index repeated four times
+        # flatten both of these arrays so we effectively have a table with pairs of neighbours
+        # discard the negative neighbours because these are border neighbours
+        rows = np.tile(np.arange(self.n_elements)[:, None], (1, 4))
+        elements = self.get_elements()
+        neighbours = self.get_neighbours()
+        # add array of bool to the location where there are elements for each node
+
+        # use this to determine shared faces
+
+        element_nodes = coo_matrix(
+            (np.ones(elements.shape[0] * 4), (rows.ravel(), elements.ravel())),
+            shape=(self.n_elements, self.n_nodes),
+            dtype=bool,
+        ).tocsr()
+        n1 = np.tile(np.arange(neighbours.shape[0], dtype=int)[:, None], (1, 4))
+        n1 = n1.flatten()
+        n2 = neighbours.flatten()
+        n1 = n1[n2 >= 0]
+        n2 = n2[n2 >= 0]
+        el_rel = np.zeros((self.neighbours.flatten().shape[0], 2), dtype=int)
+        el_rel[:] = -1
+        el_rel[np.arange(n1.shape[0]), 0] = n1
+        el_rel[np.arange(n1.shape[0]), 1] = n2
+        el_rel = el_rel[el_rel[:, 0] >= 0, :]
+
+        # el_rel2 = np.zeros((self.neighbours.flatten().shape[0], 2), dtype=int)
+        self.shared_element_relationships[:] = -1
+        el_pairs = coo_matrix(
+            (np.ones(el_rel.shape[0]), (el_rel[:, 0], el_rel[:, 1]))
+        ).tocsr()
+        i, j = tril(el_pairs).nonzero()
+        self.shared_element_relationships[: len(i), 0] = i
+        self.shared_element_relationships[: len(i), 1] = j
+
         self.shared_element_relationships = self.shared_element_relationships[
-            :face_index, :
+            self.shared_element_relationships[:, 0] >= 0, :
         ]
+
+        faces = element_nodes[self.shared_element_relationships[:, 0], :].multiply(
+            element_nodes[self.shared_element_relationships[:, 1], :]
+        )
+        shared_faces = faces[np.array(np.sum(faces, axis=1) == 3).flatten(), :]
+        row, col = shared_faces.nonzero()
+        row = row[row.argsort()]
+        col = col[row.argsort()]
+        shared_face_index = np.zeros((shared_faces.shape[0], 3), dtype=int)
+        shared_face_index[:] = -1
+        shared_face_index[row.reshape(-1, 3)[:, 0], :] = col.reshape(-1, 3)
+
+        self.shared_elements[
+            np.arange(self.shared_element_relationships.shape[0]), :
+        ] = shared_face_index
+        # resize
+        self.shared_elements = self.shared_elements[
+            : len(self.shared_element_relationships), :
+        ]
+        # flag = np.zeros(self.elements.shape[0])
+        # face_index = 0
+        # for i, t in enumerate(self.elements):
+        #     flag[i] = True
+        #     for n in self.neighbours[i]:
+        #         if n < 0:
+        #             continue
+        #         if flag[n]:
+        #             continue
+        #         face_node_index = 0
+        #         self.shared_element_relationships[face_index, 0] = i
+        #         self.shared_element_relationships[face_index, 1] = n
+        #         for v in t:
+        #             if v in self.elements[n, :4]:
+        #                 self.shared_elements[face_index, face_node_index] = v
+        #                 face_node_index += 1
+
+        #         face_index += 1
+        # self.shared_elements = self.shared_elements[:face_index, :]
+        # self.shared_element_relationships = self.shared_element_relationships[
+        #     :face_index, :
+        # ]

     def _initialise_aabb(self):
         """assigns the tetras to the grid cells where the bounding box
@@ -249,19 +315,14 @@ def evaluate_shape_derivatives(self, locations, elements=None):
         -------

         """
-        # points = np.zeros((5, 4, self.n_cells, 3))
-        # points[:, :, even_mask, :] = nodes[:, even_mask, :][self.tetra_mask_even, :, :]
-        # points[:, :, ~even_mask, :] = nodes[:, ~even_mask, :][self.tetra_mask, :, :]
-
-        # # changing order to points, tetra, nodes,
coord - # points = points.swapaxes(0, 2) - # points = points.swapaxes(1, 2) + inside = None + if elements is not None: + inside = np.zeros(self.n_elements, dtype=bool) + inside[elements] = True if elements is None: - elements = np.arange(0, self.n_elements, dtype=int) - ps = self.nodes[ - self.elements, : - ] # points.reshape(points.shape[0] * points.shape[1], points.shape[2], points.shape[3]) - # vertices = self.nodes[self.elements[col,:]] + verts, c, elements, inside = self.get_element_for_location(locations) + # elements = np.arange(0, self.n_elements, dtype=int) + ps = self.nodes[self.elements, :] m = np.array( [ [ @@ -290,7 +351,7 @@ def evaluate_shape_derivatives(self, locations, elements=None): element_gradients = element_gradients.swapaxes(1, 2) element_gradients = element_gradients @ I - return element_gradients[elements, :, :], elements + return element_gradients[elements, :, :], elements, inside def evaluate_shape(self, locations): """ @@ -320,7 +381,7 @@ def evaluate_value(self, pos, property_array): values[:] = np.nan vertices, c, tetras, inside = self.get_element_for_location(pos) values[inside] = np.sum( - c[inside, :] * property_array[self.elements[tetras][inside, :]], axis=1 + c[inside, :] * property_array[self.elements[tetras[inside], :]], axis=1 ) return values @@ -375,7 +436,7 @@ def inside(self, pos): def get_elements(self): return self.elements - def get_element_for_location(self, points): + def get_element_for_location(self, points: np.ndarray) -> Tuple: """ Determine the tetrahedron from a numpy array of points @@ -444,7 +505,11 @@ def get_element_for_location(self, points): tetras[: npts + npts_step][row[mask]] = col[mask] inside[: npts + npts_step][row[mask]] = True npts += npts_step - return verts, bc, tetras, inside + tetra_return = np.zeros((points.shape[0])).astype(int) + tetra_return[:] = -1 + + tetra_return[inside] = tetras[inside] + return verts, bc, tetra_return, inside def get_element_gradients(self, elements=None): """ @@ -554,3 +619,19 @@ def get_neighbours(self): """ return self.neighbours + + @property + def vtk(self): + try: + import pyvista as pv + except ImportError: + raise ImportError("pyvista is required for vtk support") + + from pyvista import CellType + + celltype = np.full(self.elements.shape[0], CellType.TETRA, dtype=np.uint8) + elements = np.hstack( + [np.zeros(self.elements.shape[0], dtype=int)[:, None] + 4, self.elements] + ) + elements = elements.flatten() + return pv.UnstructuredGrid(elements, celltype, self.nodes) diff --git a/LoopStructural/interpolators/supports/_support_factory.py b/LoopStructural/interpolators/supports/_support_factory.py index 04a94822d..720efc600 100644 --- a/LoopStructural/interpolators/supports/_support_factory.py +++ b/LoopStructural/interpolators/supports/_support_factory.py @@ -18,5 +18,18 @@ def from_dict(d): raise ValueError("No support type specified") return SupportFactory.create_support(support_type, **d) + @staticmethod + def create_support_from_bbox( + support_type, bounding_box, nelements, element_volume=None, buffer: float = 0.2 + ): + if type(support_type) == str: + support_type = SupportType._member_map_[support_type].numerator + bbox = bounding_box.with_buffer(buffer=buffer) + bbox.nelements = nelements + + return support_map[support_type]( + origin=bbox.origin, step_vector=bbox.step_vector, nsteps=bbox.nsteps + ) + # @staticmethod # def create_3d_structured_grid(origin, maximum, step_vector, nsteps) diff --git a/LoopStructural/modelling/core/geological_model.py 
b/LoopStructural/modelling/core/geological_model.py index d79f18807..5c3a36f30 100644 --- a/LoopStructural/modelling/core/geological_model.py +++ b/LoopStructural/modelling/core/geological_model.py @@ -6,36 +6,8 @@ import numpy as np import pandas as pd -try: - from ...interpolators import DiscreteFoldInterpolator as DFI - dfi = True -except ImportError: - dfi = False -from ...interpolators import FiniteDifferenceInterpolator as FDI - -try: - from ...interpolators import PiecewiseLinearInterpolator as PLI - - pli = True -except ImportError: - pli = False - -# if LoopStructural.experimental: -from ...interpolators import P2Interpolator - -try: - from ...interpolators import SurfeRBFInterpolator as Surfe - - surfe = True - -except ImportError: - surfe = False - -from ...interpolators import StructuredGrid -from ...interpolators import TetMesh from ...modelling.features.fault import FaultSegment -from ...interpolators import DiscreteInterpolator from ...modelling.features.builders import ( FaultBuilder, @@ -54,13 +26,11 @@ FoldFrame, ) -from ...utils.exceptions import InterpolatorError from ...utils.helper import ( all_heading, gradient_vec_names, - strike_dip_vector, - get_vectors, ) +from ...utils import strikedip2vector, get_vectors from ...utils import BoundingBox from ...modelling.intrusions import IntrusionBuilder @@ -148,8 +118,9 @@ def __init__( logger.info("Initialising geological model") self.features = [] self.feature_name_index = {} - self._data = None - self.data = data + self._data = pd.DataFrame() # None + if data is not None: + self.data = data self.nsteps = nsteps # we want to rescale the model area so that the maximum length is @@ -189,9 +160,52 @@ def __init__( logger.info("Reusing interpolation supports: {}".format(self.reuse_supports)) self.stratigraphic_column = None - self.tol = 1e-10 * np.max(self.bounding_box[1, :] - self.bounding_box[0, :]) + self.tol = 1e-10 * np.max(self.bounding_box.maximum - self.bounding_box.origin) self._dtm = None + def to_dict(self): + """ + Convert the geological model to a json string + + Returns + ------- + json : str + json string of the geological model + """ + json = {} + json["model"] = {} + json["model"]["features"] = [f.name for f in self.features] + json["model"]["data"] = self.data.to_json() + json["model"]["origin"] = self.origin.tolist() + json["model"]["maximum"] = self.maximum.tolist() + json["model"]["nsteps"] = self.nsteps + json["model"]["stratigraphic_column"] = self.stratigraphic_column + json["features"] = [f.to_json() for f in self.features] + return json + + # @classmethod + # def from_json(cls,json): + # """ + # Create a geological model from a json string + + # Parameters + # ---------- + # json : str + # json string of the geological model + + # Returns + # ------- + # model : GeologicalModel + # a geological model + # """ + # model = cls(json["model"]["origin"],json["model"]["maximum"],data=None) + # model.stratigraphic_column = json["model"]["stratigraphic_column"] + # model.nsteps = json["model"]["nsteps"] + # model.data = pd.read_json(json["model"]["data"]) + # model.features = [] + # for feature in json["features"]: + # model.features.append(GeologicalFeature.from_json(feature,model)) + # return model def __str__(self): lengths = self.maximum - self.origin _str = "GeologicalModel - {} x {} x {}\n".format(*lengths) @@ -424,10 +438,7 @@ def dtm(self, dtm): """ if not callable(dtm): - raise BaseException( - "DTM must be a callable function \n" - "use LoopStructural.utils.dtm_creator to build one" - ) + raise 
BaseException("DTM must be a callable function \n") else: self._dtm = dtm @@ -618,7 +629,7 @@ def data(self, data: pd.DataFrame): logger.info("Converting strike and dip to vectors") mask = np.all(~np.isnan(self._data.loc[:, ["strike", "dip"]]), axis=1) self._data.loc[mask, gradient_vec_names()] = ( - strike_dip_vector( + strikedip2vector( self._data.loc[mask, "strike"], self._data.loc[mask, "dip"] ) * self._data.loc[mask, "polarity"].to_numpy()[:, None] @@ -794,6 +805,7 @@ def create_and_add_fold_frame( bounding_box=self.bounding_box.with_buffer(buffer), name=foldframe_data, frame=FoldFrame, + nelements=nelements, **kwargs, ) # add data @@ -953,7 +965,7 @@ def create_and_add_folded_fold_frame( interpolatortypes = [ "DFI", "FDI", - interpolatortype, + "FDI", ] fold_frame_builder = StructuralFrameBuilder( interpolatortype=interpolatortypes, @@ -978,7 +990,7 @@ def create_and_add_folded_fold_frame( folded_fold_frame = fold_frame_builder.frame folded_fold_frame.builder = fold_frame_builder - folded_fold_frame.type = "structuralframe" + folded_fold_frame.type = FeatureType.STRUCTURALFRAME self._add_feature(folded_fold_frame) @@ -991,9 +1003,7 @@ def create_and_add_intrusion( intrusion_frame_parameters={}, intrusion_lateral_extent_model=None, intrusion_vertical_extent_model=None, - # parameters_for_extent_sgs={}, geometric_scaling_parameters={}, - # faults=None, # LG seems unused? **kwargs, ): """ @@ -1322,12 +1332,16 @@ def create_and_add_fault( interpolatortype="FDI", tol=None, fault_slip_vector=None, + fault_normal_vector=None, fault_center=None, major_axis=None, minor_axis=None, intermediate_axis=None, faultfunction="BaseFault", faults=[], + force_mesh_geometry: bool = False, + points: bool = False, + fault_buffer=0.2, **kwargs, ): """ @@ -1371,8 +1385,10 @@ def create_and_add_fault( logger.info(f"Major axis: {major_axis}") logger.info(f"Minor axis: {minor_axis}") logger.info(f"Intermediate axis: {intermediate_axis}") - fault_slip_vector = np.array(fault_slip_vector, dtype="float") - fault_center = np.array(fault_center, dtype="float") + if fault_slip_vector is not None: + fault_slip_vector = np.array(fault_slip_vector, dtype="float") + if fault_center is not None: + fault_center = np.array(fault_center, dtype="float") for k, v in kwargs.items(): logger.info(f"{k}: {v}") @@ -1391,10 +1407,6 @@ def create_and_add_fault( kwargs.pop("data_region") logger.error("kwarg data_region currently not supported, disabling") displacement_scaled = displacement / self.scale_factor - # create fault frame - # interpolator = self.get_interpolator(**kwargs) - # faults arent supported for surfe - fault_frame_builder = FaultBuilder( interpolatortype, bounding_box=self.bounding_box, @@ -1403,91 +1415,17 @@ def create_and_add_fault( model=self, **kwargs, ) + fault_frame_data = self.data.loc[ + self.data["feature_name"] == fault_surface_data + ].copy() self._add_faults(fault_frame_builder, features=faults) # add data fault_frame_data = self.data.loc[ self.data["feature_name"] == fault_surface_data ].copy() - trace_mask = np.logical_and( - fault_frame_data["coord"] == 0, fault_frame_data["val"] == 0 - ) - logger.info(f"There are {np.sum(trace_mask)} points on the fault trace") - if np.sum(trace_mask) == 0: - logger.error( - "You cannot model a fault without defining the location of the fault" - ) - raise ValueError(f"There are no points on the fault trace") - - mask = np.logical_and( - fault_frame_data["coord"] == 0, ~np.isnan(fault_frame_data["gz"]) - ) - vector_data = fault_frame_data.loc[mask, ["gx", "gy", 
"gz"]].to_numpy() - mask2 = np.logical_and( - fault_frame_data["coord"] == 0, ~np.isnan(fault_frame_data["nz"]) - ) - vector_data = np.vstack( - [vector_data, fault_frame_data.loc[mask2, ["nx", "ny", "nz"]].to_numpy()] - ) - fault_normal_vector = np.mean(vector_data, axis=0) - logger.info(f"Fault normal vector: {fault_normal_vector}") - - mask = np.logical_and( - fault_frame_data["coord"] == 1, ~np.isnan(fault_frame_data["gz"]) - ) - if fault_slip_vector is None: - if ( - "avgSlipDirEasting" in kwargs - and "avgSlipDirNorthing" in kwargs - and "avgSlipDirAltitude" in kwargs - ): - fault_slip_vector = np.array( - [ - kwargs["avgSlipDirEasting"], - kwargs["avgSlipDirNorthing"], - kwargs["avgSlipDirAltitude"], - ], - dtype=float, - ) - else: - fault_slip_vector = ( - fault_frame_data.loc[mask, ["gx", "gy", "gz"]] - .mean(axis=0) - .to_numpy() - ) - if np.any(np.isnan(fault_slip_vector)): - logger.info("Fault slip vector is nan, estimating from fault normal") - strike_vector, dip_vector = get_vectors(fault_normal_vector[None, :]) - fault_slip_vector = dip_vector[:, 0] - logger.info(f"Estimated fault slip vector: {fault_slip_vector}") if fault_center is not None and ~np.isnan(fault_center).any(): fault_center = self.scale(fault_center, inplace=False) - else: - # if we haven't defined a fault centre take the - # center of mass for lines assocaited with the fault trace - if ( - ~np.isnan(kwargs.get("centreEasting", np.nan)) - and ~np.isnan(kwargs.get("centreNorthing", np.nan)) - and ~np.isnan(kwargs.get("centreAltitude", np.nan)) - ): - fault_center = self.scale( - np.array( - [ - kwargs["centreEasting"], - kwargs["centreNorthing"], - kwargs["centreAltitude"], - ], - dtype=float, - ), - inplace=False, - ) - else: - mask = np.logical_and( - fault_frame_data["coord"] == 0, fault_frame_data["val"] == 0 - ) - fault_center = ( - fault_frame_data.loc[mask, ["X", "Y", "Z"]].mean(axis=0).to_numpy() - ) if minor_axis: minor_axis = minor_axis / self.scale_factor if major_axis: @@ -1495,14 +1433,16 @@ def create_and_add_fault( if intermediate_axis: intermediate_axis = intermediate_axis / self.scale_factor fault_frame_builder.create_data_from_geometry( - fault_frame_data, - fault_center, - fault_normal_vector, - fault_slip_vector, + fault_frame_data=fault_frame_data, + fault_center=fault_center, + fault_normal_vector=fault_normal_vector, + fault_slip_vector=fault_slip_vector, minor_axis=minor_axis, major_axis=major_axis, intermediate_axis=intermediate_axis, - points=kwargs.get("points", False), + points=points, + force_mesh_geometry=force_mesh_geometry, + fault_buffer=fault_buffer, ) if "force_mesh_geometry" not in kwargs: fault_frame_builder.set_mesh_geometry(kwargs.get("fault_buffer", 0.2), 0) @@ -1520,7 +1460,7 @@ def create_and_add_fault( fault.add_region(f) break if displacement == 0: - fault.type = "fault_inactive" + fault.type = FeatureType.INACTIVEFAULT self._add_feature(fault) return fault @@ -1608,7 +1548,7 @@ def regular_grid(self, nsteps=None, shuffle=True, rescale=False, order="C"): # locs = self.rescale(locs) # return locs - def evaluate_model(self, xyz, scale=True): + def evaluate_model(self, xyz: np.ndarray, scale: bool = True) -> np.ndarray: """Evaluate the stratigraphic id at each location Parameters @@ -1823,17 +1763,23 @@ def update(self, verbose=False, progressbar=True): f"Updating geological model. 
There are: \n {nfeatures} \ geological features that need to be interpolated\n" ) - - from tqdm.auto import tqdm import time start = time.time() - - # Load tqdm with size counter instead of file counter - with tqdm(total=nfeatures) as pbar: + try: + from tqdm.auto import tqdm + except ImportError: + progressbar = False + logger.warning("Failed to import tqdm, disabling progress bar") + + if progressbar: + # Load tqdm with size counter instead of file counter + with tqdm(total=nfeatures) as pbar: + for f in self.features: + pbar.set_description(f"Interpolating {f.name}") + f.builder.up_to_date(callback=pbar.update) + else: for f in self.features: - pbar.set_description(f"Interpolating {f.name}") - f.builder.up_to_date(callback=pbar.update) - + f.builder.up_to_date() if verbose: print(f"Model update took: {time.time()-start} seconds") diff --git a/LoopStructural/modelling/core/geological_model_builder.py b/LoopStructural/modelling/core/geological_model_builder.py deleted file mode 100644 index fcf537ddd..000000000 --- a/LoopStructural/modelling/core/geological_model_builder.py +++ /dev/null @@ -1,38 +0,0 @@ -class GeologicalModelBuilderFactory: - def __init__(self): - pass - - def create_builder(self): - pass - -class GeologicalModelBuilder: - def __init__(self): - pass - - def set_model_name(self, name): - pass - - def add_surface(self, surface): - pass - - def add_fault(self, fault): - pass - - def build(self): - pass - -class GeologicalModel: - def __init__(self, name): - pass - - def add_surface(self, surface): - pass - - def add_fault(self, fault): - pass - - def get_surface(self, name): - pass - - def get_fault(self, name): - pass diff --git a/LoopStructural/modelling/features/__init__.py b/LoopStructural/modelling/features/__init__.py index a95ef5b79..5cc85b9e9 100644 --- a/LoopStructural/modelling/features/__init__.py +++ b/LoopStructural/modelling/features/__init__.py @@ -21,6 +21,7 @@ class FeatureType(IntEnum): INTRUSION = 8 FAULT = 9 DOMAINFAULT = 10 + INACTIVEFAULT = 11 from ._base_geological_feature import BaseFeature diff --git a/LoopStructural/modelling/features/_analytical_feature.py b/LoopStructural/modelling/features/_analytical_feature.py index 8391e36e5..cd15e9004 100644 --- a/LoopStructural/modelling/features/_analytical_feature.py +++ b/LoopStructural/modelling/features/_analytical_feature.py @@ -35,6 +35,20 @@ def __init__( self.origin = np.array(origin, dtype=float) self.type = FeatureType.ANALYTICAL + def to_json(self): + """ + Returns a json representation of the geological feature + + Returns + ------- + json : dict + json representation of the geological feature + """ + json = super().to_json() + json["vector"] = self.vector.tolist() + json["origin"] = self.origin.tolist() + return json + def evaluate_value(self, xyz): xyz = np.array(xyz) if len(xyz.shape) == 1: diff --git a/LoopStructural/modelling/features/_base_geological_feature.py b/LoopStructural/modelling/features/_base_geological_feature.py index 513bc78bd..e384069f9 100644 --- a/LoopStructural/modelling/features/_base_geological_feature.py +++ b/LoopStructural/modelling/features/_base_geological_feature.py @@ -1,8 +1,10 @@ from LoopStructural.modelling.features import FeatureType from LoopStructural.utils import getLogger +from LoopStructural.utils.typing import NumericInput # from LoopStructural import GeologicalModel import numpy as np +from uuid import uuid4 logger = getLogger(__name__) @@ -39,6 +41,24 @@ def __init__( self._model = model self.builder = builder self.faults_enabled = True + self._min = 
None + self._max = None + + def to_json(self): + """ + Returns a json representation of the geological feature + + Returns + ------- + json : dict + json representation of the geological feature + """ + json = {} + json["name"] = self.name + json["regions"] = [r.to_json() for r in self.regions] + json["faults"] = [f.name for f in self.faults] + json["type"] = self.type + return json def __str__(self): _str = "-----------------------------------------------------\n" @@ -125,6 +145,17 @@ def evaluate_value(self, pos): """ raise NotImplementedError + def evaluate_normalised_value(self, pos: NumericInput): + """Evaluate the feature value scaling between 0 and 1 + + Parameters + ---------- + pos : NumericInput + An array or arraylike object with locations + """ + value = self.evaluate_value(pos) + return (value - self.min()) / (self.max() - self.min()) + def _calculate_mask(self, evaluation_points: np.ndarray) -> np.ndarray: """Calculate the mask for which evaluation points need to be calculated @@ -183,6 +214,7 @@ def min(self): """ if self.model is None: return 0 + return np.nanmin(self.evaluate_value(self.model.regular_grid((10, 10, 10)))) def max(self): diff --git a/LoopStructural/modelling/features/_geological_feature.py b/LoopStructural/modelling/features/_geological_feature.py index ff3730798..76d4e0902 100644 --- a/LoopStructural/modelling/features/_geological_feature.py +++ b/LoopStructural/modelling/features/_geological_feature.py @@ -18,7 +18,7 @@ class GeologicalFeature(BaseFeature): model. For example foliations, fault planes, fold rotation angles etc. Attributes - ---------- + ---------- name : string should be a unique name for the geological feature support : a ScalarField @@ -61,6 +61,20 @@ def __init__( self.builder = builder self.type = FeatureType.INTERPOLATED + def to_json(self): + """ + Returns a json representation of the geological feature + + Returns + ------- + json : dict + json representation of the geological feature + """ + json = super().to_json() + print(self.name, json) + json["interpolator"] = self.interpolator.to_json() + return json + def is_valid(self): return self.interpolator.valid @@ -94,6 +108,7 @@ def evaluate_value(self, evaluation_points: np.ndarray) -> np.ndarray: # TODO need to add a generic type checker for all methods # if evaluation_points is not a numpy array try and convert # otherwise error + evaluation_points = np.asarray(evaluation_points) self.builder.up_to_date() # check if the points are within the display region v = np.zeros(evaluation_points.shape[0]) diff --git a/LoopStructural/modelling/features/_region.py b/LoopStructural/modelling/features/_region.py new file mode 100644 index 000000000..daec7a8e1 --- /dev/null +++ b/LoopStructural/modelling/features/_region.py @@ -0,0 +1,18 @@ +class Region: + def __init__(self, feature, value, sign): + self.feature = feature + self.value = value + self.sign = sign + + def __call__(self, xyz): + if self.sign: + return self.feature.evaluate_value(xyz) > 0 + else: + return self.feature.evaluate_value(xyz) < 0 + + def to_json(self): + return { + "feature": self.feature.name, + "value": self.value, + "sign": self.sign, + } diff --git a/LoopStructural/modelling/features/_structural_frame.py b/LoopStructural/modelling/features/_structural_frame.py index 1da20bc3b..82da666ba 100644 --- a/LoopStructural/modelling/features/_structural_frame.py +++ b/LoopStructural/modelling/features/_structural_frame.py @@ -26,6 +26,21 @@ def __init__(self, name: str, features: list, fold=None): self.fold = fold self.type = 
FeatureType.STRUCTURALFRAME + def to_json(self): + """ + Return a json representation of the structural frame + + Returns + ------- + json : dict + json representation of the structural frame + """ + json = {} + json["name"] = self.name + json["features"] = [f.name for f in self.features] + json["type"] = self.type + return json + def __getitem__(self, key): """ diff --git a/LoopStructural/modelling/features/_unconformity_feature.py b/LoopStructural/modelling/features/_unconformity_feature.py index 9f21c31e8..0db534499 100644 --- a/LoopStructural/modelling/features/_unconformity_feature.py +++ b/LoopStructural/modelling/features/_unconformity_feature.py @@ -31,6 +31,13 @@ def __init__(self, feature: GeologicalFeature, value: float, sign=True): self.sign = sign self.parent = feature + def to_json(self): + json = super().to_json() + json["value"] = self.value + json["sign"] = self.sign + json["parent"] = self.parent.name + return json + def inverse(self): """Returns an unconformity feature with the sign flipped The feature is a shallow copy with the parent being set to diff --git a/LoopStructural/modelling/features/builders/_fault_builder.py b/LoopStructural/modelling/features/builders/_fault_builder.py index 119041d74..b6e020c4b 100644 --- a/LoopStructural/modelling/features/builders/_fault_builder.py +++ b/LoopStructural/modelling/features/builders/_fault_builder.py @@ -1,7 +1,8 @@ from typing import Union from ._structural_frame_builder import StructuralFrameBuilder - +from LoopStructural.utils import get_vectors import numpy as np +import pandas as pd from ....utils import getLogger, BoundingBox logger = getLogger(__name__) @@ -78,15 +79,17 @@ def update_geometry(self, points): def create_data_from_geometry( self, - data, - fault_center, - normal_vector, - slip_vector, + fault_frame_data: pd.DataFrame, + fault_center=None, + fault_normal_vector=None, + fault_slip_vector=None, minor_axis=None, major_axis=None, intermediate_axis=None, w=1.0, points=False, + force_mesh_geometry=False, + fault_buffer=0.2, ): """Generate the required data for building a fault frame for a fault with the specified parameters @@ -110,13 +113,79 @@ def create_data_from_geometry( intermediate_axis : double fault volume radius in the slip direction """ - self.fault_normal_vector = normal_vector - self.fault_slip_vector = slip_vector + trace_mask = np.logical_and( + fault_frame_data["coord"] == 0, fault_frame_data["val"] == 0 + ) + logger.info(f"There are {np.sum(trace_mask)} points on the fault trace") + if np.sum(trace_mask) == 0: + logger.error( + "You cannot model a fault without defining the location of the fault" + ) + raise ValueError(f"There are no points on the fault trace") + + # get all of the gradient data associated with the fault trace + if fault_normal_vector is None: + gradient_mask = np.logical_and( + fault_frame_data["coord"] == 0, ~np.isnan(fault_frame_data["gz"]) + ) + vector_data = fault_frame_data.loc[ + gradient_mask, ["gx", "gy", "gz"] + ].to_numpy() + normal_mask = np.logical_and( + fault_frame_data["coord"] == 0, ~np.isnan(fault_frame_data["nz"]) + ) + vector_data = np.vstack( + [ + vector_data, + fault_frame_data.loc[normal_mask, ["nx", "ny", "nz"]].to_numpy(), + ] + ) + if len(vector_data) == 0: + logger.error( + "You cannot model a fault without defining the orientation of the fault\n\ + Either add orientation data or define the fault normal vector argument" + ) + raise ValueError(f"There are no points on the fault trace") + fault_normal_vector = np.mean(vector_data, axis=0) + + 
logger.info(f"Fault normal vector: {fault_normal_vector}") + + # estimate the fault slip vector + if fault_slip_vector is None: + slip_mask = np.logical_and( + fault_frame_data["coord"] == 1, ~np.isnan(fault_frame_data["gz"]) + ) + fault_slip_data = fault_frame_data.loc[slip_mask, ["gx", "gy", "gz"]] + if len(fault_slip_data) == 0: + logger.warning( + "There is no slip vector data for the fault, using vertical slip vector\n\ + projected onto fault surface estimating from fault normal" + ) + strike_vector, dip_vector = get_vectors(fault_normal_vector[None, :]) + fault_slip_vector = dip_vector[:, 0] + logger.info(f"Estimated fault slip vector: {fault_slip_vector}") + else: + fault_slip_vector = fault_slip_data.mean(axis=0).to_numpy() + if fault_center is None: + trace_mask = np.logical_and( + fault_frame_data["coord"] == 0, fault_frame_data["val"] == 0 + ) + fault_center = ( + fault_frame_data.loc[trace_mask, ["X", "Y", "Z"]] + .mean(axis=0) + .to_numpy() + ) + + self.fault_normal_vector = fault_normal_vector + self.fault_slip_vector = fault_slip_vector self.fault_centre = fault_center if major_axis is None: - fault_trace = data.loc[ - np.logical_and(data["coord"] == 0, data["val"] == 0), ["X", "Y"] + fault_trace = fault_frame_data.loc[ + np.logical_and( + fault_frame_data["coord"] == 0, fault_frame_data["val"] == 0 + ), + ["X", "Y"], ].to_numpy() distance = np.linalg.norm( fault_trace[:, None, :] - fault_trace[None, :, :], axis=2 @@ -127,9 +196,9 @@ def create_data_from_geometry( # the fault, its not critical # but probably means the fault isn't well defined. # add any data anyway - usually just orientation data - self.add_data_from_data_frame(data) - self.origin = self.model.bounding_box[0, :] - self.maximum = self.model.bounding_box[1, :] + self.add_data_from_data_frame(fault_frame_data) + self.origin = self.model.bounding_box.origin + self.maximum = self.model.bounding_box.maximum return major_axis = np.max(distance) logger.warning(f"Fault major axis using map length: {major_axis}") @@ -147,18 +216,17 @@ def create_data_from_geometry( self.fault_minor_axis = minor_axis self.fault_major_axis = major_axis self.fault_intermediate_axis = intermediate_axis - normal_vector /= np.linalg.norm(normal_vector) - slip_vector /= np.linalg.norm(slip_vector) + fault_normal_vector /= np.linalg.norm(fault_normal_vector) + fault_slip_vector /= np.linalg.norm(fault_slip_vector) # check if slip vector is inside fault plane, if not project onto fault plane # if not np.isclose(normal_vector @ slip_vector, 0): - - strike_vector = np.cross(normal_vector, slip_vector) + strike_vector = np.cross(fault_normal_vector, fault_slip_vector) self.fault_strike_vector = strike_vector fault_edges = np.zeros((2, 3)) fault_tips = np.zeros((2, 3)) fault_depth = np.zeros((2, 3)) - data.reset_index(inplace=True) + fault_frame_data.reset_index(inplace=True) if not self.fault_major_axis: logger.warning( f"Fault major axis is not set and cannot be determined from the fault trace. 
\ @@ -179,15 +247,16 @@ def create_data_from_geometry( ) if fault_center is not None: if minor_axis is not None: - fault_edges[0, :] = fault_center[:3] + normal_vector * minor_axis - fault_edges[1, :] = fault_center[:3] - normal_vector * minor_axis + fault_edges[0, :] = fault_center[:3] + fault_normal_vector * minor_axis + fault_edges[1, :] = fault_center[:3] - fault_normal_vector * minor_axis self.update_geometry(fault_edges) # choose whether to add points -1,1 to constrain fault frame or a scaled # vector if points: - data.loc[ - len(data), ["X", "Y", "Z", "feature_name", "val", "coord", "w"] + fault_frame_data.loc[ + len(fault_frame_data), + ["X", "Y", "Z", "feature_name", "val", "coord", "w"], ] = [ fault_edges[0, 0], fault_edges[0, 1], @@ -197,8 +266,9 @@ def create_data_from_geometry( 0, w, ] - data.loc[ - len(data), ["X", "Y", "Z", "feature_name", "val", "coord", "w"] + fault_frame_data.loc[ + len(fault_frame_data), + ["X", "Y", "Z", "feature_name", "val", "coord", "w"], ] = [ fault_edges[1, 0], fault_edges[1, 1], @@ -209,32 +279,44 @@ def create_data_from_geometry( w, ] logger.info("Converting fault norm data to gradient data") - mask = np.logical_and(data["coord"] == 0, ~np.isnan(data["nx"])) - data.loc[mask, ["gx", "gy", "gz"]] = data.loc[ - mask, ["nx", "ny", "nz"] - ] + mask = np.logical_and( + fault_frame_data["coord"] == 0, + ~np.isnan(fault_frame_data["nx"]), + ) + fault_frame_data.loc[ + mask, ["gx", "gy", "gz"] + ] = fault_frame_data.loc[mask, ["nx", "ny", "nz"]] - data.loc[mask, ["nx", "ny", "nz"]] = np.nan - mask = np.logical_and(data["coord"] == 0, ~np.isnan(data["gx"])) - data.loc[mask, ["gx", "gy", "gz"]] /= minor_axis * 0.5 + fault_frame_data.loc[mask, ["nx", "ny", "nz"]] = np.nan + mask = np.logical_and( + fault_frame_data["coord"] == 0, + ~np.isnan(fault_frame_data["gx"]), + ) + fault_frame_data.loc[mask, ["gx", "gy", "gz"]] /= minor_axis * 0.5 if points == False: logger.info( "Rescaling fault norm constraint length for fault frame" ) - mask = np.logical_and(data["coord"] == 0, ~np.isnan(data["gx"])) - data.loc[mask, ["gx", "gy", "gz"]] /= np.linalg.norm( - data.loc[mask, ["gx", "gy", "gz"]], axis=1 + mask = np.logical_and( + fault_frame_data["coord"] == 0, + ~np.isnan(fault_frame_data["gx"]), + ) + fault_frame_data.loc[mask, ["gx", "gy", "gz"]] /= np.linalg.norm( + fault_frame_data.loc[mask, ["gx", "gy", "gz"]], axis=1 )[:, None] # scale vector so that the distance between -1 # and 1 is the minor axis length - data.loc[mask, ["gx", "gy", "gz"]] /= minor_axis * 0.5 - mask = np.logical_and(data["coord"] == 0, ~np.isnan(data["nx"])) - data.loc[mask, ["nx", "ny", "nz"]] /= np.linalg.norm( - data.loc[mask, ["nx", "ny", "nz"]], axis=1 + fault_frame_data.loc[mask, ["gx", "gy", "gz"]] /= minor_axis * 0.5 + mask = np.logical_and( + fault_frame_data["coord"] == 0, + ~np.isnan(fault_frame_data["nx"]), + ) + fault_frame_data.loc[mask, ["nx", "ny", "nz"]] /= np.linalg.norm( + fault_frame_data.loc[mask, ["nx", "ny", "nz"]], axis=1 )[:, None] # scale vector so that the distance between -1 # and 1 is the minor axis length - data.loc[mask, ["nx", "ny", "nz"]] /= minor_axis * 0.5 + fault_frame_data.loc[mask, ["nx", "ny", "nz"]] /= minor_axis * 0.5 # self.builders[0].add_orthogonal_feature(self, # feature, w=1.0, region=None, step=1, B=0): @@ -243,8 +325,9 @@ def create_data_from_geometry( fault_tips[1, :] = fault_center[:3] - strike_vector * 0.5 * major_axis self.update_geometry(fault_tips) # we want the tips of the fault to be -1 and 1 - data.loc[ - len(data), ["X", "Y", "Z", 
"feature_name", "val", "coord", "w"] + fault_frame_data.loc[ + len(fault_frame_data), + ["X", "Y", "Z", "feature_name", "val", "coord", "w"], ] = [ fault_center[0], fault_center[1], @@ -254,8 +337,9 @@ def create_data_from_geometry( 2, w, ] - data.loc[ - len(data), ["X", "Y", "Z", "feature_name", "val", "coord", "w"] + fault_frame_data.loc[ + len(fault_frame_data), + ["X", "Y", "Z", "feature_name", "val", "coord", "w"], ] = [ fault_tips[1, 0], fault_tips[1, 1], @@ -265,8 +349,9 @@ def create_data_from_geometry( 2, w, ] - data.loc[ - len(data), ["X", "Y", "Z", "feature_name", "val", "coord", "w"] + fault_frame_data.loc[ + len(fault_frame_data), + ["X", "Y", "Z", "feature_name", "val", "coord", "w"], ] = [ fault_tips[0, 0], fault_tips[0, 1], @@ -278,10 +363,15 @@ def create_data_from_geometry( ] strike_vector /= major_axis if intermediate_axis is not None: - fault_depth[0, :] = fault_center[:3] + slip_vector * intermediate_axis - fault_depth[1, :] = fault_center[:3] - slip_vector * intermediate_axis - data.loc[ - len(data), ["X", "Y", "Z", "feature_name", "val", "coord", "w"] + fault_depth[0, :] = ( + fault_center[:3] + fault_slip_vector * intermediate_axis + ) + fault_depth[1, :] = ( + fault_center[:3] - fault_slip_vector * intermediate_axis + ) + fault_frame_data.loc[ + len(fault_frame_data), + ["X", "Y", "Z", "feature_name", "val", "coord", "w"], ] = [ fault_center[0], fault_center[1], @@ -294,9 +384,9 @@ def create_data_from_geometry( self.update_geometry(fault_depth) # TODO need to add data here - slip_vector /= intermediate_axis - data.loc[ - len(data), + fault_slip_vector /= intermediate_axis + fault_frame_data.loc[ + len(fault_frame_data), [ "X", "Y", @@ -314,15 +404,17 @@ def create_data_from_geometry( fault_center[1], fault_center[2], self.name, - slip_vector[0], - slip_vector[1], - slip_vector[2], + fault_slip_vector[0], + fault_slip_vector[1], + fault_slip_vector[2], 0, 1, w, ] - self.add_data_from_data_frame(data) - self.update_geometry(data[["X", "Y", "Z"]].to_numpy()) + self.add_data_from_data_frame(fault_frame_data) + if force_mesh_geometry: + self.set_mesh_geometry(fault_buffer, None) + self.update_geometry(fault_frame_data[["X", "Y", "Z"]].to_numpy()) def set_mesh_geometry(self, buffer, rotation): """set the mesh geometry diff --git a/LoopStructural/modelling/features/builders/_folded_feature_builder.py b/LoopStructural/modelling/features/builders/_folded_feature_builder.py index 968ccef5c..868641af1 100644 --- a/LoopStructural/modelling/features/builders/_folded_feature_builder.py +++ b/LoopStructural/modelling/features/builders/_folded_feature_builder.py @@ -45,6 +45,7 @@ def __init__( region=region, **kwargs ) + self.interpolator.fold = fold self.fold = fold self.fold_weights = fold_weights self.kwargs = kwargs @@ -119,9 +120,10 @@ def build(self, data_region=None, constrained=None, **kwargs): logger.info("Adding fold to {}".format(self.name)) self.interpolator.fold = self.fold # if we have fold weights use those, otherwise just use default - self.interpolator.add_fold_constraints(**self.fold_weights) + # self.interpolator.add_fold_constraints(**self.fold_weights) + kwargs["fold_weights"] = self.fold_weights if "cgw" not in kwargs: # try adding very small cg kwargs["cgw"] = 0.0 # now the fold is set up run the standard interpolation - GeologicalFeatureBuilder.build(self, data_region=data_region, **kwargs) + super().build(self, data_region=data_region, **kwargs) diff --git a/LoopStructural/modelling/features/builders/_geological_feature_builder.py 
b/LoopStructural/modelling/features/builders/_geological_feature_builder.py index 4beb1c8ff..b0d8f3cfd 100644 --- a/LoopStructural/modelling/features/builders/_geological_feature_builder.py +++ b/LoopStructural/modelling/features/builders/_geological_feature_builder.py @@ -26,7 +26,7 @@ ) from ....utils import RegionEverywhere from ....interpolators import DiscreteInterpolator -from ....interpolators import get_interpolator +from ....interpolators import InterpolatorFactory logger = getLogger(__name__) @@ -39,7 +39,7 @@ def __init__( nelements: int = 1000, name="Feature", interpolation_region=None, - **kwargs, + **kwarg, ): """ Constructor for a GeologicalFeatureBuilder @@ -53,11 +53,12 @@ def __init__( kwargs - name of the feature, region to interpolate the feature """ BaseBuilder.__init__(self, name) - interpolator = get_interpolator( - bounding_box=bounding_box, + interpolator = InterpolatorFactory.create_interpolator( interpolatortype=interpolatortype, + boundingbox=bounding_box, nelements=nelements, ) + if issubclass(type(interpolator), GeologicalInterpolator) == False: raise TypeError( "interpolator is {} and must be a GeologicalInterpolator".format( @@ -65,8 +66,6 @@ def __init__( ) ) self._interpolator = interpolator - # self._interpolator.set_property_name(self._name) - # everywhere region is just a lambda that returns true for all locations header = ( xyz_names() diff --git a/LoopStructural/modelling/input/process_data.py b/LoopStructural/modelling/input/process_data.py index 79111e8e5..53d0f5d07 100644 --- a/LoopStructural/modelling/input/process_data.py +++ b/LoopStructural/modelling/input/process_data.py @@ -1,7 +1,7 @@ import pandas as pd import numpy as np from .fault_network import FaultNetwork -from ...utils import strike_dip_vector +from ...utils import strikedip2vector from ...utils import getLogger logger = getLogger(__name__) @@ -151,7 +151,6 @@ def colours(self): @colours.setter def colours(self, colours): - if colours is None: self._colours = {} for s in self.stratigraphic_name: @@ -559,7 +558,7 @@ def contact_orientations(self, contact_orientations): contact_orientations["nx"] = np.nan contact_orientations["ny"] = np.nan contact_orientations["nz"] = np.nan - contact_orientations[["nx", "ny", "nz"]] = strike_dip_vector( + contact_orientations[["nx", "ny", "nz"]] = strikedip2vector( contact_orientations["strike"], contact_orientations["dip"] ) if ( @@ -616,7 +615,7 @@ def fault_orientations(self, fault_orientations): fault_orientations["gx"] = np.nan fault_orientations["gy"] = np.nan fault_orientations["gz"] = np.nan - fault_orientations[["gx", "gy", "gz"]] = strike_dip_vector( + fault_orientations[["gx", "gy", "gz"]] = strikedip2vector( fault_orientations["strike"], fault_orientations["dip"] ) if ( diff --git a/LoopStructural/utils/__init__.py b/LoopStructural/utils/__init__.py index 4ea099d9e..d167c1534 100644 --- a/LoopStructural/utils/__init__.py +++ b/LoopStructural/utils/__init__.py @@ -11,14 +11,20 @@ LoopValueError, ) from ._transformation import EuclideanTransformation -from .map2loop import process_map2loop, build_model from .helper import ( - get_data_axis_aligned_bounding_box, get_data_bounding_box, get_data_bounding_box_map, ) from ..datatypes._bounding_box import BoundingBox -from .helper import get_dip_vector, get_strike_vector, get_vectors, strike_dip_vector +from .maths import ( + get_dip_vector, + get_strike_vector, + get_vectors, + strikedip2vector, + azimuthplunge2vector, + normal_vector_to_strike_and_dip, +) +from .helper import create_surface, 
diff --git a/LoopStructural/utils/dtm_creator.py b/LoopStructural/utils/dtm_creator.py
index 8093e1228..95d975ba6 100644
--- a/LoopStructural/utils/dtm_creator.py
+++ b/LoopStructural/utils/dtm_creator.py
@@ -1,13 +1,17 @@
-def create_dtm_with_rasterio(dtm_path):
-    try:
+from typing import Union
+from pathlib import Path

-        from map2loop.map import MapUtil
-    except ImportError:
-        print("map2loop not installed. Please install it and try again")
+
+def create_dtm_with_rasterio(dtm_path: Union[str, Path]):
     try:
         import rasterio
     except ImportError:
         print("rasterio not installed. Please install it and try again.")
         return
-    dtm_map = MapUtil(None, dtm=rasterio.open(dtm_path))
-    return lambda xyz: dtm_map.evaluate_dtm_at_points(xyz[:, :2])
+    try:
+        from map2loop.map import MapUtil
+
+        dtm_map = MapUtil(None, dtm=rasterio.open(dtm_path))
+        return lambda xyz: dtm_map.evaluate_dtm_at_points(xyz[:, :2])
+    except ImportError:
+        print("map2loop not installed. Please install it and try again")
diff --git a/LoopStructural/utils/helper.py b/LoopStructural/utils/helper.py
index df4298b66..aed48eb8b 100644
--- a/LoopStructural/utils/helper.py
+++ b/LoopStructural/utils/helper.py
@@ -1,4 +1,5 @@
+from typing import Union
 import logging

 import numpy as np
 import pandas as pd
@@ -9,65 +11,6 @@
 logger = getLogger(__name__)

-def get_data_axis_aligned_bounding_box(xyz, buffer):
-    """
-
-    Parameters
-    ----------
-    xyz
-    buffer
-
-    Returns
-    -------
-
-    """
-    minx = np.min(xyz[:, 0])
-    maxx = np.max(xyz[:, 0])
-    miny = np.min(xyz[:, 1])
-    maxy = np.max(xyz[:, 1])
-    minz = np.min(xyz[:, 2])
-    maxz = np.max(xyz[:, 2])
-
-    xlen = maxx - minx
-    ylen = maxy - miny
-    zlen = maxz - minz
-    length = np.max([xlen, ylen, zlen])
-    minx -= length * buffer
-    maxx += length * buffer
-
-    miny -= length * buffer
-    maxy += length * buffer
-
-    minz -= length * buffer
-    maxz += length * buffer
-
-    bb = np.array([[minx, miny, minz], [maxx, maxy, maxz]])
-
-    def region(xyz):
-        """
-
-        Parameters
-        ----------
-        xyz
-
-        Returns
-        -------
-
-        """
-        # print(xyz)
-        # print(bb)
-        b = np.ones(xyz.shape[0]).astype(bool)
-        b = np.logical_and(b, xyz[:, 0] > minx)
-        b = np.logical_and(b, xyz[:, 0] < maxx)
-        b = np.logical_and(b, xyz[:, 1] > miny)
-        b = np.logical_and(b, xyz[:, 1] < maxy)
-        # b = np.logical_and(b,xyz[:,2]>minz)
-        # b = np.logical_and(b,xyz[:,2]<maxz)
-        return b
-
-    return bb, region
-
+# def plunge_and_plunge_dir_to_vector(
+#     plunge, plunge_dir
+# ) -> np.ndarray:
+#     """Convert plunge and plunge direction to a vector
+#
+#     Parameters
+#     ----------
+#     plunge : Union[np.ndarray, list]
+#         array or array like of plunge values
+#     plunge_dir : Union[np.ndarray, list]
+#         array or array like of plunge direction values
+#
+#     Returns
+#     -------
+#     np.array
+#         nx3 vector
+#     """
+#     plunge = np.deg2rad(plunge)
+#     plunge_dir = np.deg2rad(plunge_dir)
+#     vec = np.zeros(3)
+#     vec[0] = np.sin(plunge_dir) * np.cos(plunge)
+#     vec[1] = np.cos(plunge_dir) * np.cos(plunge)
+#     vec[2] = -np.sin(plunge)
+#     return vec


 def create_surface(bounding_box, nstep):
@@ -266,105 +225,6 @@ def create_box(bounding_box, nsteps):
     return points, tri

-def get_vectors(normal):
-    length = np.linalg.norm(normal, axis=1)[:, None]
-    normal /= length  # np.linalg.norm(normal,axis=1)[:,None]
-    strikedip = normal_vector_to_strike_and_dip(normal)
-    strike_vec = get_strike_vector(strikedip[:, 0])
-    strike_vec /= np.linalg.norm(strike_vec, axis=0)[None, :]
-    dip_vec = np.cross(
-        strike_vec, normal, axisa=0, axisb=1
-    ).T  # (strikedip[:, 0],
strikedip[:, 1]) - dip_vec /= np.linalg.norm(dip_vec, axis=0)[None, :] - return strike_vec * length.T, dip_vec * length.T - - -def get_strike_vector(strike): - """ - - Parameters - ---------- - strike - - Returns - ------- - - """ - - v = np.array( - [ - np.sin(np.deg2rad(-strike)), - -np.cos(np.deg2rad(-strike)), - np.zeros(strike.shape[0]), - ] - ) - - return v - - -def get_dip_vector(strike, dip): - v = np.array( - [ - -np.cos(np.deg2rad(-strike)) * np.cos(-np.deg2rad(dip)), - np.sin(np.deg2rad(-strike)) * np.cos(-np.deg2rad(dip)), - np.sin(-np.deg2rad(dip)), - ] - ) - return v - - -def rotation(axis, angle): - c = np.cos(np.deg2rad(angle)) - s = np.sin((np.deg2rad(angle))) - C = 1.0 - c - x = axis[0] - y = axis[1] - z = axis[2] - xs = x * s - ys = y * s - zs = z * s - xC = x * C - yC = y * C - zC = z * C - xyC = x * yC - yzC = y * zC - zxC = z * xC - rotation_mat = np.zeros((3, 3)) - rotation_mat[0][0] = x * xC + c - rotation_mat[0][1] = xyC - zs - rotation_mat[0][2] = zxC + ys - - rotation_mat[1][0] = xyC + zs - rotation_mat[1][1] = y * yC + c - rotation_mat[1][2] = yzC - xs - - rotation_mat[2][0] = zxC - ys - rotation_mat[2][1] = yzC + xs - rotation_mat[2][2] = z * zC + c - return rotation_mat - - -def strike_dip_vector(strike, dip): - vec = np.zeros((len(strike), 3)) - s_r = np.deg2rad(strike) - d_r = np.deg2rad((dip)) - vec[:, 0] = np.sin(d_r) * np.cos(s_r) - vec[:, 1] = -np.sin(d_r) * np.sin(s_r) - vec[:, 2] = np.cos(d_r) - vec /= np.linalg.norm(vec, axis=1)[:, None] - return vec - - -def normal_vector_to_strike_and_dip(normal_vector): - normal_vector /= np.linalg.norm(normal_vector, axis=1)[:, None] - dip = np.rad2deg(np.arccos(normal_vector[:, 2])) - strike = -np.rad2deg( - np.arctan2(normal_vector[:, 1], normal_vector[:, 0]) - ) # atan2(v2[1],v2[0])*rad2deg; - - return np.array([strike, dip]).T - - def xyz_names(): return ["X", "Y", "Z"] diff --git a/LoopStructural/utils/map2loop.py b/LoopStructural/utils/map2loop.py deleted file mode 100644 index 02e0ddd2a..000000000 --- a/LoopStructural/utils/map2loop.py +++ /dev/null @@ -1,526 +0,0 @@ -import pandas as pd -import numpy as np -import os -import logging -import networkx -from scipy.stats import truncnorm - -from ..utils import getLogger - -logger = getLogger(__name__) - - -def process_map2loop(m2l_directory, flags={}): - """ - Extracts relevant information from map2loop outputs - - Parameters - ---------- - m2l_directory : string - absolute path to a directory containing map2loop outputs - Returns - ------- - m2l_data : dict - a dictionary containing the extracted and collated data - """ - gradient = flags.get("gradient", False) - vector_scale = flags.get("vector_scale", None) - tangents = pd.read_csv(m2l_directory + "/tmp/raw_contacts.csv") - groups = pd.read_csv(m2l_directory + "/tmp/all_sorts_clean.csv", index_col=0) - contact_orientations = pd.read_csv(m2l_directory + "/output/orientations_clean.csv") - # formation_thickness = pd.read_csv) - contacts = pd.read_csv(m2l_directory + "/output/contacts_clean.csv") - displacements = pd.read_csv(m2l_directory + "/output/fault_displacements3.csv") - fault_orientations = pd.read_csv(m2l_directory + "/output/fault_orientations.csv") - fault_locations = pd.read_csv(m2l_directory + "/output/faults.csv") - fault_fault_relations = pd.read_csv( - m2l_directory + "/output/fault-fault-relationships.csv" - ) - fault_strat_relations = pd.read_csv( - m2l_directory + "/output/group-fault-relationships.csv" - ) - fault_dimensions = pd.read_csv(m2l_directory + "/output/fault_dimensions.csv") - 
fault_graph = networkx.read_gml(m2l_directory + "/tmp/fault_network.gml") - - # whether to simulate thickness variations - thickness_probabilities = flags.get("thickness_probabilities", False) - orientation_probabilities = flags.get("orientation_probabilities", False) - fault_orientation_probabilities = flags.get( - "fault_orientation_probabilities", False - ) - fault_slip_vector = flags.get("fault_slip_vector", None) - ## read supergroups file - supergroups = {} - sgi = 0 - contact_orientations.loc[contact_orientations["polarity"] == 0, "polarity"] = -1 - try: - with open(m2l_directory + "/tmp/super_groups.csv") as f: - for l in f: - for g in l.split(","): - g = g.replace("-", "_").replace(" ", "_") - if g.find("\n") > 0: - g = g[: g.find("\n")] - supergroups[g] = "supergroup_{}".format(sgi) - if g == "\n": - sgi += 1 - break - except: - for g in groups["group"].unique(): - supergroups[g] = g - try: - supergroups.pop("\n") - except KeyError: - pass - - ## read bounding box - bb = pd.read_csv(m2l_directory + "/tmp/bbox.csv") - - ## read fault intersection graph - fault_intersection_angles = {} - with open(m2l_directory + "/graph/fault-fault-intersection.txt", "r") as f: - for l in f: - fault_id = int(l.split(",")[1]) - fault_intersection_angles["Fault_{}".format(fault_id)] = [] - intersections = l[l.find("{") + 1 : l.find("}")] - intersections = intersections.replace("(", "") - intersections = intersections.replace(")", "") - intersection_data = intersections.split(",") - i = 0 - while i < len(intersection_data): - fault_intersection_angles["Fault_{}".format(fault_id)].append( - [ - "Fault_{}".format(int(intersection_data[i])), - intersection_data[i + 1], - float(intersection_data[i + 2]), - ] - ) - i += 3 - - # process tangent data to be tx, ty, tz - tangents["tz"] = 0 - tangents["tx"] = tangents["lsx"] - tangents["ty"] = tangents["lsy"] - tangents.drop(["angle", "lsx", "lsy"], inplace=True, axis=1) - - # convert azimuth and dip to gx, gy, gz - - # calculate scalar field values - i = 0 - thickness = {} - max_thickness = 0 - thickness_file = flags.get( - "thickness", m2l_directory + "/output/formation_summary_thicknesses.csv" - ) - with open(thickness_file) as file: - for l in file: - if i >= 1: - linesplit = l.split(",") - if thickness_probabilities: - std = float(linesplit[2]) - mean = float(linesplit[1]) - if np.isnan(std): - std = float(linesplit[1]) - a = (0 - mean) / std - b = 100.0 - thickness[linesplit[0]] = (truncnorm.rvs(a, b, size=1) * std)[ - 0 - ] + mean - else: - thickness[linesplit[0]] = float(linesplit[1]) - - # normalise the thicknesses - if float(linesplit[1]) > max_thickness: - max_thickness = float(linesplit[1]) - # print(l.split(',')[1]) - i += 1 - # for k in thickness.keys(): - # thickness[k] /= max_thickness - if vector_scale is None: - vector_scale = max_thickness - - if gradient: - from LoopStructural.utils.helper import strike_dip_vector - - contact_orientations["strike"] = contact_orientations["azimuth"] - 90 - contact_orientations["gx"] = np.nan - contact_orientations["gy"] = np.nan - contact_orientations["gz"] = np.nan - contact_orientations[["gx", "gy", "gz"]] = ( - strike_dip_vector( - contact_orientations["strike"], contact_orientations["dip"] - ) - * max_thickness - ) - if ( - np.sum(contact_orientations["polarity"] == 0) > 0 - and np.sum(contact_orientations["polarity"] == -1) == 0 - ): - # contact_orientations['polarity']+=1 - contact_orientations.loc[contact_orientations["polarity"] == 0] = -1 - if not gradient: - from LoopStructural.utils.helper import 
strike_dip_vector - - contact_orientations["strike"] = contact_orientations["azimuth"] - 90 - contact_orientations["nx"] = np.nan - contact_orientations["ny"] = np.nan - contact_orientations["nz"] = np.nan - contact_orientations[["nx", "ny", "nz"]] = ( - strike_dip_vector( - contact_orientations["strike"], contact_orientations["dip"] - ) - * vector_scale - * contact_orientations["polarity"].to_numpy()[:, None] - ) - contact_orientations.drop(["strike", "dip", "azimuth"], inplace=True, axis=1) - # with open(m2l_directory + '/output/formation_summary_thicknesses.csv') as file: - - # thickness = {} - # for f in formation_thickness['formation'].unique(): - # thickness[f] = formation_thickness[formation_thickness['formation'] == f]['thickness']) - - strat_val = {} - stratigraphic_column = {} - unit_id = 0 - val = {} - for i in groups["group number"].unique(): - g = supergroups[groups.loc[groups["group number"] == i, "group"].iloc[0]] - if g not in stratigraphic_column: - stratigraphic_column[g] = {} - val[g] = 0 - - for c, colour in zip( - groups.loc[groups["group number"] == i, "code"], - groups.loc[groups["group number"] == i, "colour"], - ): - strat_val[c] = np.nan - if c in thickness: - stratigraphic_column[g][c] = { - "max": val[g], - "min": val[g] - thickness[c], - "id": unit_id, - "colour": colour, - } - unit_id += 1 - strat_val[c] = val[g] - thickness[c] - val[g] -= thickness[c] - group_name = None - for g, i in stratigraphic_column.items(): - if len(i) == 0: - for gr, sg in supergroups.items(): - if sg == g: - group_name = gr - break - try: - if group_name is None: - continue - c = groups.loc[groups["group"] == group_name, "code"].to_numpy()[0] - strat_val[c] = 0 - stratigraphic_column[g] = {c: {"min": 0, "max": 9999, "id": unit_id}} - unit_id += 1 - group_name = None - except: - print("Couldnt process {}".format(g)) - # whether to use thickness or interface - use_thickness = flags.get("use_thickness", True) - if use_thickness: - contacts["val"] = np.nan - for o in strat_val: - contacts.loc[contacts["formation"] == o, "val"] = strat_val[o] - if use_thickness == False: - contacts["interface"] = np.nan - interface_val = 0 - for u in contacts["formation"].unique(): - contacts.loc[contacts["formation"] == u, "interface"] = interface_val - interface_val += 1 - tangents["feature_name"] = tangents["group"] - contact_orientations["feature_name"] = None - contacts["feature_name"] = None - for g in groups["group"].unique(): - val = 0 - for c in groups.loc[groups["group"] == g, "code"]: - contact_orientations.loc[ - contact_orientations["formation"] == c, "feature_name" - ] = supergroups[g] - contacts.loc[contacts["formation"] == c, "feature_name"] = supergroups[g] - displacements["dip_dir"] = np.nan - for fname in fault_orientations["formation"].unique(): - displacements.loc[displacements["fname"] == fname, "dip_dir"] = np.mean( - fault_orientations.loc[ - fault_orientations["formation"] == fname, "DipDirection" - ] - ) - max_displacement = {} - downthrow_dir = {} - fault_locations["val"] = 0 - fault_locations["coord"] = 0 - fault_orientations["coord"] = 0 - fault_orientations["gx"] = np.nan - fault_orientations["gy"] = np.nan - fault_orientations["gz"] = np.nan - - stratigraphic_column["faults"] = {} - for f in displacements["fname"].unique(): - fault_centers = np.zeros(6) - normal_vector = np.zeros(3) - strike_vector = np.zeros(3) - slip_vector = np.zeros(3) - - fault_edges = np.zeros((2, 3)) - fault_tips = np.zeros((2, 3)) - fault_depth = np.zeros((2, 3)) - displacements_numpy = 
displacements.loc[ - displacements["fname"] == f, - ["vertical_displacement", "downthrow_dir", "dip_dir", "X", "Y"], - ].to_numpy() - # index = np.argmax(np.abs(displacements_numpy[:, 0]), ) - index = np.argsort(np.abs(displacements_numpy[:, 0]))[ - len(np.abs(displacements_numpy[:, 0])) // 2 - ] - - max_displacement[f] = displacements_numpy[index, 0] - downthrow_dir[f] = displacements_numpy[index, [1, 3, 4]] - if displacements_numpy[index, 1] == 1.0: - pass - # raise ValueError('Downthrow direction is not defined for fault {}'.format(f)) - # fault_intersection_angles[f] - if np.abs(displacements_numpy[index, 1] - displacements_numpy[index, 2]) > 90: - # fault_orientations.loc[fault_orientations['formation'] == f, ['gx','gy','gy']]=-fault_orientations.loc[fault_orientations['formation'] == f, ['gx','gy','gy']] - fault_orientations.loc[ - fault_orientations["formation"] == f, "DipDirection" - ] -= 180 # displacements_numpy[ - # index, 1] - # find the middle of the fault as the mean of the line, average dip direction and the influence distance - fault_centers[:3] = np.mean( - fault_locations.loc[fault_locations["formation"] == f, ["X", "Y", "Z"]], - axis=0, - ) - fault_centers[3] = np.mean( - fault_orientations.loc[ - fault_orientations["formation"] == f, ["DipDirection"] - ] - ) - fault_centers[4] = fault_dimensions.loc[ - fault_dimensions["Fault"] == f, "InfluenceDistance" - ] - fault_centers[5] = fault_dimensions.loc[ - fault_dimensions["Fault"] == f, "HorizontalRadius" - ] - if type(fault_slip_vector) == dict: - fault_slip = fault_slip_vector[f] - elif type(fault_slip_vector) == np.array: - fault_slip = fault_slip_vector - else: - fault_slip = np.array([0.0, 0.0, -1.0]) - stratigraphic_column["faults"][f] = { - "FaultCenter": fault_centers[:3], - "FaultDipDirection": fault_centers[3], - "InfluenceDistance": fault_dimensions.loc[ - fault_dimensions["Fault"] == f, "InfluenceDistance" - ].to_numpy(), - "HorizontalRadius": fault_dimensions.loc[ - fault_dimensions["Fault"] == f, "HorizontalRadius" - ].to_numpy(), - "VerticalRadius": fault_dimensions.loc[ - fault_dimensions["Fault"] == f, "VerticalRadius" - ].to_numpy(), - "FaultSlip": fault_slip, - "FaultNorm": normal_vector, - } - - if "colour" in fault_dimensions.columns: - stratigraphic_column["faults"][f]["colour"] = fault_dimensions.loc[ - fault_dimensions["Fault"] == f, "colour" - ].to_numpy() - normal_vector[0] = np.sin(np.deg2rad(fault_centers[3])) - normal_vector[1] = np.cos(np.deg2rad(fault_centers[3])) - strike_vector[0] = normal_vector[1] - strike_vector[1] = -normal_vector[0] - slip_vector[2] = 1 - fault_edges[0, :] = fault_centers[:3] + normal_vector * fault_centers[4] - fault_edges[1, :] = fault_centers[:3] - normal_vector * fault_centers[4] - fault_tips[0, :] = fault_centers[:3] + strike_vector * fault_centers[5] - fault_tips[1, :] = fault_centers[:3] - strike_vector * fault_centers[5] - - fault_orientations["strike"] = fault_orientations["DipDirection"] - 90 - fault_orientations[["gx", "gy", "gz"]] = strike_dip_vector( - fault_orientations["strike"], fault_orientations["dip"] - ) - for g in groups["group"].unique(): - groups.loc[groups["group"] == g, "group"] = supergroups[g] - - fault_orientations.drop( - ["strike", "DipDirection", "dip", "DipPolarity"], inplace=True, axis=1 - ) - fault_orientations["feature_name"] = fault_orientations["formation"] - fault_locations["feature_name"] = fault_locations["formation"] - - data = pd.concat( - [tangents, contact_orientations, contacts, fault_orientations, fault_locations] - ) - 
data.reset_index(inplace=True) - - return { - "data": data, - "groups": groups, - "max_displacement": max_displacement, - "fault_fault": fault_fault_relations, - "stratigraphic_column": stratigraphic_column, - "bounding_box": bb, - "strat_va": strat_val, - "downthrow_dir": downthrow_dir, - "fault_graph": fault_graph, - "fault_intersection_angles": fault_intersection_angles, - "thickness": thickness, - } - - -def build_model( - m2l_data, - evaluate=True, - skip_faults=False, - unconformities=False, - fault_params=None, - foliation_params=None, - rescale=True, - skip_features=[], - **kwargs -): - """[summary] - - [extended_summary] - - Parameters - ---------- - m2l_data : dict - [description] - skip_faults : bool, optional - [description], by default False - fault_params : dict, optional - [description], by default None - foliation_params : dict, optional - [description], by default None - - Returns - ------- - [type] - [description] - """ - from LoopStructural import GeologicalModel - - boundary_points = np.zeros((2, 3)) - boundary_points[0, 0] = m2l_data["bounding_box"]["minx"] - boundary_points[0, 1] = m2l_data["bounding_box"]["miny"] - boundary_points[0, 2] = m2l_data["bounding_box"]["lower"] - boundary_points[1, 0] = m2l_data["bounding_box"]["maxx"] - boundary_points[1, 1] = m2l_data["bounding_box"]["maxy"] - boundary_points[1, 2] = m2l_data["bounding_box"]["upper"] - - model = GeologicalModel( - boundary_points[0, :], boundary_points[1, :], rescale=rescale - ) - # m2l_data['data']['val'] /= model.scale_factor - model.set_model_data(m2l_data["data"]) - if not skip_faults: - faults = [] - for f in m2l_data["max_displacement"].keys(): - if model.data[model.data["feature_name"] == f].shape[0] == 0: - continue - if f in skip_features: - continue - fault_id = f - overprints = [] - try: - overprint_id = m2l_data["fault_fault"][ - m2l_data["fault_fault"][fault_id] == 1 - ]["fault_id"].to_numpy() - for i in overprint_id: - overprints.append(i) - logger.info("Adding fault overprints {}".format(f)) - except: - logger.info("No entry for %s in fault_fault_relations" % f) - # continue - fault_center = m2l_data["stratigraphic_column"]["faults"][f]["FaultCenter"] - fault_influence = m2l_data["stratigraphic_column"]["faults"][f][ - "InfluenceDistance" - ] - fault_extent = m2l_data["stratigraphic_column"]["faults"][f][ - "HorizontalRadius" - ] - fault_vertical_radius = m2l_data["stratigraphic_column"]["faults"][f][ - "VerticalRadius" - ] - fault_slip_vector = m2l_data["stratigraphic_column"]["faults"][f][ - "FaultSlip" - ] - faults.append( - model.create_and_add_fault( - f, - -m2l_data["max_displacement"][f], - faultfunction="BaseFault", - fault_slip_vector=fault_slip_vector, - fault_center=fault_center, - fault_extent=fault_extent, - fault_influence=fault_influence, - fault_vectical_radius=fault_vertical_radius, - # overprints=overprints, - **fault_params, - ) - ) - # for f in m2l_data['fault_intersection_angles']: - # if f in m2l_data['max_displacement'].keys(): - # f1_norm = m2l_data['stratigraphic_column']['faults'][f]['FaultNorm'] - # for intersection in m2l_data['fault_intersection_angles'][f]: - # if intersection[0] in m2l_data['max_displacement'].keys(): - # f2_norm = m2l_data['stratigraphic_column']['faults'][intersection[0]]['FaultNorm'] - # if intersection[2] < 30 and np.dot(f1_norm,f2_norm)>0: - # logger.info('Adding splay {} to {}'.format(intersection[0],f)) - # if model[f] is None: - # logger.error('Fault {} does not exist, cannot be added as splay') - # elif model[intersection[0]] is 
None:
-                #             logger.error('Fault {} does not exist')
-                #         else:
-                #             model[intersection[0]].builder.add_splay(model[f])
-
-                #     else:
-                #         logger.info('Adding abut {} to {}'.format(intersection[0],f))
-                #         model[intersection[0]].add_abutting_fault(model[f])
-    faults = m2l_data.get("fault_graph", None)
-    if faults:
-        for f in faults.nodes:
-            f1_norm = m2l_data["stratigraphic_column"]["faults"][f]["FaultNorm"]
-            for e in faults.edges(f):
-                data = faults.get_edge_data(*e)
-                f2_norm = m2l_data["stratigraphic_column"]["faults"][e[1]]["FaultNorm"]
-
-                if float(data["angle"]) < 30 and np.dot(f1_norm, f2_norm) > 0:
-                    if model[f] is None or model[e[1]] is None:
-                        logger.error(
-                            "Fault {} does not exist, cannot be added as splay"
-                        )
-                    elif model[e[1]] is None:
-                        logger.error("Fault {} does not exist")
-                    else:
-                        region = model[e[1]].builder.add_splay(model[f])
-                        model[e[1]].splay[model[f].name] = region
-                else:
-                    if model[f] is None or model[e[1]] is None:
-                        continue
-
-                    logger.info("Adding abut {} to {}".format(e[1], f))
-                    model[e[1]].add_abutting_fault(model[f])
-    ## loop through all of the groups and add them to the model in youngest to oldest.
-    group_features = []
-    for i in np.sort(m2l_data["groups"]["group number"].unique()):
-        g = (
-            m2l_data["groups"]
-            .loc[m2l_data["groups"]["group number"] == i, "group"]
-            .unique()[0]
-        )
-        group_features.append(model.create_and_add_foliation(g, **foliation_params))
-        # if the group was successfully added (not null) then lets add the base (0 to be unconformity)
-        if group_features[-1] and unconformities:
-            model.add_unconformity(group_features[-1], 0)
-    model.set_stratigraphic_column(m2l_data["stratigraphic_column"])
-    if evaluate:
-        model.update(verbose=True)
-    return model
diff --git a/LoopStructural/utils/maths.py b/LoopStructural/utils/maths.py
new file mode 100644
index 000000000..aef0032a7
--- /dev/null
+++ b/LoopStructural/utils/maths.py
@@ -0,0 +1,218 @@
+from LoopStructural.utils.typing import NumericInput
+import numpy as np
+from typing import Union
+import numbers
+
+
+def strikedip2vector(strike: NumericInput, dip: NumericInput) -> np.ndarray:
+    """Convert strike and dip to a vector
+
+    Parameters
+    ----------
+    strike : Union[np.ndarray, list]
+        array or array like of strike values in degrees
+    dip : Union[np.ndarray, list]
+        array or array like of dip values in degrees
+
+    Returns
+    -------
+    np.ndarray
+        nx3 array of unit normal vectors
+    """
+    vec = np.zeros((len(strike), 3))
+    s_r = np.deg2rad(strike)
+    d_r = np.deg2rad(dip)
+    vec[:, 0] = np.sin(d_r) * np.cos(s_r)
+    vec[:, 1] = -np.sin(d_r) * np.sin(s_r)
+    vec[:, 2] = np.cos(d_r)
+    vec /= np.linalg.norm(vec, axis=1)[:, None]
+    return vec
+
+
+def azimuthplunge2vector(
+    plunge: NumericInput,
+    plunge_dir: NumericInput,
+    degrees: bool = True,
+) -> np.ndarray:
+    """Convert plunge and plunge direction to a vector
+
+    Parameters
+    ----------
+    plunge : Union[np.ndarray, list]
+        array or array like of plunge values
+    plunge_dir : Union[np.ndarray, list]
+        array or array like of plunge direction values
+    degrees : bool, optional
+        whether the input angles are in degrees, by default True
+
+    Returns
+    -------
+    np.array
+        nx3 vector
+    """
+    if isinstance(plunge, numbers.Number):
+        plunge = np.array([plunge], dtype=float)
+    else:
+        plunge = np.array(plunge, dtype=float)
+    if isinstance(plunge_dir, numbers.Number):
+        plunge_dir = np.array([plunge_dir], dtype=float)
+    else:
+        plunge_dir = np.array(plunge_dir, dtype=float)
+    if degrees:
+        plunge = np.deg2rad(plunge)
+        plunge_dir = np.deg2rad(plunge_dir)
+    vec = np.zeros((len(plunge), 3))
+    vec[:, 0] = np.sin(plunge_dir) * np.cos(plunge)
+    vec[:, 1] = np.cos(plunge_dir) * np.cos(plunge)
+    vec[:, 2] = -np.sin(plunge)
+    return vec
+
+
+def normal_vector_to_strike_and_dip(
+    normal_vector: NumericInput, degrees: bool = True
+) -> np.ndarray:
+    """Convert from a normal vector to strike and dip
+
+    Parameters
+    ----------
+    normal_vector : np.ndarray, list
+        array of normal vectors
+    degrees : bool, optional
+        whether to return in degrees or radians, by default True
+
+    Returns
+    -------
+    np.ndarray
+        nx2 array of strike and dip values
+
+    Notes
+    -----
+    If a 1d array is passed in it is assumed to be a single normal vector
+    and is cast into a 1x3 array.
+    """
+    normal_vector = np.array(normal_vector)
+    if len(normal_vector.shape) == 1:
+        normal_vector = normal_vector[None, :]
+    # normalise the normal vector
+    normal_vector /= np.linalg.norm(normal_vector, axis=1)[:, None]
+    dip = np.arccos(normal_vector[:, 2])
+    strike = -np.arctan2(normal_vector[:, 1], normal_vector[:, 0])
+    if degrees:
+        dip = np.rad2deg(dip)
+        strike = np.rad2deg(strike)
+
+    return np.array([strike, dip]).T
+
+
+def rotation(axis: NumericInput, angle: float) -> np.ndarray:
+    """Create a rotation matrix for an axis and angle
+
+    Parameters
+    ----------
+    axis : Union[np.ndarray, list]
+        vector defining the axis of rotation
+    angle : float
+        angle to rotate in degrees
+
+    Returns
+    -------
+    np.ndarray
+        3x3 rotation matrix
+    """
+    c = np.cos(np.deg2rad(angle))
+    s = np.sin(np.deg2rad(angle))
+    C = 1.0 - c
+    x = axis[0]
+    y = axis[1]
+    z = axis[2]
+    xs = x * s
+    ys = y * s
+    zs = z * s
+    xC = x * C
+    yC = y * C
+    zC = z * C
+    xyC = x * yC
+    yzC = y * zC
+    zxC = z * xC
+    rotation_mat = np.zeros((3, 3))
+    rotation_mat[0][0] = x * xC + c
+    rotation_mat[0][1] = xyC - zs
+    rotation_mat[0][2] = zxC + ys
+
+    rotation_mat[1][0] = xyC + zs
+    rotation_mat[1][1] = y * yC + c
+    rotation_mat[1][2] = yzC - xs
+
+    rotation_mat[2][0] = zxC - ys
+    rotation_mat[2][1] = yzC + xs
+    rotation_mat[2][2] = z * zC + c
+    return rotation_mat
+
+
+def get_vectors(normal: NumericInput) -> tuple[np.ndarray, np.ndarray]:
+    """Find strike and dip vectors for a normal vector.
+    Assumes the strike vector is the horizontal component of the plane and
+    the dip vector points down-dip.
+    The vectors are found by calculating the strike and dip angles and then
+    constructing the corresponding unit vectors.
+
+    Parameters
+    ----------
+    normal : Union[np.ndarray, list]
+        array of plane normal vectors
+
+    Returns
+    -------
+    np.ndarray, np.ndarray
+        strike vector, dip vector
+    """
+    length = np.linalg.norm(normal, axis=1)[:, None]
+    normal /= length  # np.linalg.norm(normal,axis=1)[:,None]
+    strikedip = normal_vector_to_strike_and_dip(normal)
+    strike_vec = get_strike_vector(strikedip[:, 0])
+    strike_vec /= np.linalg.norm(strike_vec, axis=0)[None, :]
+    dip_vec = np.cross(
+        strike_vec, normal, axisa=0, axisb=1
+    ).T  # (strikedip[:, 0], strikedip[:, 1])
+    dip_vec /= np.linalg.norm(dip_vec, axis=0)[None, :]
+    return strike_vec * length.T, dip_vec * length.T
+
+
+def get_strike_vector(strike: NumericInput, degrees: bool = True) -> np.ndarray:
+    """Return the vector aligned with the strike direction
+
+    Parameters
+    ----------
+    strike : np.ndarray
+        strike direction
+    degrees : bool, optional
+        whether to return in degrees or radians, by default True
+
+    Returns
+    -------
+    np.ndarray
+        vector aligned with strike direction
+
+    """
+    if isinstance(strike, numbers.Number):
+        strike = np.array([strike])
+    strike = np.array(strike)
+    if degrees:
+        strike = np.deg2rad(strike)
+    v = np.array(
+        [
+            np.sin(-strike),
+            -np.cos(-strike),
+            np.zeros(strike.shape[0]),
+        ]
+    )
+
+    return v
+
+
+def get_dip_vector(strike, dip):
+    """Return the vector pointing down-dip for a given strike and dip (degrees)."""
+    v = np.array(
+        [
+            -np.cos(np.deg2rad(-strike)) * np.cos(-np.deg2rad(dip)),
+            np.sin(np.deg2rad(-strike)) * np.cos(-np.deg2rad(dip)),
+            np.sin(-np.deg2rad(dip)),
+        ]
+    )
+    return v
diff --git a/LoopStructural/utils/typing.py b/LoopStructural/utils/typing.py
new file mode 100644
index 000000000..ea5c5b4d0
--- /dev/null
+++ b/LoopStructural/utils/typing.py
@@ -0,0 +1,8 @@
+from typing import TypeVar, Union, List
+import numbers
+
+T = TypeVar("T")
+Array = List[T]
+
+NumericInput = Union[numbers.Number, Array[numbers.Number]]
diff --git a/LoopStructural/visualisation/model_plotter.py b/LoopStructural/visualisation/model_plotter.py
index c60fa6cea..7f280e4b3 100644
--- a/LoopStructural/visualisation/model_plotter.py
+++ b/LoopStructural/visualisation/model_plotter.py
@@ -19,7 +19,7 @@
     BaseFeature,
     StructuralFrame,
 )
-from ..utils.helper import create_surface, get_vectors, create_box
+from ..utils import create_surface, create_box

 class BaseModelPlotter:
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 81f93b262..9ac43a534 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -13,8 +13,6 @@ build:

 requirements:
   build:
-    - {{ compiler('cxx') }}
-    - Cython
     - numpy
   host:
     - pip
diff --git a/docs/Dockerfile b/docs/Dockerfile
index c0c5ea243..83afabfa2 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -32,10 +32,10 @@ RUN conda install -c conda-forge\
     python=3.10\
     -y
 RUN pip install git+https://github.com/geopandas/geopandas.git@v0.10.2
-RUN pip install lavavu-osmesa==1.8.45
+RUN pip install lavavu-osmesa==1.8.45
 ENV LD_LIBRARY_PATH=/opt/conda/lib/python3.10/site-packages/lavavu_osmesa.libs
 RUN conda install -c conda-forge pydata-sphinx-theme
-
+RUN pip install sphinxcontrib-bibtex
 ENV TINI_VERSION v0.19.0
 ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
 RUN chmod +x /tini
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 609171a96..2e5ec648f 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1,5 @@
 sphinx==3.5.4
 sphinx_gallery
 sphinxcontrib-bibtex
-sphinx_rtd_theme
+pydata_sphinx_theme
 myst-parser
\ No newline at end of file
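A quick illustration of the conventions used by get_vectors above (note the 3 x n column layout of the returned vectors; the horizontal-plane input is illustrative):

    import numpy as np
    from LoopStructural.utils.maths import get_vectors

    # For a horizontal plane (normal pointing up) both returned vectors
    # are horizontal; each is a 3 x n column of components.
    normal = np.array([[0.0, 0.0, 1.0]])
    strike_vec, dip_vec = get_vectors(normal)
    print(strike_vec.T, dip_vec.T)  # e.g. [[0., -1., 0.]] and [[-1., 0., 0.]]

diff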
--git a/docs/source/API.rst b/docs/source/API.rst index 160248e12..69c54d46a 100644 --- a/docs/source/API.rst +++ b/docs/source/API.rst @@ -1 +1,16 @@ -.. automodule:: LoopStructural +API +--- + + +.. autosummary:: + :caption: API + :toctree: _autosummary + :template: custom-module-template.rst + :recursive: + + LoopStructural + LoopStructural.modelling + LoopStructural.interpolators + LoopStructural.api + LoopStructural.visualisation + LoopStructural.datatypes diff --git a/docs/source/_static/custom-icon.js b/docs/source/_static/custom-icon.js new file mode 100644 index 000000000..cd949b3b7 --- /dev/null +++ b/docs/source/_static/custom-icon.js @@ -0,0 +1,16 @@ +/******************************************************************************* + * Set a custom icon for pypi as it's not available in the fa built-in brands + */ +FontAwesome.library.add( + (faListOldStyle = { + prefix: "fa-custom", + iconName: "pypi", + icon: [ + 17.313, // viewBox width + 19.807, // viewBox height + [], // ligature + "e001", // unicode codepoint - private use area + "m10.383 0.2-3.239 1.1769 3.1883 1.1614 3.239-1.1798zm-3.4152 1.2411-3.2362 1.1769 3.1855 1.1614 3.2369-1.1769zm6.7177 0.00281-3.2947 1.2009v3.8254l3.2947-1.1988zm-3.4145 1.2439-3.2926 1.1981v3.8254l0.17548-0.064132 3.1171-1.1347zm-6.6564 0.018325v3.8247l3.244 1.1805v-3.8254zm10.191 0.20931v2.3137l3.1777-1.1558zm3.2947 1.2425-3.2947 1.1988v3.8254l3.2947-1.1988zm-8.7058 0.45739c0.00929-1.931e-4 0.018327-2.977e-4 0.027485 0 0.25633 0.00851 0.4263 0.20713 0.42638 0.49826 1.953e-4 0.38532-0.29327 0.80469-0.65542 0.93662-0.36226 0.13215-0.65608-0.073306-0.65613-0.4588-6.28e-5 -0.38556 0.2938-0.80504 0.65613-0.93662 0.068422-0.024919 0.13655-0.038114 0.20156-0.039466zm5.2913 0.78369-3.2947 1.1988v3.8247l3.2947-1.1981zm-10.132 1.239-3.2362 1.1769 3.1883 1.1614 3.2362-1.1769zm6.7177 0.00213-3.2926 1.2016v3.8247l3.2926-1.2009zm-3.4124 1.2439-3.2947 1.1988v3.8254l3.2947-1.1988zm-6.6585 0.016195v3.8275l3.244 1.1805v-3.8254zm16.9 0.21143-3.2947 1.1988v3.8247l3.2947-1.1981zm-3.4145 1.2411-3.2926 1.2016v3.8247l3.2926-1.2009zm-3.4145 1.2411-3.2926 1.2016v3.8247l3.2926-1.2009zm-3.4124 1.2432-3.2947 1.1988v3.8254l3.2947-1.1988zm-6.6585 0.019027v3.8247l3.244 1.1805v-3.8254zm13.485 1.4497-3.2947 1.1988v3.8247l3.2947-1.1981zm-3.4145 1.2411-3.2926 1.2016v3.8247l3.2926-1.2009zm2.4018 0.38127c0.0093-1.83e-4 0.01833-3.16e-4 0.02749 0 0.25633 0.0085 0.4263 0.20713 0.42638 0.49826 1.97e-4 0.38532-0.29327 0.80469-0.65542 0.93662-0.36188 0.1316-0.65525-0.07375-0.65542-0.4588-1.95e-4 -0.38532 0.29328-0.80469 0.65542-0.93662 0.06842-0.02494 0.13655-0.03819 0.20156-0.03947zm-5.8142 0.86403-3.244 1.1805v1.4201l3.244 1.1805z", // svg path (https://simpleicons.org/icons/pypi.svg) + ], + }) +); diff --git a/docs/source/_static/infinity_loop_icon.svg b/docs/source/_static/infinity_loop_icon.svg new file mode 100644 index 000000000..a69ecd1b2 --- /dev/null +++ b/docs/source/_static/infinity_loop_icon.svg @@ -0,0 +1,4 @@ + + \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 275e226ad..0f961bc99 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -26,6 +26,10 @@ # The full version, including alpha/beta/rc tags release = LoopStructural.__version__ +# -- Internationalization ---------------------------------------------------- + +# specifying the natural language populates some key tags +language = "en" # -- General configuration --------------------------------------------------- autoclass_content = "both" # include both class docstring 
and __init__ @@ -54,7 +58,10 @@ "sphinx_gallery.gen_gallery", # citations "myst_parser", + "sphinxcontrib.bibtex", ] +bibtex_bibfiles = ["docs_references.bib"] +bibtex_default_style = "plain" # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -66,22 +73,35 @@ html_theme_options = { "icon_links": [ + { + "name": "Twitter", + "url": "https://twitter.com/loop3d", + "icon": "fab fa-twitter", + }, { "name": "GitHub", "url": "https://github.com/loop3d/LoopStructural", - "icon": "fab fa-github-square", + "icon": "fab fa-github", }, { - "name": "Twitter", - "url": "https://twitter.com/loop3d", - "icon": "fab fa-twitter-square", + "name": "PyPI", + "url": "https://pypi.org/project/LoopStructural", + "icon": "fa-custom fa-pypi", }, ], + # "navbar_start": ["navbar-logo", "navbar-version"], # "use_edit_page_button": True, + "collapse_navigation": True, "external_links": [ {"name": "Loop3d", "url": "https://www.loop3d.org"}, ], + "header_links_before_dropdown": 4, + "logo": { + "text": "LoopStructural - {}".format(release), + "image_light": "_static/infinity_loop_icon.svg", + "image_dark": "_static/infinity_loop_icon.svg", + }, } # -- Options for HTML output ------------------------------------------------- @@ -89,11 +109,17 @@ # a list of builtin themes. # html_theme = "pydata_sphinx_theme" +html_sourcelink_suffix = "" +html_last_updated_fmt = "" # to reveal the build date in the pages meta # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] +html_css_files = ["custom.css"] +html_js_files = ["custom-icon.js"] +todo_include_todos = True + autosummary_mock_imports = [ "LoopStructural.interpolators._cython", ] diff --git a/docs/source/docs_references.bib b/docs/source/docs_references.bib new file mode 100644 index 000000000..b018c892e --- /dev/null +++ b/docs/source/docs_references.bib @@ -0,0 +1,82 @@ +@article{Frank2007, + abstract = {In this paper we introduce a new, precise and adaptive method for the implicit reconstruction of faulted surfaces with complex geometry from scattered, unorganized points as obtained from seismic data or laser scanners. We embed the point set into a 3d-complex on which a 3d-implicit function is interpolated. The 3d-complex is a set of tetrahedrons and the implicit function represents a surface that lies as close as possible to the input data points. The density of the 3d-complex can be adapted to efficiently control both the precision of the implicit function and the size of triangles of the reconstructed surface. Discontinuities in the topology of the tetrahedral mesh make it possible to reconstruct discontinuous, bounded surfaces and very close parallel patches without introducing unwanted connections (topological "handles") between these regions. To compute the implicit function we use the discrete smooth interpolation (DSI) method with a set of boundary, off-boundary and smoothness constraints. The interpolation problem does not primarily depend on the number of input data points but on the magnitude of the 3d-complex. This method can be applied to the construction of faulted horizons and salt-top surfaces. © 2007 Elsevier Ltd. 
All rights reserved.}, + author = {Tobias Frank and Anne-Laure Laure Tertois and Jean-Laurent Laurent Mallet}, + doi = {10.1016/j.cageo.2006.11.014}, + issn = {00983004}, + issue = {7}, + journal = {Computers & Geosciences}, + keywords = {Computational geometry,Discrete smooth interpolation,Ill-conditioned point data,Implicit function,Surface reconstruction,Tetrahedral mesh,computational geometry,discrete,ill-conditioned point data,implicit function,smooth interpolation,surface reconstruction,tetrahedral mesh}, + month = {7}, + pages = {932-943}, + title = {3D-reconstruction of complex geological interfaces from irregularly distributed and noisy point data}, + volume = {33}, + url = {http://linkinghub.elsevier.com/retrieve/pii/S0098300407000581}, + year = {2007} +} +@article{Grose2018, + author = {L. Grose and G. Laurent and L. Aillères and R. Armit and M. Jessell and T. Cousin-Dechenaud}, + doi = {10.1029/2017JB015177}, + issn = {21699313}, + issue = {8}, + journal = {Journal of Geophysical Research: Solid Earth}, + pages = {6318-6333}, + title = {Inversion of structural geology data for fold geometry}, + volume = {123}, + url = {http://doi.wiley.com/10.1029/2017JB015177}, + year = {2018} +} +@article{Grose2019, + abstract = {The process of building three-dimensional (3D) geological models can be framed as an inverse problem where a model describing the 3D distribution of rock units is non-uniquely derived from geological observations. The inverse problem theory provides a powerful framework for inferring these parameters from all geological observations, in a similar way to how a geologist can iteratively update their structural interpretation while mapping. Existing geological knowledge is usually indirectly incorporated into 3D models using the geologist's non-unique interpretation as form lines, cross sections and level maps. These approaches treat constraints derived from geological knowledge in the same way as direct observations, diluting and confusing both information provided by geological knowledge and hard data resulting in significant subjectivity. We present a geological inversion using Bayesian inference where geological knowledge can be incorporated directly into the interpolation scheme with likelihood functions and informative prior distributions. We demonstrate these approaches on a series of synthetic fold shapes as a proof of concept and a case study from the Proterozoic Davenport Province in the Northern Territory, Australia. The combined inversion of geological data and knowledge significantly reduces the uncertainty in possible fold geometries where data is sparse or highly ambiguous. 
This could be used by geologists while mapping to propagate information about uncertainties throughout the mapping/model building process and would allow for different structural interpretations to be rapidly tested for targeted data collection.}, + author = {Lachlan Grose and Laurent Ailleres and Gautier Laurent and Robin Armit and Mark Jessell}, + doi = {10.1016/j.jsg.2018.11.010}, + issn = {01918141}, + issue = {November 2018}, + journal = {Journal of Structural Geology}, + keywords = {3D modeling,Bayesian inference,Folding,Geological inversion,Geological uncertainty,Inverse problem,Structural geology,bayesian inference,folding,geological inversion,geological uncertainty,structural geology}, + pages = {1-14}, + publisher = {Elsevier}, + title = {Inversion of geological knowledge for fold geometry}, + volume = {119}, + url = {https://doi.org/10.1016/j.jsg.2018.11.010}, + year = {2019} +} +@article{Hillier2014, + author = {Michael J. Hillier and Ernst M. Schetselaar and Eric A. de Kemp and Gervais Perron}, + doi = {10.1007/s11004-014-9540-3}, + issn = {1874-8961}, + issue = {8}, + journal = {Mathematical Geosciences}, + month = {11}, + pages = {931-953}, + title = {Three-Dimensional Modelling of Geological Surfaces Using Generalized Interpolation with Radial Basis Functions}, + volume = {46}, + url = {http://link.springer.com/10.1007/s11004-014-9540-3}, + year = {2014} +} +@article{Laurent2016, + abstract = {© 2016 Elsevier B.V. Three-dimensional structural modeling is gaining importance for a broad range of quantitative geoscientific applications. However, existing approaches are still limited by the type of structural data they are able to use and by their lack of structural meaning. Most techniques heavily rely on spatial data for modeling folded layers, but are unable to completely use cleavage and lineation information for constraining the shape of modeled folds. This lack of structural control is generally compensated by expert knowledge introduced in the form of additional interpretive data such as cross-sections and maps. With this appro ach, folds are explicitly designed by the user instead of being derived from data. This makes the resulting structures subjective and deterministic. This paper introduces a numerical framework for modeling folds and associated foliations from typical field data. In this framework, a parametric description of fold geometry is incorporated into the interpolation algorithm. This way the folded geometry is implicitly derived from observed data, while being controlled through structural parameters such as fold wavelength, amplitude and tightness. A fold coordinate system is used to support the numerical description of fold geometry and to modify the behavior of classical structural interpolators. This fold frame is constructed from fold-related structural elements such as axial foliations, intersection lineations, and vergence. Poly-deformed terranes are progressively modeled by successively modeling each folding event going backward through time. The proposed framework introduces a new modeling paradigm, which enables the building of three-dimensional geological models of complex poly-deformed terranes. 
It follows a process based on the structural geologist approach and is able to produce geomodels that honor both structural data and geological knowledge.},
+   author = {Gautier Laurent and Laurent Ailleres and Lachlan Grose and Guillaume Caumon and Mark Jessell and Robin Armit},
+   doi = {10.1016/j.epsl.2016.09.040},
+   issn = {0012821X},
+   journal = {Earth and Planetary Science Letters},
+   keywords = {3D modeling,3d modeling,fold,foliation,geological structures,implicit modeling,vergence},
+   pages = {26-38},
+   publisher = {Elsevier B.V.},
+   title = {Implicit modeling of folds and overprinting deformation},
+   volume = {456},
+   url = {http://linkinghub.elsevier.com/retrieve/pii/S0012821X16305209},
+   year = {2016}
+}
+@article{Irakarama2020,
+   abstract = {We introduce a new method for implicit structural modeling. The main developments in this paper are the new regularization operators we propose by extending inherent properties of the classic one-dimensional discrete second derivative operator to higher dimensions. The proposed regularization operators discretize naturally on the Cartesian grid using finite differences, owing to the highly symmetric nature of the Cartesian grid. Furthermore, the proposed regularization operators do not require any special treatment on boundary nodes, and their generalization to higher dimensions is straightforward. As a result, the proposed method has the advantage of being simple to implement. Numerical examples show that the proposed method is robust and numerically efficient.},
+   author = {Modeste Irakarama and Gautier Laurent and Julien Renaudeau and Guillaume Caumon},
+   doi = {10.1007/s11004-020-09887-w},
+   issn = {18748953},
+   journal = {Mathematical Geosciences},
+   keywords = {Finite-differences,Implicit modeling,Interpolation,Regularization operators,Structural modeling},
+   publisher = {Springer Berlin Heidelberg},
+   title = {Finite Difference Implicit Structural Modeling of Geological Structures},
+   url = {https://doi.org/10.1007/s11004-020-09887-w},
+   year = {2020}
+}
+
diff --git a/docs/source/getting_started/contributors_guide.rst b/docs/source/getting_started/contributors_guide.rst
index c1bb9fbc7..894928316 100644
--- a/docs/source/getting_started/contributors_guide.rst
+++ b/docs/source/getting_started/contributors_guide.rst
@@ -28,7 +28,27 @@ Contributing new code
 Any contributions to the documentation and code for LoopStructural are welcome.
 If you would like to contribute code to LoopStructural please open an `issue `_ either
-a bug report or a feature request and then submit a pull request and link it to this issue.
+a bug report or a feature request and then submit a pull request and link it to this issue.
+
+Getting setup
+~~~~~~~~~~~~~
+To get started, fork and clone the repository, then install the dependencies inside an activated conda environment::
+
+    conda activate
+    pip install -r requirements.txt
+    conda list
+
+N.B. On Linux, the LavaVu package requires the installation of extra tools::
+
+    sudo apt install build-essential libgl1-mesa-dev libx11-dev zlib1g-dev
+
+For changes to ``./docs``::
+
+    pip install -r docs/requirements.txt
+    make -C ./docs html
+
+Building the docs the first time takes a little while. After this, you will see a build directory, from which you can preview the generated HTML
+in your browser.
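+
+One way (of many) to preview the build locally, assuming the default Sphinx
+output directory used above, is Python's built-in static file server::
+
+    python -m http.server --directory docs/build/html
+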
 Commit messages
 ---------------
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 6ead52de6..ee3211079 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -19,12 +19,14 @@
 LoopStructural is the forward geological modelling engine for the Loop project and contains all of the main
 elements in a 3D geological model, including stratigraphy, unconformities, faults and folds.

 LoopStructural contains three native interpolation algorithms:
-1. Piecewise-linear interpolation :cite:`Frank2007`
-2. Discrete Fold Interpolator :cite:`Laurent2016,Grose2017,Grose2018,Grose2019`
-3. Finite Difference Interpolation :cite:`Irakarama2020`
+1. Piecewise-linear interpolation :cite:p:`Frank2007`
+2. Discrete Fold Interpolator :cite:p:`Laurent2016,Grose2017,Grose2018,Grose2019`
+3. Finite Difference Interpolation :cite:p:`Irakarama2020`

 and a wrapper for the generalised radial basis functions provided by Surfe :cite:`Hillier2014`.

+.. bibliography::
+
 .. toctree::
    :hidden:
@@ -37,12 +39,6 @@ and a wrapper for the generalised radial basis functions provided by Surfe :cite
    :caption: LoopStructural API
    :hidden:

-.. autosummary::
-   :caption: API
-   :toctree: _autosummary
-   :template: custom-module-template.rst
-   :recursive:
-
-   LoopStructural
+   API
diff --git a/examples/2_fold/plot_adding_folds_to_surfaces.py b/examples/2_fold/plot_adding_folds_to_surfaces.py
index e4d24cfb1..2bb907577 100644
--- a/examples/2_fold/plot_adding_folds_to_surfaces.py
+++ b/examples/2_fold/plot_adding_folds_to_surfaces.py
@@ -22,9 +22,9 @@
 from LoopStructural import GeologicalModel
 from LoopStructural.datasets import load_noddy_single_fold
 from LoopStructural.visualisation import LavaVuModelViewer, RotationAnglePlotter
-from LoopStructural.utils.helper import (
-    strike_dip_vector,
-    plunge_and_plunge_dir_to_vector,
+from LoopStructural.utils import (
+    strikedip2vector,
+    azimuthplunge2vector,
 )
 import pandas as pd
 import numpy as np
diff --git a/examples/3_fault/plot_faulted_intrusion.py b/examples/3_fault/plot_faulted_intrusion.py
index 9fb1e3eb9..5db1a9b95 100644
--- a/examples/3_fault/plot_faulted_intrusion.py
+++ b/examples/3_fault/plot_faulted_intrusion.py
@@ -31,7 +31,7 @@
 # representative of modelling an intrusion.
 #

-intrusion = lambda x, y: (x * 2) ** 2 + (y ** 2)
+intrusion = lambda x, y: (x * 2) ** 2 + (y**2)
 x = np.linspace(-10, 10, 100)
 y = np.linspace(-10, 10, 100)
 xx, yy = np.meshgrid(x, y)
@@ -55,7 +55,7 @@
 model = GeologicalModel(bb[0, :], bb[1, :])
 model.set_model_data(data)
 fault = model.create_and_add_fault(
-    "fault", 500, nelements=10000, steps=4, interpolatortype="PLI", buffer=0.3
+    "fault", 500, nelements=10000, steps=4, interpolatortype="FDI", buffer=0.3
 )

 viewer = LavaVuModelViewer(model)
@@ -68,7 +68,11 @@
 xyz = xyz[fault.evaluate(xyz).astype(bool), :]
 viewer.add_vector_field(fault, locations=xyz)
 viewer.add_points(
-    model.data[model.data["feature_name"] == "strati"][["X", "Y", "Z"]], name="prefault"
+    model.rescale(
+        model.data[model.data["feature_name"] == "strati"][["X", "Y", "Z"]],
+        inplace=False,
+    ),
+    name="prefault",
 )
 viewer.rotation = [-73.24819946289062, -86.82220458984375, -13.912878036499023]
 viewer.display()
@@ -95,7 +99,11 @@
 #     slices=[0,1]#nslices=10
 )
 viewer.add_points(
-    model.data[model.data["feature_name"] == "strati"][["X", "Y", "Z"]], name="prefault"
+    model.rescale(
+        model.data[model.data["feature_name"] == "strati"][["X", "Y", "Z"]],
+        inplace=False,
+    ),
+    name="prefault",
 )
 viewer.rotation = [-73.24819946289062, -86.82220458984375, -13.912878036499023]
 viewer.display()
diff --git a/setup.py b/setup.py
index 0ba15a418..000e8f668 100644
--- a/setup.py
+++ b/setup.py
@@ -7,11 +7,7 @@
     raise RuntimeError("Cannot import setuptools \n" "python -m pip install setuptools")
     sys.exit(1)

-try:
-    from Cython.Build import cythonize
-except:
-    raise RuntimeError("Cannot import cython \n" "python -m pip install cython")
-    sys.exit(1)
+
 try:
     import numpy
 except:
@@ -61,10 +57,6 @@
     ],
     version=version,
     packages=find_packages(),
-    ext_modules=cythonize(
-        "LoopStructural/interpolators/_cython/*.pyx",
-        compiler_directives={"language_level": "3"},
-    ),
     include_dirs=[numpy.get_include()],
     include_package_data=True,
     package_data={
diff --git a/tests/integration/test_interpolator.py b/tests/integration/test_interpolator.py
index 3f574275d..3d395b95e 100644
--- a/tests/integration/test_interpolator.py
+++ b/tests/integration/test_interpolator.py
@@ -1,5 +1,6 @@
 from LoopStructural import GeologicalModel
-from LoopStructural.datasets import load_claudius
+from LoopStructural.datasets import load_claudius, load_horizontal
+import pytest

 import numpy as np
@@ -105,6 +105,21 @@ def test_model_with_data_outside_of_bounding_box():
     pass

+@pytest.mark.parametrize("interpolatortype,nelements", [("FDI", 1000), ("PLI", 1000)])
+def test_horizontal_layers(interpolatortype, nelements):
+    data, bb = load_horizontal()
+    model = GeologicalModel(bb[0, :], bb[1, :])
+
+    model.data = data
+    model.create_and_add_foliation(
+        "strati", interpolatortype=interpolatortype, nelements=nelements
+    )
+
+    assert np.all(
+        np.isclose(model["strati"].evaluate_value(data[["X", "Y", "Z"]]), data["val"])
+    )
+
+
 if __name__ == "__main__":
     test_create_model()
     test_add_data()
@@ -116,4 +131,6 @@
     test_create_stratigraphy_PLI_lu()
     test_create_stratigraphy_PLI_pyamg()
     test_model_with_data_outside_of_bounding_box()
+    test_horizontal_layers("FDI", 1000)
+    test_horizontal_layers("PLI", 1000)
     print("ok")
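For context, pytest.mark.parametrize expands each tuple into an independent test case, which is how the two calls in the __main__ block above map onto the test runner; a minimal self-contained sketch (the square function is purely illustrative):

    import pytest

    # Two cases are generated: test_square[2-4] and test_square[3-9],
    # mirroring how test_horizontal_layers runs once per interpolator type.
    @pytest.mark.parametrize("x,expected", [(2, 4), (3, 9)])
    def test_square(x, expected):
        assert x**2 == expected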
diff --git a/tests/unit/modelling/test_structural_frame.py b/tests/unit/modelling/test_structural_frame.py
index 28bd77f98..73ce63901 100644
--- a/tests/unit/modelling/test_structural_frame.py
+++ b/tests/unit/modelling/test_structural_frame.py
@@ -3,9 +3,11 @@
     GeologicalFeature,
 )
 from LoopStructural.modelling.features.builders import StructuralFrameBuilder
+from LoopStructural.datatypes import BoundingBox
 from LoopStructural import GeologicalModel

 import numpy as np
+import pandas as pd


 def test_structural_frame():
@@ -48,3 +50,38 @@ def get_item():

 def test_structural_frame_cross_product():
     pass
+
+
+def test_create_structural_frame_pli():
+    data = pd.DataFrame(
+        [
+            [5.1, 5.1, 5, 0, 0, 1, 0, 0],
+            [5, 5.1, 5, 0, 1, 0, 1, 0],
+            [5.1, 5, 5, 1, 0, 0, 2, 0],
+        ],
+        columns=["X", "Y", "Z", "nx", "ny", "nz", "coord", "val"],
+    )
+    data["feature_name"] = "fault"
+
+    bb = BoundingBox(3, origin=np.zeros(3), maximum=np.ones(3) * 10)
+    model = GeologicalModel(bb.origin, bb.maximum)
+
+    model.data = data
+
+    fault = model.create_and_add_fault(
+        "fault", 10, nelements=2000, steps=4, interpolatortype="PLI", buffer=2
+    )
+    model.update()
+    assert np.all(
+        np.isclose(fault[0].evaluate_gradient(np.array([[5, 5, 5]])), [0, 0, 1])
+    )
+    assert np.all(
+        np.isclose(fault[1].evaluate_gradient(np.array([[5, 5, 5]])), [0, 1, 0])
+    )
+    assert np.all(
+        np.isclose(fault[2].evaluate_gradient(np.array([[5, 5, 5]])), [1, 0, 0])
+    )
+
+
+if __name__ == "__main__":
+    test_create_structural_frame_pli()
diff --git a/tests/unit/utils/test_conversions.py b/tests/unit/utils/test_conversions.py
new file mode 100644
index 000000000..8f732ce19
--- /dev/null
+++ b/tests/unit/utils/test_conversions.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+from LoopStructural.utils import strikedip2vector, azimuthplunge2vector
+
+
+def test_strikedip2vector():
+    strike = [0, 45, 90]
+    dip = [30, 60, 90]
+    expected_result = np.array(
+        [
+            [0.5, 0.0, 0.8660254],
+            [0.61237, -0.61237, 0.5],
+            [0.0, -1.0, 0.0],
+        ]
+    )
+
+    result = strikedip2vector(strike, dip)
+    assert np.allclose(result, expected_result, atol=1e-3)
+
+
+def test_azimuthplunge2vector_single_values():
+    plunge = 0
+    plunge_dir = 90
+    expected_result = np.array([[1, 0, 0]])
+    result = azimuthplunge2vector(plunge, plunge_dir)
+    assert np.allclose(result, expected_result)
+
+
+def test_azimuthplunge2vector_array_values():
+    plunge = [0, 90, 0]
+    plunge_dir = [90, 90, 0]
+    expected_result = np.array(
+        [
+            [1, 0, 0],
+            [0.0, 0.0, -1.0],
+            [0, 1, 0],
+        ]
+    )
+    result = azimuthplunge2vector(plunge, plunge_dir)
+    assert np.allclose(result, expected_result)
+
+
+def test_azimuthplunge2vector_empty_arrays():
+    plunge = []
+    plunge_dir = []
+    result = azimuthplunge2vector(plunge, plunge_dir)
+    # empty input should yield an empty (0, 3) array rather than raising
+    assert result.shape == (0, 3)
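As a closing sanity check of the conversion utilities exercised above, a minimal round-trip sketch (run ad hoc, outside the test suite; it relies only on functions defined in LoopStructural/utils/maths.py in this diff):

    import numpy as np
    from LoopStructural.utils import strikedip2vector
    from LoopStructural.utils.maths import normal_vector_to_strike_and_dip

    # Converting strike/dip to a unit normal and back should recover the
    # input angles (up to 360-degree wrapping of strike).
    strike, dip = np.array([30.0]), np.array([45.0])
    normal = strikedip2vector(strike, dip)
    recovered = normal_vector_to_strike_and_dip(normal)
    assert np.allclose(recovered, np.column_stack([strike, dip]))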