<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing with OASIS Tables v3.0 20080202//EN" "https://jats.nlm.nih.gov/nlm-dtd/publishing/3.0/journalpub-oasis3.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:oasis="http://docs.oasis-open.org/ns/oasis-exchange/table" xml:lang="en" dtd-version="3.0" article-type="research-article">
  <front>
    <journal-meta><journal-id journal-id-type="publisher">GMD</journal-id><journal-title-group>
    <journal-title>Geoscientific Model Development</journal-title>
    <abbrev-journal-title abbrev-type="publisher">GMD</abbrev-journal-title><abbrev-journal-title abbrev-type="nlm-ta">Geosci. Model Dev.</abbrev-journal-title>
  </journal-title-group><issn pub-type="epub">1991-9603</issn><publisher>
    <publisher-name>Copernicus Publications</publisher-name>
    <publisher-loc>Göttingen, Germany</publisher-loc>
  </publisher></journal-meta>
    <article-meta>
      <article-id pub-id-type="doi">10.5194/gmd-19-3455-2026</article-id><title-group><article-title>Curlew 1.0: Spatio-temporal implicit geological modelling with neural fields in Python</article-title><alt-title>Curlew</alt-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author" corresp="yes" rid="aff1 aff5">
          <name><surname>Kamath</surname><given-names>Akshay V.</given-names></name>
          <email>a.kamath@hzdr.de</email>
        <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0000-0003-3407-5222">https://orcid.org/0000-0003-3407-5222</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff1">
          <name><surname>Thiele</surname><given-names>Samuel T.</given-names></name>
          
        </contrib>
        <contrib contrib-type="author" corresp="no" rid="aff2">
          <name><surname>Moulard</surname><given-names>Marie</given-names></name>
          
        </contrib>
        <contrib contrib-type="author" corresp="no" rid="aff3">
          <name><surname>Grose</surname><given-names>Lachlan</given-names></name>
          
        </contrib>
        <contrib contrib-type="author" corresp="no" rid="aff1">
          <name><surname>Tolosana-Delgado</surname><given-names>Raimon</given-names></name>
          
        <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0000-0001-9847-0462">https://orcid.org/0000-0001-9847-0462</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff4">
          <name><surname>Hillier</surname><given-names>Michael J.</given-names></name>
          
        </contrib>
        <contrib contrib-type="author" corresp="no" rid="aff5 aff6">
          <name><surname>Wellmann</surname><given-names>Florian</given-names></name>
          
        <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0000-0003-2552-1876">https://orcid.org/0000-0003-2552-1876</ext-link></contrib>
        <contrib contrib-type="author" corresp="no" rid="aff1">
          <name><surname>Gloaguen</surname><given-names>Richard</given-names></name>
          
        <ext-link ext-link-type="uri" xlink:href="https://orcid.org/0000-0002-4383-473X">https://orcid.org/0000-0002-4383-473X</ext-link></contrib>
        <aff id="aff1"><label>1</label><institution>Helmholtz-Zentrum Dresden-Rossendorf, Helmholtz Institute Freiberg, Chemnitzer Str. 40, 09599 Freiberg, Germany</institution>
        </aff>
        <aff id="aff2"><label>2</label><institution>École Nationale Supérieure de Géologie (ENSG), Université de Lorraine, 54000 Nancy, France</institution>
        </aff>
        <aff id="aff3"><label>3</label><institution>School of Earth, Atmosphere and Environment, Monash University, Clayton, VIC, Australia</institution>
        </aff>
        <aff id="aff4"><label>4</label><institution>Geological Survey of Canada, Natural Resources Canada, 601 Booth Street, Ottawa, ON K1A 0E8, Canada</institution>
        </aff>
        <aff id="aff5"><label>5</label><institution>Institute of Computational Geoscience, Geothermics and Reservoir Geophysics (CG3), RWTH Aachen University, 52074 Aachen, Germany</institution>
        </aff>
        <aff id="aff6"><label>6</label><institution>Fraunhofer Research Institution for Energy Infrastructures and Geothermal Systems (IEG), 44801 Bochum, Germany</institution>
        </aff>
      </contrib-group>
      <author-notes><corresp id="corr1">Akshay V. Kamath (a.kamath@hzdr.de)</corresp></author-notes><pub-date><day>27</day><month>April</month><year>2026</year></pub-date>
      
      <volume>19</volume>
      <issue>8</issue>
      <fpage>3455</fpage><lpage>3475</lpage>
      <history>
        <date date-type="received"><day>15</day><month>October</month><year>2025</year></date>
           <date date-type="rev-request"><day>13</day><month>November</month><year>2025</year></date>
           <date date-type="rev-recd"><day>13</day><month>March</month><year>2026</year></date>
           <date date-type="accepted"><day>10</day><month>April</month><year>2026</year></date>
      </history>
      <permissions>
        <copyright-statement>Copyright: © 2026 Akshay V. Kamath et al.</copyright-statement>
        <copyright-year>2026</copyright-year>
      <license license-type="open-access"><license-p>This work is licensed under the Creative Commons Attribution 4.0 International License. To view a copy of this licence, visit <ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link></license-p></license></permissions><self-uri xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026.html">This article is available from https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026.html</self-uri><self-uri xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026.pdf">The full text article is available as a PDF file from https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026.pdf</self-uri>
      <abstract><title>Abstract</title>

      <p id="d2e184">We present <italic>curlew</italic>, an open-source Python package for structural geological modelling using neural fields. This modelling framework incorporates various local constraints (value, gradient, tangent and (in)equalities) and tailored global loss functions to ensure data-consistent and geologically realistic predictions. Random Fourier Feature (RFF) encodings are used to improve model convergence and facilitate stochastic uncertainty quantification, while simultaneously improving the model's ability to learn naturally periodic features such as folds. These advances are integrated into a software framework that allows incremental construction of complex geological models through temporally-linked neural fields, each representing a specific deposition, intrusion or faulting event. Significantly, this framework allows semi-supervised learning to integrate diverse unlabelled datasets (e.g., geochemistry, petrophysics), reducing interpretation bias and potentially improving model robustness. We describe and demonstrate these various capabilities using synthetic examples and real data from a faulted stratigraphic digital outcrop model from Newcastle, Australia.</p>
  </abstract>
    
<funding-group>
<award-group id="gs1">
<funding-source>HORIZON EUROPE European Research Council</funding-source>
<award-id>101058483</award-id>
</award-group>
<award-group id="gs2">
<funding-source>Klaus Tschira Stiftung</funding-source>
<award-id>GSO/KT 76</award-id>
</award-group>
</funding-group>
</article-meta>
  <notes notes-type="copyrightstatement">
  
      <p id="d2e197">The works published in this journal are distributed under the Creative Commons Attribution 4.0 License. This license does not affect the Crown copyright work, which is re-usable under the Open Government Licence (OGL). The Creative Commons Attribution 4.0 License and the OGL are interoperable and do not conflict with, reduce or limit each other. The co-author Michael J. Hillier is an employee of the Canadian Government and therefore claims Crown copyright for the respective contributions.  © Crown copyright 2026</p>
</notes></front>
<body>
      


<sec id="Ch1.S1" sec-type="intro">
  <label>1</label><title>Introduction</title>
      <p id="d2e211">Three-dimensional (3D) structural geological models (SGMs) integrate disparate geological and geophysical data to make predictions about the subsurface distribution of rock types <xref ref-type="bibr" rid="bib1.bibx61" id="paren.1"/>. The results can be used to inform our understanding of large-scale processes, like plate tectonics, geomorphology and hydrogeology. For example, SGMs can provide crucial constraints on construction works (such as tunnelling), guide minerals explorationists and mine operators that supply society with metals, and are fundamental to successful groundwater modelling and management, geothermal energy extraction and (potentially) subsurface carbon storage <xref ref-type="bibr" rid="bib1.bibx42 bib1.bibx61" id="paren.2"/>.</p>
      <p id="d2e220">The uncertainty of SGMs <xref ref-type="bibr" rid="bib1.bibx2 bib1.bibx4 bib1.bibx35 bib1.bibx42 bib1.bibx58 bib1.bibx61 bib1.bibx62" id="paren.3"/> limits geoscientists' ability to disentangle Earth's history and leads to challenging risks for infrastructure, mining or energy projects. Inconsistencies regarding the thickness of a subsurface reservoir unit and its connectivity with surrounding rock types, for example, will significantly influence estimates of its storage capacity <xref ref-type="bibr" rid="bib1.bibx61" id="paren.4"/>, with crucial implications for groundwater management or geothermal energy.</p>
      <p id="d2e229">Geological model uncertainty derives from two key challenges: data sparsity, and erroneous constraints. Subsurface data is expensive to collect, meaning that geological problems are nearly always under-constrained. Statistical methods have been developed to quantify this uncertainty <xref ref-type="bibr" rid="bib1.bibx61" id="paren.5"/>, and so mitigate risks or inform future data collection <xref ref-type="bibr" rid="bib1.bibx34" id="paren.6"/>, but these cannot account for the subjectivity of drill core or outcrop interpretations from which model inputs are derived <xref ref-type="bibr" rid="bib1.bibx42" id="paren.7"/>. Additionally, the methods used to generate geological models often do not fully explore the space of geologically plausible solutions, which can result in an under-sampling of the geological uncertainty.</p>
      <p id="d2e241">With this contribution we propose a framework for SGM construction using neural fields (see Sect. 2) that allow a better integration of geological constraints and unlabelled quantitative information (e.g., geochemistry or hyperspectral datasets) into interpolation workflows. We have implemented our approach in an open-source Python toolbox for neural field modelling, <italic>curlew</italic>, and aim here to document the theory underlying our approach and demonstrate its applicability to a variety of geological structures and settings.</p>
</sec>
<sec id="Ch1.S2">
  <label>2</label><title>Context and theory</title>
      <p id="d2e255">Current state-of-the-art SGM approaches, such as Aspen-SKUA (Aspen-SKUA: <uri>https://www.aspentech.com/en/products/sse/aspen-skua</uri>, last access: 10 October 2025) (formerly Gocad-SKUA), 3D-GeoModeller (3D Geomodeller: <uri>https://www.intrepid-geophysics.com/products/geomodeller/</uri>, last access: 10 October 2025), Leapfrog (Leapfrog: <uri>https://www.seequent.com/products-solutions/leapfrog-geo/</uri>, last access: 10 October 2025), GemPy <xref ref-type="bibr" rid="bib1.bibx11" id="paren.8"/> and LoopStructural <xref ref-type="bibr" rid="bib1.bibx21" id="paren.9"/>, represent geological bodies numerically using continuous implicit fields <xref ref-type="bibr" rid="bib1.bibx61" id="paren.10"/>. These fields can be interpolated from sparse data, and isosurfaces that represent geological contacts extracted for visualisation or further analysis. While many interpolation approaches can be used to construct these implicit fields, interpolation using spatial neural networks (hereafter referred to as neural fields) present a highly adaptable new approach <xref ref-type="bibr" rid="bib1.bibx24" id="paren.11"/>.</p>
      <p id="d2e280">Neural fields (a.k.a. implicit neural representations, coordinate-based/spatial neural networks) are neural networks that take spatial coordinates as inputs, and output predictions by learning the “geometry” of the spatial domain <xref ref-type="bibr" rid="bib1.bibx66" id="paren.12"/>. The flexibility allowed by this parameterisation has been used to address a variety of geoscience problems, including to interpolate potential fields <xref ref-type="bibr" rid="bib1.bibx52" id="paren.13"/>, full tensor gradiometry data <xref ref-type="bibr" rid="bib1.bibx28" id="paren.14"/>, and for geophysical inversions <xref ref-type="bibr" rid="bib1.bibx67" id="paren.15"/>.</p>
      <p id="d2e295">For SGM applications, neural fields have been applied to predict implicit field values, constrained by contact and orientation information <xref ref-type="bibr" rid="bib1.bibx24" id="paren.16"/>. Unlike many other interpolation approaches, the loss function used to train a neural field can be adapted to perform customised and multi-objective optimisation. It is this ability that theoretically allows neural field interpolators to better incorporate geological rules, physical constraints <xref ref-type="bibr" rid="bib1.bibx45" id="paren.17"/> and/or diverse unlabelled quantitative data.</p>
      <p id="d2e304">Recent studies have laid the groundwork for the use of neural fields in SGM construction <xref ref-type="bibr" rid="bib1.bibx14 bib1.bibx24" id="paren.18"/>, though many challenges remain. These include difficulties getting neural fields to converge appropriately without pre-fitting and careful initialisation <xref ref-type="bibr" rid="bib1.bibx24" id="paren.19"><named-content content-type="pre">e.g.</named-content></xref>. More complex geological structures (e.g., faults) also remain difficult, although step-function based faults <xref ref-type="bibr" rid="bib1.bibx6" id="paren.20"/> have been implemented in previous works.</p>
<sec id="Ch1.S2.SS1">
  <label>2.1</label><title>Local constraints</title>
      <p id="d2e326">Most SGMs use two types of constraint: value constraints, which represent a geological contact and specify the value of the underlying implicit field at specific locations, and gradient constraints that constrain the direction of the field's gradient to match structural (e.g. bedding) measurements <xref ref-type="bibr" rid="bib1.bibx11" id="paren.21"/>. Standard loss functions (e.g., Mean Squared Error) can be used to fit neural field predictions to value constraints by penalising differences between predicted and observed values at the constraint locations. Non-standard loss functions <xref ref-type="bibr" rid="bib1.bibx24" id="paren.22"><named-content content-type="pre">e.g., involving the signed distance for geological contact information as used in</named-content></xref> can also be implemented for such constraints.</p>
      <p id="d2e337">Spatial gradients of the neural field can be obtained through automatic differentiation <xref ref-type="bibr" rid="bib1.bibx38" id="paren.23"/> with respect to the inputs (coordinates). As shown by <xref ref-type="bibr" rid="bib1.bibx24" id="text.24"/>, this gradient can be compared to structural measurements to penalise violations of gradient constraints (i.e. bedding measurements). This comparison can be signed (called gradient constraints), using e.g., Mean-Squared Error (MSE) loss on the normalised gradient vector, or unsigned (called tangent constraints), using absolute cosine similarity as a loss function <xref ref-type="bibr" rid="bib1.bibx7" id="paren.25"/>. The former implies a known gradient (“younging”) direction, while the latter minimises the acute angle between the two vectors such that orientation is constrained to a known axis (strike and dip), but with unspecified younging direction.</p>
      <p id="d2e349">Finally, as outlined in Sect. 3, we show that quantitative measurements of meaningful rock properties (e.g., features derived from geochemical, hyperspectral or petrophysical data) can be used to directly constrain neural field interpolations. The core assumption here is that an implicit field that accurately represents subsurface geology should explain a maximal amount of the variance in the measured properties. Property constraints can thus be implemented using a reconstruction loss, where predicted implicit field values are passed through a learned forward model (cf. Sect. 3.2) to reconstruct property measurements and penalise errors (using e.g., MSE loss).</p>
</sec>
<sec id="Ch1.S2.SS2">
  <label>2.2</label><title>Relational constraints</title>
      <p id="d2e360">As shown by <xref ref-type="bibr" rid="bib1.bibx24" id="text.26"/>, neural field interpolations can be formulated so that value constraints are replaced by constraints on the relation (equality and inequality) between pairs of observation points. Because the interpolated values in an SGM are typically arbitrary, such approaches find any (under-constrained) solution in which geological contacts have the same value, i.e., equality constraints, and points in younger geological units are systematically greater than (or less than, depending on convention) older ones, i.e., inequality constraints. This representation has been suggested to enable significantly more flexibility than traditional value constraints <xref ref-type="bibr" rid="bib1.bibx24" id="paren.27"/>.</p>
</sec>
<sec id="Ch1.S2.SS3">
  <label>2.3</label><title>Global constraints</title>
      <p id="d2e377">The previously mentioned local and relational constraints can be used to generate models that fit a sparse set of data. However, the under-constrained nature of the problem means a typically infinite number of solutions can be found, many of which are geologically implausible. Global constraints are thus typically used to encourage smoother solutions over others, and integrate geological rules that penalise geologically unrealistic geometries.</p>
      <p id="d2e380">Smoothness regularisation is used in discrete implicit modelling to constrain the evolution of the interpolated field between geological observations. In a discrete interpolation framework the implicit function is represented on a piece-wise support (e.g., tetrahedral mesh or cartesian grid) and geological constraints are applied by adding linear constraints to the element containing each observation. The smoothness constraints are applied globally to the interpolation function by adding additional constraints to locally minimize variations in the function gradient. This can be done by minimising the variations in the second derivatives of the implicit field using finite differences <xref ref-type="bibr" rid="bib1.bibx26" id="paren.28"/> or by minimising the variation in function gradient projected across the shared elements <xref ref-type="bibr" rid="bib1.bibx13" id="paren.29"/>. Data supported methods such as co-kriging <xref ref-type="bibr" rid="bib1.bibx29" id="paren.30"/> or radial basis function (RBF) interpolation <xref ref-type="bibr" rid="bib1.bibx25" id="paren.31"/> typically use globally smooth basis functions, which naturally yield smooth interpolants. The smoothness can be controlled by the shape parameters of the basis function used. In neural field approaches, the loss functions can be adjusted to implement smoothness regularisation. This is based either on the magnitude of the gradient vector <xref ref-type="bibr" rid="bib1.bibx24" id="paren.32"><named-content content-type="pre">e.g.</named-content></xref>, or on the second derivative tensor (i.e., the hessian) of the implicit field. These adjustments require some global support (set of points) at which these losses are accumulated.</p>
      <p id="d2e400">However, smooth implicit fields can still have geologically impossible isosurfaces. Most standard interpolators operate on the minimum curvature principle and are often implemented via biharmonic splines or discrete smooth interpolation. This implies that the resulting field approximates a solution to the biharmonic equation (<inline-formula><mml:math id="M1" display="inline"><mml:mrow><mml:msup><mml:mi mathvariant="normal">∇</mml:mi><mml:mn mathvariant="normal">4</mml:mn></mml:msup><mml:mi mathvariant="italic">ϕ</mml:mi><mml:mo>=</mml:mo><mml:mn mathvariant="normal">0</mml:mn></mml:mrow></mml:math></inline-formula>) <xref ref-type="bibr" rid="bib1.bibx5 bib1.bibx49 bib1.bibx36 bib1.bibx53" id="paren.33"/>. Biharmonic functions, while smooth, lack a maximum principle and therefore impose no limitations on the formation of local extrema. This leads the interpolator to generate closed isosurfaces (colloquially known as “bubbles”), which contradict the fundamental principles of original horizontality <xref ref-type="bibr" rid="bib1.bibx54 bib1.bibx58 bib1.bibx59" id="paren.34"/>. To encourage geologically realistic, bubble-free stratigraphy without allowing the network to collapse to a trivial solution, we have implemented a set of global geologically-informed loss functions (Sect. 3.2.2).</p>
      <p id="d2e426">In many cases it is also desirable to enforce a constant (or approximately constant) gradient magnitude <xref ref-type="bibr" rid="bib1.bibx31" id="paren.35"/>. Changes in gradient magnitude imply variations in the thicknesses of the sedimentary units (which correspondingly imply changes in volume during deformation events), which might be unwanted. For example, <xref ref-type="bibr" rid="bib1.bibx24" id="text.36"/> penalised gradient vectors of non-unit lengths to limit lateral variations in the thickness of interpolated sedimentary units. Such constant gradient constraints can be useful when interpolating true distance fields, such as the distance from a fault surface, but quickly become problematic when modelling units that change thickness (e.g., sedimentary series that change thickness, folds, etc.).</p>
</sec>
<sec id="Ch1.S2.SS4">
  <label>2.4</label><title>Deformation fields and time-aware modelling</title>
      <p id="d2e443">Many geological objects (folds, faults, intrusions) create discontinuities. Such discontinuities contradict the principles of global and local maximisation of smoothness. One approach to include such discontinuous geological features is to modify the interpolation framework and incorporate the geometries from separate implicit fields in a step-wise fashion, as in e.g., LoopStructural <xref ref-type="bibr" rid="bib1.bibx21 bib1.bibx20" id="paren.37"/>. LoopStructural models are built using a temporally successive approach where the most recent geological objects are built first, and these objects are used to constrain the geometry of older geological objects through kinematic reconstruction.</p>
      <p id="d2e449">In the LoopStructural approach, faults are incorporated into the implicit model in two steps. First, constraints along the fault are used to interpolate an implicit field representing the fault surface. Then, this field is either used to split the interpolator into separate domains (i.e., domain boundary faults), as a step function in the polynomial trend <xref ref-type="bibr" rid="bib1.bibx6 bib1.bibx10" id="paren.38"/> or to constrain a kinematic operator for the fault <xref ref-type="bibr" rid="bib1.bibx17 bib1.bibx20 bib1.bibx32" id="paren.39"/>. The kinematic approach provides the most control over the faulted surface, but requires a priori knowledge to constrain the displacement and slip direction.</p>
      <p id="d2e458">Folds also pose challenges for smoothness regularisation. One approach is to modify the implicit interpolator by reducing the regularisation in the direction orthogonal to the axial surface and along the fold axis, such that high curvature in geologically plausible directions (i.e. fold hinges) is not penalised <xref ref-type="bibr" rid="bib1.bibx19 bib1.bibx31" id="paren.40"/>.</p>
</sec>
</sec>
<sec id="Ch1.S3">
  <label>3</label><title>Methods: geological neural fields with <italic>curlew</italic></title>
      <p id="d2e476">We have developed an open-source Python package, <italic>curlew</italic>, that provides a flexible toolbox for building SGMs with neural fields. The following sections explore the Random Fourier Feature mapping (Sect. 3.1) used to improve convergence during training, and customisable loss functions (Sect. 3.2) that have been implemented to capture many of the different constraint types reviewed in the previous section. Additionally, we have established a framework (Sect. 3.3) for combining multiple neural fields representing different structures and introducing geological discontinuities.</p>
      <p id="d2e482">A SGM in <italic>curlew</italic> is parameterised by a temporally directed graph of neural fields (see Sect. 3.3). Importantly, each neural field can be trained independently to fit specified constraints, and simultaneously to optimize parameters that influence multiple fields (e.g., fault slip). We also introduce a novel approach that, through a learnable forward model, allows the set of neural fields to be jointly fit to compositional data (e.g., drill core geochemistry or hyperspectral data) and/or lithological classifications (i.e., geological maps or logs) to directly update geological geometries. The inclusion of ancillary datasets (e.g., compositional, categorical, ordinal, etc.) into the modelling framework allows the model to decrease the uncertainties associated with interpretative bias and reduce the subjectivity associated with interpreted information.</p>
<sec id="Ch1.S3.SS1">
  <label>3.1</label><title>Random Fourier feature encoding and neural field architecture</title>
      <p id="d2e495">SGMs are defined in 2- or 3-D space. This low dimensionality presents a challenge for the neural fields, which can struggle to converge given the small number of input features. To help mitigate these limitations, <xref ref-type="bibr" rid="bib1.bibx56" id="text.41"/> proposed Random Fourier Feature (RFF) mappings, a method that maps input coordinates into a higher-dimensional feature space by projecting the input coordinates onto randomly chosen direction vectors and passing the resulting lengths through sine and cosine functions. This work was built on the contribution by <xref ref-type="bibr" rid="bib1.bibx44" id="text.42"/>, who proposed these feature mappings as approximators for large-scale kernel machines. This mapping (1) improves the representation of high-frequency signals in neural fields, (2) accelerates convergence during neural network training, avoiding the problem of spectral bias in neural networks <xref ref-type="bibr" rid="bib1.bibx43" id="paren.43"/>, and (3) allows the network to learn a quasi-Fourier representation of the spatial variability. This approach is particularly effective to accurately capture geological complexities, especially in situations where geological structures display periodic geometries (i.e. folds).</p>
      <p id="d2e507">In our implementation, input coordinates are projected onto a set of <inline-formula><mml:math id="M2" display="inline"><mml:mi>M</mml:mi></mml:math></inline-formula> 2- or 3-D vectors in which each component is randomly drawn from a Gaussian distribution with zero mean and unit variance. The resulting values are scaled by a frequency term and passed through a sine and a cosine function, to give <inline-formula><mml:math id="M3" display="inline"><mml:mrow><mml:mn mathvariant="normal">2</mml:mn><mml:mi>M</mml:mi></mml:mrow></mml:math></inline-formula> features ranging between <inline-formula><mml:math id="M4" display="inline"><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:math></inline-formula> and 1, analogous to amplitudes in a sparsely sampled Fourier domain. The frequencies of the mapping are explicitly scaled with length scale parameters to match the expected scale of variability in the model being constructed. These features effectively seed the network and guide feature learning at desired scales.</p>
      <p id="d2e537">For a 3D input, we get a <inline-formula><mml:math id="M5" display="inline"><mml:mrow><mml:mn mathvariant="normal">2</mml:mn><mml:mi>M</mml:mi></mml:mrow></mml:math></inline-formula> dimensional feature vector <inline-formula><mml:math id="M6" display="inline"><mml:mrow><mml:msub><mml:mi mathvariant="bold-italic">ν</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula>  for every length scale <inline-formula><mml:math id="M7" display="inline"><mml:mrow><mml:msub><mml:mi mathvariant="normal">ℓ</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> given by

            <disp-formula id="Ch1.E1" content-type="numbered"><label>1</label><mml:math id="M8" display="block"><mml:mtable rowspacing="0.2ex" class="aligned" columnspacing="1em" displaystyle="true" columnalign="right left"><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:msub><mml:mi mathvariant="bold-italic">ν</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub><mml:mo>=</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mo>[</mml:mo><mml:mi>sin⁡</mml:mi><mml:mo>(</mml:mo><mml:mn mathvariant="normal">2</mml:mn><mml:mi mathvariant="italic">π</mml:mi><mml:msub><mml:mi mathvariant="bold">W</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub><mml:mi mathvariant="bold-italic">r</mml:mi><mml:mo>)</mml:mo><mml:mo>,</mml:mo><mml:mi>cos⁡</mml:mi><mml:mo>(</mml:mo><mml:mn mathvariant="normal">2</mml:mn><mml:mi mathvariant="italic">π</mml:mi><mml:msub><mml:mi mathvariant="bold">W</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub><mml:mi mathvariant="bold-italic">r</mml:mi><mml:mo>)</mml:mo><mml:mo>]</mml:mo><mml:mspace width="0.33em" linebreak="nobreak"/><mml:mo>,</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle displaystyle="true" class="stylechange"/></mml:mtd><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mtext>where </mml:mtext><mml:msub><mml:mi mathvariant="bold">W</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msubsup><mml:mi mathvariant="normal">ℓ</mml:mi><mml:mi mathvariant="normal">s</mml:mi><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">1</mml:mn></mml:mrow></mml:msubsup><mml:msup><mml:mi mathvariant="bold">W</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mo>×</mml:mo><mml:mn mathvariant="normal">3</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>

          where <inline-formula><mml:math id="M9" display="inline"><mml:mrow><mml:mi mathvariant="bold-italic">r</mml:mi><mml:mo>=</mml:mo><mml:mo>[</mml:mo><mml:mi>x</mml:mi><mml:mo>,</mml:mo><mml:mi>y</mml:mi><mml:mo>,</mml:mo><mml:mi>z</mml:mi><mml:msup><mml:mo>]</mml:mo><mml:mi mathvariant="normal">T</mml:mi></mml:msup></mml:mrow></mml:math></inline-formula>, <inline-formula><mml:math id="M10" display="inline"><mml:mrow><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>∼</mml:mo><mml:mi mathvariant="script">N</mml:mi><mml:mo>(</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>,</mml:mo><mml:mn mathvariant="normal">1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>, and the sine/cosine transformations are applied element-wise followed by concatenation along the feature axis. Hence, for <inline-formula><mml:math id="M11" display="inline"><mml:mi>L</mml:mi></mml:math></inline-formula> length scales (<inline-formula><mml:math id="M12" display="inline"><mml:mrow><mml:msub><mml:mi mathvariant="normal">ℓ</mml:mi><mml:mi mathvariant="normal">s</mml:mi></mml:msub><mml:mo>∀</mml:mo><mml:mi>s</mml:mi><mml:mo>=</mml:mo><mml:mn mathvariant="normal">1</mml:mn><mml:mi mathvariant="normal">…</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:math></inline-formula>), we get a <inline-formula><mml:math id="M13" display="inline"><mml:mrow><mml:mn mathvariant="normal">2</mml:mn><mml:mi>M</mml:mi><mml:mi>L</mml:mi></mml:mrow></mml:math></inline-formula>-dimensional feature vector, which is then fed into the subsequent multi-layer perceptron to get the implicit field value (Fig. <xref ref-type="fig" rid="F1"/>a).</p>

      <fig id="F1" specific-use="star"><label>Figure 1</label><caption><p id="d2e774">Overview of the <italic>curlew</italic> geological modelling approach, which combines <bold>(a)</bold> Random Fourier Feature (RFF) mappings, which transform input (spatial) coordinates into higher-dimensional Fourier features, with neural fields (multi-layer perceptrons) to predict implicit field values and gradients, guided by geological constraints. Different geological structures are each parameterised with their own neural field, such that a model is constructed from a temporally sequential combination of events (e.g., unconformities, faults, and dykes). Generative (stratigraphic deposition), kinematic (fault displacement), and dilative (dyke intrusion) events are chained through differentiable relationships, allowing integrated modelling and semi-supervised updates via a learnable forward model informed by quantitative property data and lithological classifications.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f01.png"/>

        </fig>

      <p id="d2e789">This RFF approach enhances the model's ability to learn high-frequency geological features. Furthermore, the encoding introduces inherent stochasticity into the interpolation process. Models trained with different seeds will learn different representations of the subsurface geometry for the same constraints, due to the random nature of the projection directions. Therefore, we can generate model ensembles by changing the seed of the random number generator to get different projections for each model. This becomes useful for stochastic uncertainty estimation, as multiple outputs can be generated without perturbing the input data <xref ref-type="bibr" rid="bib1.bibx62" id="paren.44"><named-content content-type="pre">as is typical in established approaches; e.g.,</named-content></xref>. In specific cases, it theoretically also allows known information on e.g., fold wavelength to be indirectly integrated as a model parameter by seeding the Fourier frequencies (and potentially directions) according to measured fold geometry.</p>
      <p id="d2e797">The RFF encoding is followed by a standard multi-layer perceptron (MLP). The MLP block applies non-linear activation functions to all hidden layers. Because our framework computes derivatives via automatic differentiation (AD), the chosen activations must be <inline-formula><mml:math id="M14" display="inline"><mml:mrow><mml:msup><mml:mi>C</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msup></mml:mrow></mml:math></inline-formula> differentiable (as the network requires a second backward pass for the optimisation). Functions lacking this property, such as <italic>ReLU</italic>, produce abrupt edges in the resulting interpolation. Even among <inline-formula><mml:math id="M15" display="inline"><mml:mrow><mml:msup><mml:mi>C</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msup></mml:mrow></mml:math></inline-formula> differentiable options, performances may vary. For instance, the hyperbolic tangent (TanH) is stable, but its second-order derivatives are small and prone to saturation, which can impede convergence. Empirical testing showed that Swish <xref ref-type="bibr" rid="bib1.bibx46" id="paren.45"><named-content content-type="pre"><italic>SiLU</italic>,</named-content></xref> and <italic>Mish</italic>
<xref ref-type="bibr" rid="bib1.bibx39" id="paren.46"/> provided the best overall results. We have also experimented with options in which the Fourier features are directly connected to the predicted scalar field with a single layer of neurons (i.e. with no hidden layers and no activation functions). Early tests suggest that this minimalist neural field performs well in many situations, so we suggest that this architecture is used as a starting point (and extra complexity is added only if needed).</p>
</sec>
<sec id="Ch1.S3.SS2">
  <label>3.2</label><title>Loss functions</title>
      <p id="d2e848">A neural field stores the implicit field within the weights and biases of the MLP block. These weights and biases are trained using a combination of local and global losses (the RFF projection remains fixed after initialisation). Local losses are accumulated at specific coordinates for which there is information on the implicit field value, its gradient, or its relation to other points in the model (see Sect. 2.2). Global losses are accumulated either over a grid of points that covers (or extends slightly beyond) the model domain, or (typically) on points that are randomly sampled from a grid during each training epoch. We have implemented loss functions in <italic>curlew</italic> such that each of these aspects can be introduced via corresponding hyperparameters. However, we recommend that any specific neural field should only use two to four of these loss functions (usually a local/relational loss coupled with a global loss), with the remaining terms disabled by setting their corresponding hyperparameter to zero. Every additional loss term adds an extra objective for the optimiser to minimise, quickly turning the interpolation into a complicated multi-objective optimisation problem (see Sect. 5.3).</p>
<sec id="Ch1.S3.SS2.SSS1">
  <label>3.2.1</label><title>Local losses – geological data</title>
      <p id="d2e861">Each local constraint is defined by a coordinate and a value, represented in <italic>curlew</italic> as pairs of arrays to allow vectorised <italic>pytorch</italic> <xref ref-type="bibr" rid="bib1.bibx41" id="paren.47"/> operations. The input coordinates can be presented in any Euclidean coordinate system (e.g., UTM) as the feature mapping has a normalisation effect. The values can represent known stratigraphic position (value constraints), younging direction (gradient constraints) or strike and dip (tangent constraints). Therefore, the local loss <inline-formula><mml:math id="M16" display="inline"><mml:mrow><mml:msub><mml:mi mathvariant="script">L</mml:mi><mml:mi mathvariant="normal">l</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> is

              <disp-formula id="Ch1.E2" content-type="numbered"><label>2</label><mml:math id="M17" display="block"><mml:mrow><mml:msub><mml:mi mathvariant="script">L</mml:mi><mml:mi mathvariant="normal">l</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mover><mml:mover accent="true" class="overbrace"><mml:mrow><mml:mi mathvariant="italic">α</mml:mi><mml:mo>‖</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:msub><mml:mi mathvariant="normal">p</mml:mi><mml:mn mathvariant="normal">1</mml:mn></mml:msub></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:msub><mml:mi mathvariant="normal">c</mml:mi><mml:mn mathvariant="normal">1</mml:mn></mml:msub></mml:mrow></mml:msub><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow><mml:mo mathvariant="normal">︷</mml:mo></mml:mover><mml:mtext>Value Loss</mml:mtext></mml:mover><mml:mo>+</mml:mo><mml:mover><mml:mover accent="true" class="overbrace"><mml:mrow><mml:mi mathvariant="italic">β</mml:mi><mml:mo>‖</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:msub><mml:mi mathvariant="normal">p</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:msub><mml:mi mathvariant="normal">c</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:msub><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow><mml:mo mathvariant="normal">︷</mml:mo></mml:mover><mml:mtext>Gradient Loss</mml:mtext></mml:mover><mml:mo>+</mml:mo><mml:mover><mml:mover class="overbrace" accent="true"><mml:mrow><mml:mi mathvariant="italic">γ</mml:mi><mml:mfenced close=")" open="("><mml:mrow><mml:mn 
mathvariant="normal">1</mml:mn><mml:mo>-</mml:mo><mml:mfenced open="|" close="|"><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mrow><mml:msub><mml:mi mathvariant="normal">p</mml:mi><mml:mn mathvariant="normal">3</mml:mn></mml:msub></mml:mrow></mml:msub><mml:mo>⋅</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mrow><mml:msub><mml:mi mathvariant="normal">c</mml:mi><mml:mn mathvariant="normal">3</mml:mn></mml:msub></mml:mrow></mml:msub></mml:mrow></mml:mfenced></mml:mrow></mml:mfenced></mml:mrow><mml:mo mathvariant="normal">︷</mml:mo></mml:mover><mml:mtext>Tangent Loss</mml:mtext></mml:mover></mml:mrow></mml:math></disp-formula>

            
            where <inline-formula><mml:math id="M18" display="inline"><mml:mi>f</mml:mi></mml:math></inline-formula> refers to the implicit field value and <inline-formula><mml:math id="M19" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover></mml:math></inline-formula> refers to the normalised implicit field gradient. The subscripts p and c refer to the predicted and constraint (measured) terms, <inline-formula><mml:math id="M20" display="inline"><mml:mrow><mml:mo>‖</mml:mo><mml:mo>⋅</mml:mo><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> is the <inline-formula><mml:math id="M21" display="inline"><mml:mrow><mml:mi>L</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:mrow></mml:math></inline-formula> norm, and <inline-formula><mml:math id="M22" display="inline"><mml:mi mathvariant="italic">α</mml:mi></mml:math></inline-formula>, <inline-formula><mml:math id="M23" display="inline"><mml:mi mathvariant="italic">β</mml:mi></mml:math></inline-formula>, and <inline-formula><mml:math id="M24" display="inline"><mml:mi mathvariant="italic">γ</mml:mi></mml:math></inline-formula> are the corresponding hyperparameters. As with any multi-objective optimisation problem, careful selection of these hyperparameters can be crucial for achieving good results. Also note that where the absolute direction of the gradient is unknown, the tangent loss accumulates errors proportional to the angular distance between the predicted and measured orientation axes by maximising cosine similarity.</p>
      <p id="d2e1092">Relational constraints (inequalities and equalities) are implemented using a computationally efficient approach that randomly samples <inline-formula><mml:math id="M25" display="inline"><mml:mi>P</mml:mi></mml:math></inline-formula> pairs of points from the left- and right-hand sides of each (in)equality. This avoids needing to construct a large pairwise difference matrix, which would be prohibitively expensive for large datasets, and is analogous to widely used mini-batch approaches used when training deep learning models. Thus, if we construct a difference term <inline-formula><mml:math id="M26" display="inline"><mml:mrow><mml:mi mathvariant="normal">Δ</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mo>∈</mml:mo><mml:mtext>LHS</mml:mtext></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mo>∈</mml:mo><mml:mtext>RHS</mml:mtext></mml:mrow></mml:msub></mml:mrow></mml:math></inline-formula>, the relational loss term <inline-formula><mml:math id="M27" display="inline"><mml:mrow><mml:msub><mml:mi mathvariant="script">L</mml:mi><mml:mi mathvariant="normal">r</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> can thus be expressed as:

              <disp-formula id="Ch1.E3" content-type="numbered"><label>3</label><mml:math id="M28" display="block"><mml:mrow><mml:mtable columnspacing="1em" rowspacing="0.2ex" class="aligned" displaystyle="true" columnalign="right left"><mml:mtr><mml:mtd><mml:mstyle class="stylechange" displaystyle="true"/></mml:mtd><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:msub><mml:mi mathvariant="script">L</mml:mi><mml:mi mathvariant="normal">r</mml:mi></mml:msub><mml:mo>=</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle displaystyle="true" class="stylechange"/></mml:mtd><mml:mtd><mml:mrow><mml:mstyle class="stylechange" displaystyle="true"/><mml:mfenced open="{" close=""><mml:mtable rowspacing="0.2ex" class="cases" columnspacing="1em" columnalign="left left" framespacing="0em"><mml:mtr><mml:mtd><mml:mrow><mml:mi mathvariant="italic">λ</mml:mi><mml:mo>‖</mml:mo><mml:mi mathvariant="normal">Δ</mml:mi><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mo>∀</mml:mo><mml:mtext>LHS</mml:mtext><mml:mo>=</mml:mo><mml:mtext>RHS</mml:mtext><mml:mo>,</mml:mo><mml:mtext>Contact equality</mml:mtext></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mi mathvariant="italic">λ</mml:mi><mml:mo>min⁡</mml:mo><mml:mo>(</mml:mo><mml:mi mathvariant="normal">Δ</mml:mi><mml:mo>,</mml:mo><mml:mn mathvariant="normal">0</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mo>∀</mml:mo><mml:mtext>LHS</mml:mtext><mml:mo>&gt;</mml:mo><mml:mtext>RHS</mml:mtext><mml:mo>,</mml:mo><mml:mtext>Above contact inequality</mml:mtext></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mrow><mml:mi mathvariant="italic">λ</mml:mi><mml:mo>max⁡</mml:mo><mml:mo>(</mml:mo><mml:mi mathvariant="normal">Δ</mml:mi><mml:mo>,</mml:mo><mml:mn 
mathvariant="normal">0</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mo>∀</mml:mo><mml:mtext>LHS</mml:mtext><mml:mo>&lt;</mml:mo><mml:mtext>RHS</mml:mtext><mml:mo>,</mml:mo><mml:mtext>Below contact inequality</mml:mtext></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mfenced></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mrow></mml:math></disp-formula>

            where LHS and RHS refer to the set of points sampled above and below a specific geological interface (inequalities), or along a single interface (equalities), and <inline-formula><mml:math id="M29" display="inline"><mml:mi mathvariant="italic">λ</mml:mi></mml:math></inline-formula> is the corresponding hyperparameter. The first case in Eq. (<xref ref-type="disp-formula" rid="Ch1.E3"/>) corresponds to the equality constraints, where <inline-formula><mml:math id="M30" display="inline"><mml:mi>P</mml:mi></mml:math></inline-formula> points are drawn randomly from the LHS and RHS sets each (i.e. <inline-formula><mml:math id="M31" display="inline"><mml:mi>P</mml:mi></mml:math></inline-formula> pairs), and the absolute difference between the predicted implicit field values at these points is minimised. The remaining cases correspond to the inequality constraints, where the clamp function is used to ignore differences above or below 0, as per the direction of the inequality.</p>
</sec>
<sec id="Ch1.S3.SS2.SSS2">
  <label>3.2.2</label><title>Global losses – geological rules</title>
      <p id="d2e1297">Global losses control more general properties of the interpolated implicit field. This can be used to e.g., encourage constant gradient (resulting in geological units that minimise changes in thickness) or discourage geologically implausible closed isosurfaces (bubbles). Unlike local losses, they are not related to any specific data, and are evaluated on either a grid of points covering the model domain, or at a set of <inline-formula><mml:math id="M32" display="inline"><mml:mi>N</mml:mi></mml:math></inline-formula> points chosen randomly across the model domain during each epoch. We have currently implemented three global losses <inline-formula><mml:math id="M33" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi mathvariant="script">L</mml:mi><mml:mi mathvariant="normal">g</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> in <italic>curlew</italic>: flatness loss, thickness loss and monotonicity loss. These can be written as: 

              <disp-formula id="Ch1.E4" content-type="numbered"><label>4</label><mml:math id="M34" display="block"><mml:mtable class="aligned" rowspacing="0.2ex" columnspacing="1em" displaystyle="true" columnalign="right left"><mml:mtr><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:msub><mml:mi mathvariant="script">L</mml:mi><mml:mi mathvariant="normal">g</mml:mi></mml:msub></mml:mrow></mml:mtd><mml:mtd><mml:mrow><mml:mstyle displaystyle="true" class="stylechange"/><mml:mo>=</mml:mo><mml:mover><mml:mover class="overbrace" accent="true"><mml:mrow><mml:mi mathvariant="italic">η</mml:mi><mml:mo>‖</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mo>∈</mml:mo><mml:mi>G</mml:mi></mml:mrow></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mtext>trend</mml:mtext></mml:msub><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow><mml:mo mathvariant="normal">︷</mml:mo></mml:mover><mml:mtext>Flatness Loss</mml:mtext></mml:mover><mml:mo>+</mml:mo><mml:mover><mml:mover accent="true" class="overbrace"><mml:mrow><mml:mi mathvariant="italic">ϵ</mml:mi><mml:mfenced close=")" open="("><mml:mstyle displaystyle="true"><mml:mfrac style="display"><mml:mrow><mml:mtext>Var</mml:mtext><mml:mfenced open="(" close=")"><mml:mrow><mml:mo>‖</mml:mo><mml:msub><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mo>∈</mml:mo><mml:mi>G</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:mfenced></mml:mrow><mml:mrow><mml:mi mathvariant="double-struck">E</mml:mi><mml:mo>[</mml:mo><mml:mo>‖</mml:mo><mml:msub><mml:mi 
mathvariant="bold-italic">g</mml:mi><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mo>∈</mml:mo><mml:mi>G</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub><mml:msup><mml:mo>]</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msup></mml:mrow></mml:mfrac></mml:mstyle></mml:mfenced></mml:mrow><mml:mo mathvariant="normal">︷</mml:mo></mml:mover><mml:mtext>Thickness Loss</mml:mtext></mml:mover></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mstyle class="stylechange" displaystyle="true"/></mml:mtd><mml:mtd><mml:mrow><mml:mstyle class="stylechange" displaystyle="true"/><mml:mo>+</mml:mo><mml:mover><mml:mover accent="true" class="overbrace"><mml:mrow><mml:mi mathvariant="italic">κ</mml:mi><mml:mfenced open="|" close="|"><mml:mrow><mml:mi mathvariant="normal">∇</mml:mi><mml:mo>⋅</mml:mo><mml:mfenced open="(" close=")"><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mrow><mml:mi mathvariant="normal">p</mml:mi><mml:mo>∈</mml:mo><mml:mi>G</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced></mml:mrow></mml:mfenced></mml:mrow><mml:mo mathvariant="normal">︷</mml:mo></mml:mover><mml:mtext>Monotonicity Loss</mml:mtext></mml:mover></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:math></disp-formula>

            where <inline-formula><mml:math id="M35" display="inline"><mml:mi>G</mml:mi></mml:math></inline-formula> is the set of grid points sampled in each epoch, <inline-formula><mml:math id="M36" display="inline"><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mtext>trend</mml:mtext></mml:msub></mml:mrow></mml:math></inline-formula> is the user-defined global gradient trend, <inline-formula><mml:math id="M37" display="inline"><mml:mrow><mml:mi mathvariant="normal">∇</mml:mi><mml:mo>⋅</mml:mo><mml:mo>(</mml:mo><mml:mo>⋅</mml:mo><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is the divergence operator, and <inline-formula><mml:math id="M38" display="inline"><mml:mi mathvariant="italic">η</mml:mi></mml:math></inline-formula>, <inline-formula><mml:math id="M39" display="inline"><mml:mi mathvariant="italic">ϵ</mml:mi></mml:math></inline-formula>, and <inline-formula><mml:math id="M40" display="inline"><mml:mi mathvariant="italic">κ</mml:mi></mml:math></inline-formula> are the corresponding hyperparameters. Flatness loss acts as a (weak) gradient constraint on each grid point to encourage unconstrained parts of the implicit field to have a specific orientation. The thickness loss penalises the relative deviation of gradient magnitudes from their mean, encouraging a uniform and non-zero gradient magnitude (i.e. constant thickness). It is equivalent to minimising the squared coefficient of variation (<inline-formula><mml:math id="M41" display="inline"><mml:mrow><mml:msup><mml:mtext>CV</mml:mtext><mml:mn mathvariant="normal">2</mml:mn></mml:msup></mml:mrow></mml:math></inline-formula>) of the gradient norm – that is, the variance normalised by the squared mean – making the penalty scale-invariant.</p>
      <p id="d2e1557">Finally, we have implemented a loss that discourages local maxima and minima (bubbles) in the interpolated implicit field. This ‘monotonicity’ loss is defined as the divergence of the normalised (unit length) gradient vector field. Rather than directly penalising the Laplacian (which would aim for an ideal harmonic implicit field), we adopt a curvature-based divergence regulariser, by using normalised gradients. This approach is analogous to total variation regularisation <xref ref-type="bibr" rid="bib1.bibx48" id="paren.48"/> and mean curvature flow formulations <xref ref-type="bibr" rid="bib1.bibx40" id="paren.49"/>, and in this context helps to preserve geological interfaces and prevents the formation of spurious internal extrema, as recently demonstrated for neural field applications <xref ref-type="bibr" rid="bib1.bibx18 bib1.bibx51" id="paren.50"/>. To save memory, the second derivative (Hessian) matrices used to compute the monotonicity loss are estimated numerically. This numerical estimation also allows us to have an external control on the scale of curvatures we want to minimise, by controlling the size of the grid-step used to numerically compute the Hessian.</p>
</sec>
</sec>
<sec id="Ch1.S3.SS3">
  <label>3.3</label><title>Generative and kinematic events – a framework for combining neural fields</title>
      <p id="d2e1578">The aforementioned sections explain the constraints and controls over the interpolation of a single, smooth, and continuous implicit field. However, most SGMs contain multiple geological events, separated by unconformable, intrusive or faulted contacts. Each of these events typically need to be represented using their own interpolated implicit field, before temporally guided intersections are used to derive a combined model <xref ref-type="bibr" rid="bib1.bibx20 bib1.bibx61" id="paren.51"/>.</p>
      <p id="d2e1584">In <italic>curlew</italic>, this is achieved using overprint and/or offset relations (Fig. <xref ref-type="fig" rid="F1"/>b). Therefore, most neural fields in a model built using <italic>curlew</italic> represent a geological event (Table <xref ref-type="table" rid="T1"/>) that (1) creates new units (generative events, such as e.g., depositions or intrusions) or (2) displaces older units (kinematic events, such as faults). Sheet intrusions (e.g., dykes) are an example of events that do both – generate a new unit (the dyke fill) while displacing the older units (due to opening along the dyke).</p>

<table-wrap id="T1" specific-use="star"><label>Table 1</label><caption><p id="d2e1600">Overview of structural fields available in <italic>curlew</italic>, highlighting generative and kinematic components. Domain boundaries partition the model into separate domains, each governed by distinct implicit fields or chains of implicit fields (<inline-formula><mml:math id="M42" display="inline"><mml:mi>L</mml:mi></mml:math></inline-formula>, <inline-formula><mml:math id="M43" display="inline"><mml:mi>G</mml:mi></mml:math></inline-formula>).</p></caption><oasis:table frame="topbot"><oasis:tgroup cols="4">
     <oasis:colspec colnum="1" colname="col1" align="left"/>
     <oasis:colspec colnum="2" colname="col2" align="justify" colwidth="25mm"/>
     <oasis:colspec colnum="3" colname="col3" align="justify" colwidth="35mm"/>
     <oasis:colspec colnum="4" colname="col4" align="left"/>
     <oasis:thead>
       <oasis:row rowsep="1">
         <oasis:entry namest="col1" nameend="col4" align="center">Structural Fields in <italic>curlew</italic></oasis:entry>
       </oasis:row>
     </oasis:thead>
     <oasis:tbody>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">Name</oasis:entry>
         <oasis:entry colname="col2" align="left">Use Cases</oasis:entry>
         <oasis:entry colname="col3" align="left">Generative Component</oasis:entry>
         <oasis:entry colname="col4">Kinematic Component</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">Stratigraphy</oasis:entry>
         <oasis:entry colname="col2" align="left">Layer cakes, folds, unconformities</oasis:entry>
         <oasis:entry colname="col3" align="left"><inline-formula><mml:math id="M50" display="inline"><mml:mrow><mml:mi>f</mml:mi><mml:mo>≤</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> or <inline-formula><mml:math id="M51" display="inline"><mml:mrow><mml:mi>f</mml:mi><mml:mo>≥</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, where <inline-formula><mml:math id="M52" display="inline"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> is the boundary isovalue</oasis:entry>
         <oasis:entry colname="col4">None</oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">Fault</oasis:entry>
         <oasis:entry colname="col2" align="left">Faults, shear zones</oasis:entry>
         <oasis:entry colname="col3" align="left">None</oasis:entry>
         <oasis:entry colname="col4"><inline-formula><mml:math id="M53" display="inline"><mml:mrow><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo>=</mml:mo><mml:mi>D</mml:mi><mml:mstyle displaystyle="true"><mml:mfrac style="display"><mml:mrow><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>-</mml:mo><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>⋅</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>)</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>‖</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>-</mml:mo><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>⋅</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>)</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:mfrac></mml:mstyle></mml:mrow></mml:math></inline-formula></oasis:entry>
       </oasis:row>
       <oasis:row rowsep="1">
         <oasis:entry colname="col1">Sheet Intrusion</oasis:entry>
         <oasis:entry colname="col2" align="left">Dykes, sills</oasis:entry>
         <oasis:entry colname="col3" align="left"><inline-formula><mml:math id="M54" display="inline"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub><mml:mo>≤</mml:mo><mml:mi>f</mml:mi><mml:mo>≤</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, where <inline-formula><mml:math id="M55" display="inline"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>, <inline-formula><mml:math id="M56" display="inline"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">1</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> are contact isovalues</oasis:entry>
         <oasis:entry colname="col4"><inline-formula><mml:math id="M57" display="inline"><mml:mrow><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo>=</mml:mo><mml:mi>A</mml:mi><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula></oasis:entry>
       </oasis:row>
       <oasis:row>
         <oasis:entry colname="col1">Domain Boundary</oasis:entry>
         <oasis:entry colname="col2" align="left">Onlap geometries, large offset faults</oasis:entry>
         <oasis:entry colname="col3" align="left"><inline-formula><mml:math id="M58" display="inline"><mml:mrow><mml:mi>M</mml:mi><mml:mo>=</mml:mo><mml:mfenced open="{" close=""><mml:mtable class="cases" rowspacing="0.2ex" columnspacing="1em" columnalign="left left" framespacing="0em"><mml:mtr><mml:mtd><mml:mi>L</mml:mi></mml:mtd><mml:mtd><mml:mrow><mml:mi>f</mml:mi><mml:mo>≤</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd><mml:mi>G</mml:mi></mml:mtd><mml:mtd><mml:mrow><mml:mi>f</mml:mi><mml:mo>&gt;</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mfenced></mml:mrow></mml:math></inline-formula></oasis:entry>
         <oasis:entry colname="col4">None</oasis:entry>
       </oasis:row>
     </oasis:tbody>
   </oasis:tgroup></oasis:table><table-wrap-foot><p id="d2e1620"><inline-formula><mml:math id="M44" display="inline"><mml:mi>f</mml:mi></mml:math></inline-formula>: Implicit field value; <inline-formula><mml:math id="M45" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover></mml:math></inline-formula>: Implicit field gradient direction; <inline-formula><mml:math id="M46" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:math></inline-formula>: Shortening direction; <inline-formula><mml:math id="M47" display="inline"><mml:mi>D</mml:mi></mml:math></inline-formula>: Fault offset; <inline-formula><mml:math id="M48" display="inline"><mml:mi>A</mml:mi></mml:math></inline-formula>: Sheet aperture; <inline-formula><mml:math id="M49" display="inline"><mml:mi mathvariant="bold-italic">u</mml:mi></mml:math></inline-formula>: Displacement.</p></table-wrap-foot></table-wrap>

      <p id="d2e1989">Generative events in <italic>curlew</italic> are combined with older ones using an inequality: implicit field values representing deposition above an unconformity “overprint” older values where they exceed a threshold value, and values defining a dyke overprint older values within a specified range (proportional to the dyke aperture). Conversely, kinematic events define vector displacements (based on the gradient-direction and/or tangent plane of the interpolated implicit field) that retro-deform the input coordinates used to constrain (or evaluate) older fields. Importantly these vector displacements are differentiable, allowing the combined training of multiple neural fields to learn additional unknown parameters (e.g., fault slip magnitude).</p>
      <p id="d2e1995">Overprint relations are implemented using a differentiable sigmoid function, such that values from a younger implicit field replace those of older ones based on an inequality. For unconformities, this inequality specifies a basement value below which the interpolated values are ignored (to retain predictions of basement lithology from older neural fields). An Event ID (<inline-formula><mml:math id="M59" display="inline"><mml:mrow><mml:msub><mml:mi>e</mml:mi><mml:mtext>id</mml:mtext></mml:msub></mml:mrow></mml:math></inline-formula>, see Fig. <xref ref-type="fig" rid="F1"/>b) is assigned to each overprinted location to keep track of the neural field that generated each implicit field value (and distinguish regions representing e.g., the interior of a dyke from those representing the surrounding stratigraphy). The geological operators used to implement kinematic events in <italic>curlew</italic> are outlined in the following sections, though terminology for these operations varies across the literature <xref ref-type="bibr" rid="bib1.bibx20 bib1.bibx21 bib1.bibx6 bib1.bibx24" id="paren.52"/>.</p>
<sec id="Ch1.S3.SS3.SSS1">
  <label>3.3.1</label><title>Faults</title>
      <p id="d2e2023">Faults are implemented in <italic>curlew</italic> using displacement fields. These fields modify existing geological structures by offsetting them along geologically plausible slip directions. The displacement direction is determined based on the interaction between the regional stress field and the local geometry of the fault surface.</p>
      <p id="d2e2029">Specifically, the slip direction vector <inline-formula><mml:math id="M60" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:math></inline-formula> is computed by projecting a vector representing the maximum shortening direction <inline-formula><mml:math id="M61" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover></mml:math></inline-formula> onto the plane perpendicular to the normalised gradient <inline-formula><mml:math id="M62" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover></mml:math></inline-formula> of the implicit field defining the fault surface:

              <disp-formula id="Ch1.E5" content-type="numbered"><label>5</label><mml:math id="M63" display="block"><mml:mrow><mml:mover accent="true"><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>=</mml:mo><mml:mstyle displaystyle="true"><mml:mfrac style="display"><mml:mrow><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>-</mml:mo><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>⋅</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>)</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:mrow><mml:mrow><mml:mo>‖</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>-</mml:mo><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>⋅</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>)</mml:mo><mml:mspace linebreak="nobreak" width="0.125em"/><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:msub><mml:mo>‖</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:mfrac></mml:mstyle><mml:mo>.</mml:mo></mml:mrow></mml:math></disp-formula>

            Here, the term <inline-formula><mml:math id="M64" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>⋅</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>)</mml:mo><mml:mspace width="0.125em" linebreak="nobreak"/><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover></mml:mrow></mml:math></inline-formula> gives the component of the shortening vector normal to the fault plane. Subtracting this from <inline-formula><mml:math id="M65" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:math></inline-formula> yields the component tangential to the fault, ensuring that the resulting slip direction is (locally) fault-parallel and consistent with mechanical considerations (Fig. <xref ref-type="fig" rid="F2"/>a). The resulting vector is then normalised to give a unit slip direction vector <inline-formula><mml:math id="M66" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:math></inline-formula> that can be scaled by the desired displacement magnitude (<inline-formula><mml:math id="M67" display="inline"><mml:mi>D</mml:mi></mml:math></inline-formula>) to produce the final displacement vector <inline-formula><mml:math id="M68" display="inline"><mml:mi mathvariant="bold-italic">u</mml:mi></mml:math></inline-formula>.</p>

      <fig id="F2" specific-use="star"><label>Figure 2</label><caption><p id="d2e2212">Fault displacement in <italic>curlew</italic>. The displacement direction at every point is calculated by projecting the (pre-defined) shortening direction <inline-formula><mml:math id="M69" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> onto the tangent plane of the implicit field that defines the fault geometry <bold>(a)</bold>. The resulting displacement direction vector <inline-formula><mml:math id="M70" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:mover accent="true"><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> is then scaled by the (optionally learnable) offset magnitude <inline-formula><mml:math id="M71" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:mi>D</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula> to derive a displacement vector <inline-formula><mml:math id="M72" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>. For curved fault systems (i.e., where the curvature of the fault plane is relevant with respect to the fault offset), <italic>curlew</italic> offers an optional correction <bold>(b)</bold>, which uses the difference between the implicit field values at the displaced and initial locations, to project the point back onto the initial isosurface. This approximation results in some change in the displacement magnitude, but for most realistic fault geometries this error is negligible.</p></caption>
            <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f02.png"/>

          </fig>

      <p id="d2e2289">For infinite faults, the magnitude is defined by a single scalar value (optionally learnable). For finite faults, the magnitude would need to vary as a function of distance from the fault centre – following empirical displacement profiles <xref ref-type="bibr" rid="bib1.bibx21 bib1.bibx20" id="paren.53"/>. Once the displacement vector is determined, its direction (sign) is assigned by applying two sigmoid functions that impose opposite slip directions on either side (hangingwall and footwall) of the fault. The sharpness of the sigmoid can be tuned to simulate either brittle fault behaviour (sharp transition) or ductile shear zones (smooth transition), enabling the generation of simple drag folds adjacent to brittle faults.</p>
      <p id="d2e2295">For highly curved (with respect to displacement magnitude) faults, <italic>curlew</italic> applies an optional gradient correction to correct for curvature in the displacement vector between the initial and displaced positions (Fig. <xref ref-type="fig" rid="F2"/>b). This correction ensures that displaced points remain (approximately) on the same isosurface of the fault implicit field, and is given by:

              <disp-formula id="Ch1.E6" content-type="numbered"><label>6</label><mml:math id="M73" display="block"><mml:mrow><mml:msub><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mtext>corr</mml:mtext></mml:msub><mml:mo>=</mml:mo><mml:mi mathvariant="bold-italic">u</mml:mi><mml:mo>+</mml:mo><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>)</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mi mathvariant="normal">u</mml:mi></mml:msub></mml:mrow></mml:math></disp-formula>

            where <inline-formula><mml:math id="M74" display="inline"><mml:mrow><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover><mml:mi mathvariant="normal">u</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> is the gradient unit vector at the displaced location, <inline-formula><mml:math id="M75" display="inline"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> and <inline-formula><mml:math id="M76" display="inline"><mml:mrow><mml:msub><mml:mi>f</mml:mi><mml:mi>u</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> are the implicit field values at the original and displaced positions, respectively. The term <inline-formula><mml:math id="M77" display="inline"><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>-</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>u</mml:mi></mml:msub><mml:mo>)</mml:mo><mml:msub><mml:mover accent="true"><mml:mi mathvariant="bold-italic">g</mml:mi><mml:mo stretchy="false" mathvariant="normal">^</mml:mo></mml:mover><mml:mi mathvariant="normal">u</mml:mi></mml:msub></mml:mrow></mml:math></inline-formula> projects the scalar mismatch back along the gradient, correcting for small misalignments introduced by displacement in regions of high curvature or gradient variation.</p>
</sec>
<sec id="Ch1.S3.SS3.SSS2">
  <label>3.3.2</label><title>Sheet intrusions</title>
      <p id="d2e2419">Sheet intrusions, such as dykes and sills, are implemented using the logic of both generative and kinematic implicit fields. These fields create the new geological units (solidified magma), and displace older ones to account for dilation during intrusion.</p>
      <p id="d2e2422">The generative component uses an overprinting relationship defined between an upper and lower scalar bound, assigning new implicit field values (and event IDs) between two isosurfaces that represent the intrusion contacts. An offset function is also defined to move pre-existing structures outward, in the direction of the gradient of the intrusion's implicit field. The gradient is flipped – based on the sign of the implicit field – so that vectors consistently point away from the intrusion's centerplane, which is represented by the zero isosurface. An aperture term scales the gradient's magnitude to reflect the intrusion's thickness.</p>
</sec>
</sec>
<sec id="Ch1.S3.SS4">
  <label>3.4</label><title>Domain boundaries – multi-domain structural models in <italic>curlew</italic></title>
      <p id="d2e2437">The final structural element implemented in <italic>curlew</italic> is the domain boundary, which enables the segmentation of geological models into distinct sub-domains, each governed by its own sequence of (older) geological events. Domain boundaries thus partition the model into separate neural field chains, based on inequalities defined on the domain boundary implicit field, and can be combined to construct complex geological structures and relations.</p>
      <p id="d2e2443">A typical use case arises when sedimentary units are deposited above an unconformity but pinch-out laterally (onlap geometries). Because such deposits do not follow the generative logic of a conformal event (i.e., they are not stratigraphically contiguous with the unconformity), they cannot be represented within a single event chain. Instead, the unconformity is defined as a domain boundary, which splits the model into two stratigraphically and structurally independent domains: one below the unconformity and one above it.</p>
      <p id="d2e2446">Another key application of domain boundaries is in the modelling of major fault zones. When a fault induces large displacement, or juxtaposes basement rock against cover sequences, the stratigraphic relationship between the foot- and hanging-wall often becomes unresolvable. For such structurally complex settings, the fault is most simply defined as a domain boundary separating sub-models with different geological histories.</p>
      <p id="d2e2450">This formalism allows <italic>curlew</italic> to integrate geologically inconsistent or discontinuous regions within a unified framework. It removes the need for users to manually partition their models or construct separate simulations for each geological block, and facilitates instances where domain boundaries are affected by younger deformations (e.g. faults).</p>
</sec>
</sec>
<sec id="Ch1.S4">
  <label>4</label><title>Results</title>
      <p id="d2e2466">We have applied <italic>curlew</italic> to a set of synthetic examples that explore the various capabilities of our neural field based interpolation approach, and illustrate different aspects of the underlying loss function. These are presented in the following sections, followed by an application to a real example constrained by data extracted from a digital outcrop model from Newcastle, Australia.</p>
      <p id="d2e2472">To help document and introduce potential users to specifics of the <italic>curlew</italic> modelling approach, each of these examples is provided as a jupyter notebook (see Code and Data availability section).</p>
<sec id="Ch1.S4.SS1">
  <label>4.1</label><title>Folded stratigraphy</title>
      <p id="d2e2486">To illustrate the various loss terms described above, and to showcase the importance of the length scale parameter used during the Fourier feature projection, we have generated a simple test model containing a folded stratigraphic sequence (Fig. <xref ref-type="fig" rid="F3"/>). This model involves two generations of folding events, a small wavelength small amplitude folding overprinted on large wavelength large amplitude folds. A Fourier series was used to generate similar folds with relatively sharp noses (Fig. <xref ref-type="fig" rid="F3"/>a). This sequence was then sampled along hypothetical boreholes (Fig. <xref ref-type="fig" rid="F3"/>b) to simulate gradient, value and tangent constraints. Note that while this synthetic example is 2D for ease of visualisation, all of the aforementioned methods (and the successive results) could be applied in 3D too.</p>

      <fig id="F3" specific-use="star"><label>Figure 3</label><caption><p id="d2e2497">Synthetic (analytical) geological model simulating a folded stratigraphy. The implicit field <bold>(a)</bold> was sampled to derive gradient orientations (arrows) and implicit field values (circles) as constraints for a subsequent interpolation. Isosurfaces extracted from the implicit field at predefined seed positions also give a stratigraphic classification <bold>(b)</bold>, which were used to define relational constraints. The red triangles represent the locations of the boreholes along which the constraints were sampled.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f03.png"/>

        </fig>

<sec id="Ch1.S4.SS1.SSS1">
  <label>4.1.1</label><title>Local, relational and global losses</title>
      <p id="d2e2519">We used different combinations of constraints and losses to show the effect of each part of our loss function (Eqs. <xref ref-type="disp-formula" rid="Ch1.E2"/>–<xref ref-type="disp-formula" rid="Ch1.E4"/>) on the interpolated result (Fig. <xref ref-type="fig" rid="F4"/>). To start, we fit a neural field model using each constraint type in isolation (Fig. <xref ref-type="fig" rid="F4"/>a–d), without any additional global losses. The results show that tangent constraints (Fig. <xref ref-type="fig" rid="F4"/>a) on their own are (unsurprisingly) extremely weak, since they only encode the axis of the gradient. This results in closed isosurfaces (i.e. “bubbles”), where the implicit field curls back on itself due to the undefined direction of the gradient.</p>

      <fig id="F4" specific-use="star"><label>Figure 4</label><caption><p id="d2e2534">Interpolated stratigraphy for different combinations of constraints and losses, using the same network architecture (1 layer, 512 neurons), 2000 training epochs, and two (150, 1000) length scales. An early stopping criterion was applied to stop training after 100 epochs with less than <inline-formula><mml:math id="M78" display="inline"><mml:mrow><mml:msup><mml:mn mathvariant="normal">10</mml:mn><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">4</mml:mn></mml:mrow></mml:msup></mml:mrow></mml:math></inline-formula> improvement in the loss. The individual models show results using tangent constraints <bold>(a)</bold>, gradient constraints <bold>(b)</bold>, relational constraints <bold>(c)</bold>, and value constraints <bold>(d)</bold> in isolation. The addition of monotonicity loss to tangent constraints <bold>(e)</bold> and the thickness loss to the gradient constraints <bold>(f)</bold> improves the resulting interpolation. The flexibility afforded by relational <bold>(g, h)</bold> rather than value <bold>(i, j)</bold> constraints also seems to help the model converge to a more geologically realistic solution.</p></caption>
            <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f04.jpg"/>

          </fig>

      <p id="d2e2582">This is partially resolved when gradient constraints are used (Fig. <xref ref-type="fig" rid="F4"/>b). The isosurfaces are now close to monotonic, as the directionality encoded in the gradient constraints forces the implicit field to “flow” from the bottom to the top of the model. However, away from the data, the interpolated field behaves randomly, and includes geologically unlikely variations in gradient magnitude (changes in thickness).</p>
      <p id="d2e2589">The use of equality and inequality (relational) constraints (Fig. <xref ref-type="fig" rid="F4"/>c) results in a smoothly undulating model that correctly matches all of the contacts sampled by the boreholes, as well as maintaining correct stratigraphic relationships across the domain. Interestingly, however, the use of value constraints results in a very different interpolation (Fig. <xref ref-type="fig" rid="F4"/>d) in which the neural field accurately reproduced the values at constraint locations but created artefacts (such as bubbles) elsewhere.</p>
      <p id="d2e2596">Introducing the monotonicity loss removes the bubble artefacts from results obtained with the aforementioned constraint types (Fig. <xref ref-type="fig" rid="F4"/>e and g), but produces an interpolation with potentially unrealistic variations in unit thickness. The monotonicity loss seems to require further fine tuning in combination with value constraints (Fig. <xref ref-type="fig" rid="F4"/>i).</p>
      <p id="d2e2603">Use of the thickness loss term resulted in an interpolation that honours unit thickness (class 1B parallel folds), giving geologically plausible models in combination with gradient and relational constraints. In combination with value constraints, however, the neural field found a solution dominated by angular bubbles that we consider to be unreasonable.</p>
      <p id="d2e2606">These additional global losses thus prove to be extremely useful in constraining the model in areas lacking local constraints, and can be adapted to enforce various geological rules. Interestingly, the global losses also appear to work best with relational constraints (inequalities), rather than value constraints.</p>
</sec>
<sec id="Ch1.S4.SS1.SSS2">
  <label>4.1.2</label><title>Length scales</title>
      <p id="d2e2618">The synthetic folds model also allows us to explore one of the most important parameters for our implementation of a neural field interpolator: the length scale used during the Fourier feature encoding (cf. Sect. 3.1). To show the impact of different length scales, we used relational and gradient constraints in combination with a global monotonicity loss while varying only the length scales used by the neural field (Fig. <xref ref-type="fig" rid="F5"/>).</p>

      <fig id="F5" specific-use="star"><label>Figure 5</label><caption><p id="d2e2625">Interpolated stratigraphy for different pairs of length scales (but retaining the same settings as Fig. <xref ref-type="fig" rid="F4"/>). Small length scales <bold>(a)</bold> result in over-fitting and random artefacts away from the data, whereas large length scales <bold>(d)</bold> result in under-fitting. Note also that the model is most influenced by the smaller length scale (as seen in <bold>b</bold> and <bold>c</bold>), whereas the larger length scale likely has more influence on large scale trends.</p></caption>
            <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f05.png"/>

          </fig>

      <p id="d2e2648">The resulting models highlight the importance of choosing appropriate length scales. Scales that are too small (Fig. <xref ref-type="fig" rid="F5"/>a) result in severe over-fitting and largely meaningless predictions away from the data. Conversely, when the length scale is too large (Fig. <xref ref-type="fig" rid="F5"/>d), the neural field under-fits and cannot make accurate predictions at the constraint locations, due to its limited capability to learn higher frequency variations.</p>
      <p id="d2e2656">For this example, the length scales of (150, 500) seem to be a good trade-off between fitting the data and avoiding random overfitting artefacts (Fig. <xref ref-type="fig" rid="F5"/>b). A comparison of Fig. <xref ref-type="fig" rid="F5"/>b and c suggests that the results are most heavily influenced by the smaller length scale (while the large length scale serves to reproduce the overall trend and so is less sensitive, albeit still important). As a rule of thumb, the longest length scale should be 4 times the longest model dimension, while the shortest should be determined by the data spacing.</p>
</sec>
</sec>
<sec id="Ch1.S4.SS2">
  <label>4.2</label><title>Combining multiple neural fields</title>
      <p id="d2e2673">Only the simplest geological models can be represented by a single implicit field. Hence, we develop a second synthetic model that highlights <italic>curlew</italic>'s approach to fault displacement, dyke injection and the use of domain boundaries to model intrusive and onlap relations (Fig. <xref ref-type="fig" rid="F6"/>a). The implicit fields associated with the depositional and kinematic events of the model are generated using predefined analytic equations, allowing us to construct a completely synthetic model for testing. Seed locations within these implicit fields were used to define isosurfaces that mimic stratigraphic contacts (Fig. <xref ref-type="fig" rid="F6"/>b). Value and gradient constraints were sampled from each implicit field along vertical drill holes, to be used as synthetic data (Fig. <xref ref-type="fig" rid="F6"/>d–j) when fitting (training) an interpolated neural field based model. Every individual field in the model influences every field that came before it with displacements, overprinting, or a combination of both actions (Fig. <xref ref-type="fig" rid="F7"/>c).</p>

      <fig id="F6" specific-use="star"><label>Figure 6</label><caption><p id="d2e2689">A structurally complex analytical synthetic model built in <italic>curlew</italic>. The resulting implicit field <bold>(a)</bold>, corresponding lithology <bold>(b)</bold>, and event ID <bold>(c)</bold> are also shown. This model represents a sequence of seven geological events, starting with a “layer cake” stratigraphy <bold>(j)</bold>. Subsequently, this stratigraphy is intersected by a dyke <bold>(i)</bold>, and intruded by a stock in the lower left corner of the model domain, represented through a domain boundary <bold>(h)</bold>. The intrusion is assigned an arbitrary implicit field value (in this case <inline-formula><mml:math id="M79" display="inline"><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:mrow></mml:math></inline-formula>) as its boundary is the only relevant entity. A listric fault then cuts through the units <bold>(g)</bold> followed by an unconformity that erodes through the deposited sequence, which is overlaid by a secondary deposition <bold>(f)</bold>. As the deposited unit is not conformable to the unconformity surface (i.e., onlap), the unconformity is also modelled as a domain boundary <bold>(e)</bold>. Finally, a second listric fault <bold>(d)</bold> cuts through the entire sequence.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f06.png"/>

        </fig>

      <fig id="F7" specific-use="star"><label>Figure 7</label><caption><p id="d2e2745">Interpolation results <bold>(a)</bold> and corresponding lithology classification <bold>(b)</bold> derived by fitting a network of neural fields <bold>(c)</bold> to the sparse constraints shown in Fig. <xref ref-type="fig" rid="F6"/>. As mentioned earlier, the intrusive body is represented by a constant scalar field value (<inline-formula><mml:math id="M80" display="inline"><mml:mrow><mml:mo>-</mml:mo><mml:mn mathvariant="normal">2</mml:mn></mml:mrow></mml:math></inline-formula> in this case) as only the boundary of the intrusion (defined by the domain boundary field) is of interest.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f07.png"/>

        </fig>

      <p id="d2e2776">A neural field was parameterised (with different hyperparameters for various losses) for each set of constraints. The gradient loss term for all the implicit fields was given a higher weight, to give more importance to the shape of the reconstructed model. For the depositional events (<inline-formula><mml:math id="M81" display="inline"><mml:mrow><mml:msub><mml:mi>s</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula> and <inline-formula><mml:math id="M82" display="inline"><mml:mrow><mml:msub><mml:mi>s</mml:mi><mml:mn mathvariant="normal">2</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>), the value loss was given a very low weight (as the values are arbitrarily defined and may impede convergence), and the monotonicity loss was enabled to encourage a bubble-free geometry. For the dyke, as the values determine the aperture of the dyke, the value loss was enabled, along with the thickness loss, to ensure it retains a constant thickness in interpolated regions. Finally, the domain boundaries and faults were fit by prioritising value-loss in combination with a thickness loss that acts to keep a constant implicit field gradient. For this example fault-displacement was assumed to be known, so the principal shortening direction (<inline-formula><mml:math id="M83" display="inline"><mml:mover accent="true"><mml:mi mathvariant="bold-italic">s</mml:mi><mml:mo mathvariant="normal" stretchy="false">^</mml:mo></mml:mover></mml:math></inline-formula>) and slip magnitude were defined a-priori (though the slip magnitude can also be treated as a learnable parameter, as shown in the following section).</p>
      <p id="d2e2812">After 1000 epochs of training, the interpolated <italic>curlew</italic> model closely matches the original synthetic one, with a few visible differences (Fig. 7). The implicit values for the two models are very different (0–600 vs. 80–100 for <inline-formula><mml:math id="M84" display="inline"><mml:mrow><mml:msub><mml:mi>s</mml:mi><mml:mn mathvariant="normal">0</mml:mn></mml:msub></mml:mrow></mml:math></inline-formula>), but this is consistent with the disabled value loss function for these two fields and does not necessarily influence the isosurface geometry (given we extract the isosurfaces for the lithologies based on seed positions).</p>
</sec>
<sec id="Ch1.S4.SS3">
  <label>4.3</label><title>Digital outcrop reconstruction</title>
      <p id="d2e2837">Finally, to demonstrate that <italic>curlew</italic> can be applied to real (3D) data, we present a model generated from a digital outcrop in Australia. The digital outcrop consists of a faulted stratigraphic sequence intruded by a dyke. The Compass plugin for CloudCompare <xref ref-type="bibr" rid="bib1.bibx60" id="paren.54"/> was used to extract (1) traces along the contacts between the stratigraphic units, dyke margin and fault surface, and (2) orientation measurements (strike and dip) of each of these geological structures.</p>
      <p id="d2e2846">Since these traces included a large number of points (due to the high resolution of the point cloud), they were subsampled to acquire evenly-spaced points along the stratigraphic contacts, the fault surface, and the dyke (Fig. <xref ref-type="fig" rid="F8"/>a). While these traces could be directly used as relational constraints, we used the subsampled points as value constraints, with the values being the approximate height of the layer from the base of the digital outcrop. These constraints were then used to constrain three consecutive neural fields: (1) a stratigraphic field, (2) a fault field, and (3) a dyke field.</p>

      <fig id="F8" specific-use="star"><label>Figure 8</label><caption><p id="d2e2853">Interface and gradient constraints extracted from a digital outcrop model <bold>(a)</bold> from Newcastle, Australia. These were used to construct a 3D <italic>curlew</italic> model describing the main geological structures and stratigraphic units and derive a lithological classification <bold>(b)</bold>, plotted here as an overlay on the original RGB colours for clarity. The fitted displacement fields from the <italic>curlew</italic> model were also used to offset these points and remove displacement from the dyke and fault, resulting in an undeformed point cloud closer to the original depositional geometry <bold>(c)</bold>.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f08.jpg"/>

        </fig>

      <p id="d2e2878">After fitting each neural field separately to generate an initial geometry, we froze the geometries of the fault and dyke fields by fixing the weights of their corresponding neural fields. The fault slip and stratigraphic interpolation were then fit simultaneously, to find an optimal displacement for the fault (i.e. by finding the fault offset that results, after reconstruction, in the smoothest interpolation of the older stratigraphy field).</p>
      <p id="d2e2881">The resulting model (Fig. <xref ref-type="fig" rid="F8"/>b) and solutions to the displacement fields induced by the fault and the dyke were then used to “undeform” the outcrop and retrieve pre-deformation locations for all of the points in the digital outcrop model (Fig. <xref ref-type="fig" rid="F8"/>c). Note that while the dyke in the original outcrop is finite, the modelled dyke is infinite. Finite dykes (and faults) are planned to be implemented in future versions of <italic>curlew</italic>.</p>
</sec>
</sec>
<sec id="Ch1.S5">
  <label>5</label><title>Discussion</title>
      <p id="d2e2900">We have presented an open-source python package, <italic>curlew</italic>, which leverages neural fields to generate implicit geological representations of structural geological models. The neural field approach employed by <italic>curlew</italic> is highly versatile and adaptable, capable of handling a wide range of constraint types, data modalities, and loss functions. Its representation of structural geological models in a fully differentiable architecture, and the ability of the underlying neural fields to arbitrarily adapt the geometry they represent, opens the door to a variety of new approaches to common earth modelling, property field interpolation and geophysical inversion.</p>
      <p id="d2e2909">Similarly to radial basis functions (RBF) or co-kriging approaches, models built with <italic>curlew</italic> are entirely meshless. Once trained, they can be evaluated at arbitrary locations, facilitating advanced methods for contour extraction or adaptive gridding strategies. Furthermore, given its <italic>pytorch</italic> core, training and inference operations are GPU-parallelised and scalable on large computing infrastructure. This contrasts against established methods such as co-kriging and RBF, which scale poorly with increasing data volume. Specifically, the requirement to invert a dense <inline-formula><mml:math id="M85" display="inline"><mml:mrow><mml:mi>N</mml:mi><mml:mo>×</mml:mo><mml:mi>N</mml:mi></mml:mrow></mml:math></inline-formula> covariance or interpolation matrix incurs a cubic computational cost (<inline-formula><mml:math id="M86" display="inline"><mml:mrow><mml:mi mathvariant="script">O</mml:mi><mml:mo>(</mml:mo><mml:msup><mml:mi>N</mml:mi><mml:mn mathvariant="normal">3</mml:mn></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:math></inline-formula>), making these methods inefficient without numerical simplifications like compactly supported kernels, fast multipole methods, or domain decomposition <xref ref-type="bibr" rid="bib1.bibx47 bib1.bibx8 bib1.bibx1 bib1.bibx65" id="paren.55"/>.</p>
      <p id="d2e2950">The differentiable nature of the generated models also means model outputs can include the gradient of the underlying implicit fields (i.e. structural orientation measurements), and if necessary, higher order derivatives (e.g., curvature) through automatic differentiation (although the second order derivatives are computed numerically in <italic>curlew</italic> for computational scalability). As shown in Sect. 3.2.2, this has been leveraged to suppress (albeit not eliminate) common interpolation artefacts such as “bubbles”, and further applications are envisaged when interpolating property fields (e.g., permeability, ore grade) given a structural geological model.</p>
      <p id="d2e2957">Curiously, our experiments also suggest that, in the context of geological modelling with neural fields, the inclusion of value constraints (instead of somewhat equivalent inequality constraints) can make it difficult for neural fields to converge to a geologically realistic geometry. This behaviour might be explained by the under-determined nature of gradient-only constraints – the network's loss is invariant to adding a constant offset to the field. Thus, the optimisation has a flat direction, yielding a continuum (valley) of equally valid solutions rather than a single narrow optimum. By contrast, specifying exact field values removes this degeneracy, collapsing the solution space to isolated optima that are much harder to find. Prior work in neural network training has noted that having such flat minima or degenerate solution manifolds can make optimisation easier and models generalise better <xref ref-type="bibr" rid="bib1.bibx33" id="paren.56"/>. Through their work on Sobolev Training, <xref ref-type="bibr" rid="bib1.bibx9" id="text.57"/> further demonstrate that leveraging derivative information (gradients) in training leads to improved learning efficiency, implying that matching the shape of the target function is often more tractable than matching exact values.</p>
<sec id="Ch1.S5.SS1">
  <label>5.1</label><title>Random Fourier Features</title>
      <p id="d2e2974">The inclusion of Random Fourier Feature (RFF) mapping allows <italic>curlew</italic> to train neural fields without using the pre-training protocol outlined by <xref ref-type="bibr" rid="bib1.bibx24" id="text.58"/> for network weight initialisation. Although there are several alternative approaches that could behave similarly <xref ref-type="bibr" rid="bib1.bibx51" id="paren.59"><named-content content-type="pre">e.g., SiREN;</named-content></xref>, the RFF mapping has the advantage that length-scales used by the interpolator can be carefully controlled, and potentially seeded for specific geological settings (e.g., in folded rocks with known amplitude and axial plane). However, this sensitivity can also be a disadvantage, as the manual selection of appropriate length scales can be quite challenging.</p>
      <p id="d2e2988">Interestingly, the randomness inherent to RFF mapping means multiple model realisations can be obtained by training models on the same data but with different RFF seeds (Fig. <xref ref-type="fig" rid="F9"/>). While model ensembles can also be derived from conventional neural fields, we suspect that the significant differences in (learned) geometric representation that result when randomly sampling different RFFs enable greater exploration of the potential solution space. The precise meaning of these variations remains a topic of future research, but it could facilitate methods for quantifying the epistemic uncertainty of the interpolated result.</p>

      <fig id="F9" specific-use="star"><label>Figure 9</label><caption><p id="d2e2995">Selected realisations <bold>(a–d)</bold> from an ensemble of 25 <italic>curlew</italic> models derived using different random seeds for the RFF encoding. The median predicted class <bold>(e)</bold> highlights the predominance of geologically plausible solutions. The Shannon information entropy of the predicted classes <bold>(f)</bold> shows high uncertainty (brighter colour) at the fold hinges and away from the drill hole data, suggesting this approach might be used to meaningfully quantify model uncertainty.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f09.png"/>

        </fig>

      <p id="d2e3017">For example, we have generated 25 random realisations of the folded stratigraphy model (see Sect. 4.1) and computed the information entropy <xref ref-type="bibr" rid="bib1.bibx50" id="paren.60"/> of the resulting lithology predictions (Fig. <xref ref-type="fig" rid="F9"/>f). This clearly highlights increased uncertainty near fold hinges and away from the constraints, aiding interpretation and potentially facilitating targeted data collection <xref ref-type="bibr" rid="bib1.bibx55 bib1.bibx63 bib1.bibx64 bib1.bibx61" id="paren.61"/>.</p>
      <p id="d2e3028">A full-variational Bayesian Neural Network <xref ref-type="bibr" rid="bib1.bibx16" id="paren.62"><named-content content-type="pre">BNN;</named-content></xref> provides a first-order uncertainty quantification, as the weights and biases of the neural network are not fixed values and can be approximated as probability distributions. Thus, this more comprehensive representation of variability may help to capture epistemic uncertainty arising from the data. However, BNNs are computationally expensive, even more so than ensembles of highly optimised networks, and require more complex training procedures compared to standard neural networks with RFF encoding. Some studies propose Monte-Carlo dropout <xref ref-type="bibr" rid="bib1.bibx22" id="paren.63"/> or Hamiltonian Monte-Carlo simulations within implicit neural representations <xref ref-type="bibr" rid="bib1.bibx15" id="paren.64"/> as a simpler alternative to BNNs. These approaches are all theoretically compatible with our <italic>curlew</italic> architecture, and could be explored in future works.</p>
</sec>
<sec id="Ch1.S5.SS2">
  <label>5.2</label><title>Learnable forward models</title>
      <p id="d2e3053">As hinted at previously, the differentiability of the combined network of neural fields can also be leveraged to update the model geometry according to some additional loss that depends on the model as a whole (rather than the individual neural fields). This can potentially be applied to integrate unlabelled auxiliary data through an additional neural network (Fig. <xref ref-type="fig" rid="F1"/>b), which learns the relationship between the outputs of the structural model (implicit field value and event ID; cf. Fig. <xref ref-type="fig" rid="F1"/>b) and arbitrary measurements (e.g., geochemistry or petrophysics). The primary role of this neural network is not to accurately predict the measured properties (as it cannot account for lateral variability), but rather to encourage the model geometry to converge on a solution that explains as much property variance as possible (i.e. a common earth model). By jointly training this learnable forward model and the geological geometry fields, geometries that are most predictive of the measured properties will be favoured.</p>
      <p id="d2e3060">We illustrate this approach using a simple model containing a folded sequence, in which three arbitrary property measurements have been made along the boreholes. These are visualised using a ternary colour mapping that gives a false-colour representation of the measurements (Fig. <xref ref-type="fig" rid="F10"/>a). An initial model, fit to sparse constraints from these drill holes, does a reasonable job at predicting the field geometry (Fig. <xref ref-type="fig" rid="F10"/>b) given the available information. But when this is coupled with the learnable forward model, the added unlabelled drillhole information (property measurements) results in a solution much closer to the original (Fig. <xref ref-type="fig" rid="F10"/>c).</p>

      <fig id="F10" specific-use="star"><label>Figure 10</label><caption><p id="d2e3071">Synthetic <bold>(a)</bold> and interpolated implicit field derived without <bold>(b)</bold> and with <bold>(c)</bold> the learnable forward model. The original implicit field was sampled to extract sparse conventional (value and gradient) constraints and denser property measurements (coloured lines; analogous to geochemistry assays or hyperspectral drill core scans). The predicted model without the use of property constraints shows a wider folding in the basement due to lack of constraints. The addition of the learnable forward model allows the use of extra data, thereby tightening the wavelength of the interpolated model. The learnable forward model can also be evaluated across the entire domain <bold>(d)</bold> to show the recovered property field.</p></caption>
          <graphic xlink:href="https://gmd.copernicus.org/articles/19/3455/2026/gmd-19-3455-2026-f10.png"/>

        </fig>

      <p id="d2e3093">This approach potentially adds a powerful semi-supervised aspect to the modelling process: constraints are used to define an approximate (“supervised”) geometry, which is further refined to find solutions that best explain additional quantitative data or lithological logs, reducing interpretation bias and enabling the rapid integration of new data without explicit interpretation. A similar approach might also be used to integrate geophysical constraints on model geometry, albeit with physical forward models.</p>
</sec>
<sec id="Ch1.S5.SS3">
  <label>5.3</label><title>Future directions</title>
      <p id="d2e3104">The parameterisation of geological models using neural networks opens up several exciting directions for future research and application. The introduction of Random Fourier Features, while powerful, necessitates research into the extraction of appropriate length scales from sparse data, instead of the trial-and-error approach used in the current work. Progressive implicit representations <xref ref-type="bibr" rid="bib1.bibx30" id="paren.65"/> might also help in mitigating random noise generated by including high frequencies, while fitting sparse data better. Using the domain of the model and known sparsity of the constraints, one could also generate a power-spectrum of length scales to seed the model. This would effectively eliminate the need for a length scales hyperparameter. However, as the norms of the spatial wavenumbers follow a Chi distribution, the resulting distribution of wavenumbers includes a heavy tail. Careful optimisation is necessary to ensure that no spectral noise overtakes the interpolation process of the model.</p>
      <p id="d2e3110">We also note that our method of generating multiple model realisations by changing the Random Fourier Feature encoding has several similarities to the turning bands and spectral methods used to simulate Gaussian random fields <xref ref-type="bibr" rid="bib1.bibx37" id="paren.66"/>, suggesting that a deeper stochastic link to other Gaussian process methods may be possible. The turning bands method uses the covariance matrix to seed the random field generation, which could possibly be adapted for the sparse datasets used as inputs to geological models. However, unlike the turning bands method, our method relies on Bochner's theorem <xref ref-type="bibr" rid="bib1.bibx3" id="paren.67"/>, which states that a shift-invariant kernel, i.e., the interpolator that the model is trying to approximate, is defined by the Fourier transform of the probability distribution of the frequencies used to construct the kernel. Using a uniform distribution <xref ref-type="bibr" rid="bib1.bibx12" id="paren.68"><named-content content-type="pre">as suggested by</named-content><named-content content-type="post">for turning bands simulations</named-content></xref> signifies that the interpolator will behave as a Sinc- or Bessel-type kernel, and could cause ringing artefacts. A careful examination of alternative distributions for drawing these frequencies is also an avenue for future research.</p>
      <p id="d2e3126">The flexibility of the loss function used to train neural fields opens the door to additional innovative constraints that better encode geological rules or prior knowledge, e.g., topological relationships of the model outputs <xref ref-type="bibr" rid="bib1.bibx58 bib1.bibx59" id="paren.69"/>. However, incorporating multiple physical and geometric constraints further increases the complexity of our already complex multi-objective optimisation problem. Because different constraints often have entirely different numerical scales and physical units, each additional loss component requires careful hyperparameter tuning to prevent one objective from dominating the network's gradient updates.</p>
      <p id="d2e3132">To mitigate this in <italic>curlew</italic>, we implement an optional initial loss normalisation strategy in which each individual loss component is divided by its detached initial value at the very start of training. This initial scaling forces all the losses to begin at 1, and can be thought of as an assumption that the losses at initialisation are equally bad. Once the losses are mapped to this common (unitless) scale, users can apply a single, intuitive scaling hyperparameter to explicitly dictate the relative physical or geological importance of one constraint over another. Our tests show that setting the local loss scaling hyperparameters to 1 and the global loss scaling hyperparameters between 0.01 and 0.1 serves as a good starting point for most models.</p>
      <p id="d2e3139">Alternative approaches, such as Gradient Surgery <xref ref-type="bibr" rid="bib1.bibx68" id="paren.70"/> and other gradient based algorithms <xref ref-type="bibr" rid="bib1.bibx69" id="paren.71"/>, exist to automatically select and adapt hyperparameters during training. However, further work is needed to assess the extent to which these are able to replace manual hyperparameter optimisation in the context of <italic>curlew</italic>. We have implemented one of these, SoftAdapt <xref ref-type="bibr" rid="bib1.bibx23" id="paren.72"/>, but with mixed results.</p>
</sec>
</sec>
<sec id="Ch1.S6" sec-type="conclusions">
  <label>6</label><title>Conclusion</title>
      <p id="d2e3163">To conclude, we suggest that neural fields offer an exciting and flexible method for parameterising structural geological models, and hope that the lightweight, modular, open-source and well-documented software framework provided by <italic>curlew</italic> facilitates further development in this direction. While much work remains (cf. Sect. 5.3), we have applied <italic>curlew</italic> to real (Sect. 4.3) and structurally complex synthetic (Sect. 4.2) data to model geological structures that would challenge established approaches. Our novel loss function helps encode geological rules regarding lateral continuity (i.e. monotonicity loss), and extends earlier work by <xref ref-type="bibr" rid="bib1.bibx24" id="text.73"/> to avoid assigning geological data arbitrary but restrictive values (value constraints, cf. Sect. 2.1), through our implementation of pairwise relational constraints. By chaining neural fields together, through overprinting and offsetting relationships, <italic>curlew</italic> is also able to represent a variety of geological structures (faults, dykes and domain boundaries) and incrementally build complex geological models. Our results indicate that the flexibility of neural fields, and their ability to incorporate geological rules and unlabelled categorical or quantitative geological data, make them a powerful tool for the next generation of structural geological models. We hope that <italic>curlew</italic> helps unlock some of this potential, and ultimately facilitates more accurate and less biased representations of the subsurface.</p>
</sec>

      
      </body>
    <back><notes notes-type="codedataavailability"><title>Code and data availability</title>

      <p id="d2e3185"><italic>curlew</italic> is an open-source Python library licensed under the MIT License. It is currently hosted on <uri>https://github.com/hifexplo/curlew</uri> (last access: 24 September 2025), and the version associated with this publication is archived at <ext-link xlink:href="https://doi.org/10.5281/zenodo.17187731" ext-link-type="DOI">10.5281/zenodo.17187731</ext-link> <xref ref-type="bibr" rid="bib1.bibx57" id="paren.74"/>. Documentation is available within the package and is hosted at <uri>https://samthiele.github.io/curlew/</uri> (last access: 22 April 2025).</p>

      <p id="d2e3202">The synthetic examples and the digital outcrop model shown in this contribution have been organised into documented jupyter notebooks hosted at <uri>https://github.com/k4m4th/curlew_examples</uri> (last access: 13 March 2026), and can also be found at <ext-link xlink:href="https://doi.org/10.5281/zenodo.19002735" ext-link-type="DOI">10.5281/zenodo.19002735</ext-link> <xref ref-type="bibr" rid="bib1.bibx27" id="paren.75"/>. The specific versions of packages used to create the figures in the publication can be found in the <italic>requirements.txt</italic> file within the repository. The original digital outcrop from Newcastle, Australia, can be found at <uri>https://ausgeol.org/sitedetails/?site=NewcastleUAV3</uri> (last access: 13 March 2026).</p>
  </notes><notes notes-type="authorcontribution"><title>Author contributions</title>

      <p id="d2e3223">AVK: Conceptualisation, Formal analysis, Code development, Investigation, Methodology, Visualisation, Writing – original draft; STT: Conceptualisation, Methodology, Code development, Writing – original draft, review and editing, Funding; MM: Code development, Writing – original draft; LG: Code testing, Writing – original draft, review and editing; RTD: Methodology, Writing – review and editing; MJH: Code testing, Writing – review and editing; FW: Writing – review and editing; RG: Writing – review and editing.</p>
  </notes><notes notes-type="competinginterests"><title>Competing interests</title>

      <p id="d2e3229">The contact author has declared that none of the authors has any competing interests.</p>
  </notes><notes notes-type="disclaimer"><title>Disclaimer</title>

      <p id="d2e3235">Publisher's note: Copernicus Publications remains neutral with regard to jurisdictional claims made in the text, published maps, institutional affiliations, or any other geographical representation in this paper. The authors bear the ultimate responsibility for providing appropriate place names. Views expressed in the text are those of the authors and do not necessarily reflect the views of the publisher.</p>
  </notes><ack><title>Acknowledgements</title><p id="d2e3241">Akshay V. Kamath and Samuel T. Thiele were partially supported by funding from the European Union's HORIZON Europe Research Council and UK Research and Innovation (UKRI) under grant agreement No. 101058483 (VECTOR). This research has also received funding from the Klaus Tschira Boost Fund, a joint initiative of GSO – Guidance, Skills &amp; Opportunities e.V. and Klaus Tschira Stiftung.</p></ack><notes notes-type="financialsupport"><title>Financial support</title>

      <p id="d2e3247">This research has been supported by the HORIZON EUROPE European Research Council (grant no. 101058483) and the Klaus Tschira Stiftung (grant-no. GSO/KT 76).  The article processing charges for this open-access  publication were covered by the Helmholtz-Zentrum  Dresden-Rossendorf (HZDR).</p>
  </notes><notes notes-type="reviewstatement"><title>Review statement</title>

      <p id="d2e3260">This paper was edited by Evangelos Moulas and reviewed by Ítalo Gonçalves and one anonymous referee.</p>
  </notes><ref-list>
    <title>References</title>

      <ref id="bib1.bibx1"><label>Beatson et al.(2001)Beatson, Light, and Billings</label><mixed-citation>Beatson, R. K., Light, W. A., and Billings, S.: Fast Solution of the Radial Basis Function Interpolation Equations: Domain Decomposition Methods, SIAM J. Sci. Comput., 22, 1717–1740, <ext-link xlink:href="https://doi.org/10.1137/s1064827599361771" ext-link-type="DOI">10.1137/s1064827599361771</ext-link>, 2001.</mixed-citation></ref>
      <ref id="bib1.bibx2"><label>Bjerre et al.(2020)Bjerre, Kristensen, Engesgaard, and Højberg</label><mixed-citation>Bjerre, E., Kristensen, L. S., Engesgaard, P., and Højberg, A. L.: Drivers and barriers for taking account of geological uncertainty in decision making for groundwater protection, Sci. Total Environ., 746, 141045, <ext-link xlink:href="https://doi.org/10.1016/j.scitotenv.2020.141045" ext-link-type="DOI">10.1016/j.scitotenv.2020.141045</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx3"><label>Bochner(1955)</label><mixed-citation>Bochner, S.: Harmonic Analysis and the Theory of Probability, University of California Press, <ext-link xlink:href="https://doi.org/10.1525/9780520345294" ext-link-type="DOI">10.1525/9780520345294</ext-link>, 1955.</mixed-citation></ref>
      <ref id="bib1.bibx4"><label>Bond(2015)</label><mixed-citation>Bond, C. E.: Uncertainty in structural interpretation: Lessons to be learnt, J. Struct. Geol., 74, 185–200, <ext-link xlink:href="https://doi.org/10.1016/j.jsg.2015.03.003" ext-link-type="DOI">10.1016/j.jsg.2015.03.003</ext-link>, 2015.</mixed-citation></ref>
      <ref id="bib1.bibx5"><label>Briggs(1974)</label><mixed-citation>Briggs, I. C.: Machine contouring using minimum curvature, Geophysics, 39, 39–48, <ext-link xlink:href="https://doi.org/10.1190/1.1440410" ext-link-type="DOI">10.1190/1.1440410</ext-link>, 1974.</mixed-citation></ref>
      <ref id="bib1.bibx6"><label>Calcagno et al.(2008)Calcagno, Chilès, Courrioux, and Guillen</label><mixed-citation>Calcagno, P., Chilès, J., Courrioux, G., and Guillen, A.: Geological modelling from field data and geological knowledge, Phys. Earth Planet. In., 171, 147–157, <ext-link xlink:href="https://doi.org/10.1016/j.pepi.2008.06.013" ext-link-type="DOI">10.1016/j.pepi.2008.06.013</ext-link>, 2008.</mixed-citation></ref>
      <ref id="bib1.bibx7"><label>Caumon et al.(2013)Caumon, Gray, Antoine, and Titeux</label><mixed-citation>Caumon, G., Gray, G., Antoine, C., and Titeux, M.-O.: Three-Dimensional Implicit Stratigraphic Model Building From Remote Sensing Data on Tetrahedral Meshes: Theory and Application to a Regional Model of La Popa Basin, NE Mexico, IEEE T. Geosci. Remote, 51, 1613–1621, <ext-link xlink:href="https://doi.org/10.1109/tgrs.2012.2207727" ext-link-type="DOI">10.1109/tgrs.2012.2207727</ext-link>, 2013.</mixed-citation></ref>
      <ref id="bib1.bibx8"><label>Cavoretto et al.(2016)Cavoretto, De Rossi, and Perracchione</label><mixed-citation>Cavoretto, R., De Rossi, A., and Perracchione, E.: Efficient computation of partition of unity interpolants through a block-based searching technique, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1604.04585" ext-link-type="DOI">10.48550/arXiv.1604.04585</ext-link>, 2016.</mixed-citation></ref>
      <ref id="bib1.bibx9"><label>Czarnecki et al.(2017)Czarnecki, Osindero, Jaderberg, Świrszcz, and Pascanu</label><mixed-citation>Czarnecki, W. M., Osindero, S., Jaderberg, M., Świrszcz, G., and Pascanu, R.: Sobolev Training for Neural Networks, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1706.04859" ext-link-type="DOI">10.48550/arXiv.1706.04859</ext-link>, 2017.</mixed-citation></ref>
      <ref id="bib1.bibx10"><label>de la Varga and Wellmann(2016)</label><mixed-citation>de la Varga, M. and Wellmann, J. F.: Structural geologic modeling as an inference problem: A Bayesian perspective, Interpretation, 4, SM1–SM16, <ext-link xlink:href="https://doi.org/10.1190/int-2015-0188.1" ext-link-type="DOI">10.1190/int-2015-0188.1</ext-link>, 2016.</mixed-citation></ref>
      <ref id="bib1.bibx11"><label>de la Varga et al.(2019)de la Varga, Schaaf, and Wellmann</label><mixed-citation>de la Varga, M., Schaaf, A., and Wellmann, F.: GemPy 1.0: open-source stochastic geological modeling and inversion, Geosci. Model Dev., 12, 1–32, <ext-link xlink:href="https://doi.org/10.5194/gmd-12-1-2019" ext-link-type="DOI">10.5194/gmd-12-1-2019</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx12"><label>Emery and Lantuéjoul(2006)</label><mixed-citation>Emery, X. and Lantuéjoul, C.: TBSIM: A computer program for conditional simulation of three-dimensional Gaussian random fields via the turning bands method, Comput. Geosci., 32, 1615–1628, <ext-link xlink:href="https://doi.org/10.1016/j.cageo.2006.03.001" ext-link-type="DOI">10.1016/j.cageo.2006.03.001</ext-link>, 2006.</mixed-citation></ref>
      <ref id="bib1.bibx13"><label>Frank et al.(2007)Frank, Tertois, and Mallet</label><mixed-citation>Frank, T., Tertois, A.-L., and Mallet, J.-L.: 3D-reconstruction of complex geological interfaces from irregularly distributed and noisy point data, Comput. Geosci., 33, 932–943, <ext-link xlink:href="https://doi.org/10.1016/j.cageo.2006.11.014" ext-link-type="DOI">10.1016/j.cageo.2006.11.014</ext-link>, 2007.</mixed-citation></ref>
      <ref id="bib1.bibx14"><label>Gao and Wellmann(2025)</label><mixed-citation>Gao, K. and Wellmann, F.: Fault representation in structural modelling with implicit neural representations, Comput. Geosci., 199, 105911, <ext-link xlink:href="https://doi.org/10.1016/j.cageo.2025.105911" ext-link-type="DOI">10.1016/j.cageo.2025.105911</ext-link>, 2025.</mixed-citation></ref>
      <ref id="bib1.bibx15"><label>Gao et al.(2026)Gao, Hillier, and Wellmann</label><mixed-citation>Gao, K., Hillier, M., and Wellmann, F.: Uncertainty quantification using Hamiltonian Monte Carlo for structural geological modelling with implicit neural representations (INR), Comput. Geosci., 209, 106123, <ext-link xlink:href="https://doi.org/10.1016/j.cageo.2026.106123" ext-link-type="DOI">10.1016/j.cageo.2026.106123</ext-link>, 2026.</mixed-citation></ref>
      <ref id="bib1.bibx16"><label>Goan and Fookes(2020)</label><mixed-citation>Goan, E. and Fookes, C.: Bayesian Neural Networks: An Introduction and Survey, Springer International Publishing, 45–87, <ext-link xlink:href="https://doi.org/10.1007/978-3-030-42553-1_3" ext-link-type="DOI">10.1007/978-3-030-42553-1_3</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx17"><label>Godefroy et al.(2018)Godefroy, Caumon, Ford, Laurent, and Jackson</label><mixed-citation>Godefroy, G., Caumon, G., Ford, M., Laurent, G., and Jackson, C. A.-L.: A parametric fault displacement model to introduce kinematic control into modeling faults from sparse data, Interpretation, 6, B1–B13, <ext-link xlink:href="https://doi.org/10.1190/int-2017-0059.1" ext-link-type="DOI">10.1190/int-2017-0059.1</ext-link>, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx18"><label>Gropp et al.(2020)Gropp, Yariv, Haim, Atzmon, and Lipman</label><mixed-citation>Gropp, A., Yariv, L., Haim, N., Atzmon, M., and Lipman, Y.: Implicit Geometric Regularization for Learning Shapes, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2002.10099" ext-link-type="DOI">10.48550/arXiv.2002.10099</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx19"><label>Grose et al.(2017)Grose, Laurent, Aillères, Armit, Jessell, and Caumon</label><mixed-citation>Grose, L., Laurent, G., Aillères, L., Armit, R., Jessell, M., and Caumon, G.: Structural data constraints for implicit modeling of folds, J. Struct. Geol., 104, 80–92, <ext-link xlink:href="https://doi.org/10.1016/j.jsg.2017.09.013" ext-link-type="DOI">10.1016/j.jsg.2017.09.013</ext-link>, 2017.</mixed-citation></ref>
      <ref id="bib1.bibx20"><label>Grose et al.(2021a)Grose, Ailleres, Laurent, Caumon, Jessell, and Armit</label><mixed-citation>Grose, L., Ailleres, L., Laurent, G., Caumon, G., Jessell, M., and Armit, R.: Modelling of faults in LoopStructural 1.0, Geosci. Model Dev., 14, 6197–6213, <ext-link xlink:href="https://doi.org/10.5194/gmd-14-6197-2021" ext-link-type="DOI">10.5194/gmd-14-6197-2021</ext-link>, 2021a.</mixed-citation></ref>
      <ref id="bib1.bibx21"><label>Grose et al.(2021b)Grose, Ailleres, Laurent, and Jessell</label><mixed-citation>Grose, L., Ailleres, L., Laurent, G., and Jessell, M.: LoopStructural 1.0: time-aware geological modelling, Geosci. Model Dev., 14, 3915–3937, <ext-link xlink:href="https://doi.org/10.5194/gmd-14-3915-2021" ext-link-type="DOI">10.5194/gmd-14-3915-2021</ext-link>, 2021b.</mixed-citation></ref>
      <ref id="bib1.bibx22"><label>Hasan et al.(2022)Hasan, Khosravi, Hossain, Rahman, and Nahavandi</label><mixed-citation>Hasan, M., Khosravi, A., Hossain, I., Rahman, A., and Nahavandi, S.: Controlled Dropout for Uncertainty Estimation, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2205.03109" ext-link-type="DOI">10.48550/arXiv.2205.03109</ext-link>, 2022.</mixed-citation></ref>
      <ref id="bib1.bibx23"><label>Heydari et al.(2019)Heydari, Thompson, and Mehmood</label><mixed-citation>Heydari, A. A., Thompson, C. A., and Mehmood, A.: SoftAdapt: Techniques for Adaptive Loss Weighting of Neural Networks with Multi-Part Loss Functions, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1912.12355" ext-link-type="DOI">10.48550/arXiv.1912.12355</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx24"><label>Hillier et al.(2023)Hillier, Wellmann, De Kemp, Brodaric, Schetselaar, and Bédard</label><mixed-citation>Hillier, M., Wellmann, F., de Kemp, E. A., Brodaric, B., Schetselaar, E., and Bédard, K.: GeoINR 1.0: an implicit neural network approach to three-dimensional geological modelling, Geosci. Model Dev., 16, 6987–7012, <ext-link xlink:href="https://doi.org/10.5194/gmd-16-6987-2023" ext-link-type="DOI">10.5194/gmd-16-6987-2023</ext-link>, 2023.</mixed-citation></ref>
      <ref id="bib1.bibx25"><label>Hillier et al.(2014)Hillier, Schetselaar, de Kemp, and Perron</label><mixed-citation>Hillier, M. J., Schetselaar, E. M., de Kemp, E. A., and Perron, G.: Three-Dimensional Modelling of Geological Surfaces Using Generalized Interpolation with Radial Basis Functions, Math. Geosci., 46, 931–953, <ext-link xlink:href="https://doi.org/10.1007/s11004-014-9540-3" ext-link-type="DOI">10.1007/s11004-014-9540-3</ext-link>, 2014.</mixed-citation></ref>
      <ref id="bib1.bibx26"><label>Irakarama et al.(2018)Irakarama, Laurent, Renaudeau, and Caumon</label><mixed-citation>Irakarama, M., Laurent, G., Renaudeau, J., and Caumon, G.: Finite Difference Implicit Modeling of Geological Structures, in: Proceedings, EAGE Publications BV, Copenhagen, Denmark, <ext-link xlink:href="https://doi.org/10.3997/2214-4609.201800794" ext-link-type="DOI">10.3997/2214-4609.201800794</ext-link>, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx27"><label>Kamath and Thiele(2026)</label><mixed-citation>Kamath, A. and Thiele, S.: k4m4th/curlew_examples: curlew_examples, Zenodo [code], <ext-link xlink:href="https://doi.org/10.5281/ZENODO.19002735" ext-link-type="DOI">10.5281/ZENODO.19002735</ext-link>, 2026.</mixed-citation></ref>
      <ref id="bib1.bibx28"><label>Kamath et al.(2025)Kamath, Thiele, Ugalde, Morris, Tolosana-Delgado, Kirsch, and Gloaguen</label><mixed-citation>Kamath, A. V., Thiele, S. T., Ugalde, H., Morris, B., Tolosana-Delgado, R., Kirsch, M., and Gloaguen, R.: TensorWeave 1.0: Interpolating geophysical tensor fields with spatial neural networks, EGUsphere [preprint], <ext-link xlink:href="https://doi.org/10.5194/egusphere-2025-2345" ext-link-type="DOI">10.5194/egusphere-2025-2345</ext-link>, 2025.</mixed-citation></ref>
      <ref id="bib1.bibx29"><label>Lajaunie et al.(1997)Lajaunie, Courrioux, and Manuel</label><mixed-citation>Lajaunie, C., Courrioux, G., and Manuel, L.: Foliation fields and 3D cartography in geology: Principles of a method based on potential interpolation, Math. Geol., 29, 571–584, <ext-link xlink:href="https://doi.org/10.1007/BF02775087" ext-link-type="DOI">10.1007/BF02775087</ext-link>, 1997.</mixed-citation></ref>
      <ref id="bib1.bibx30"><label>Landgraf et al.(2022)Landgraf, Hornung, and Cabral</label><mixed-citation>Landgraf, Z., Hornung, A. S., and Cabral, R. S.: PINs: Progressive Implicit Networks for Multi-Scale Neural Representations, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2202.04713" ext-link-type="DOI">10.48550/arXiv.2202.04713</ext-link>, 2022.</mixed-citation></ref>
      <ref id="bib1.bibx31"><label>Laurent(2016)</label><mixed-citation>Laurent, G.: Iterative Thickness Regularization of Stratigraphic Layers in Discrete Implicit Modeling, Math. Geosci., 48, 811–833, <ext-link xlink:href="https://doi.org/10.1007/s11004-016-9637-y" ext-link-type="DOI">10.1007/s11004-016-9637-y</ext-link>, 2016.</mixed-citation></ref>
      <ref id="bib1.bibx32"><label>Laurent et al.(2013)Laurent, Caumon, Bouziat, and Jessell</label><mixed-citation>Laurent, G., Caumon, G., Bouziat, A., and Jessell, M.: A parametric method to model 3D displacements around faults with volumetric vector fields, Tectonophysics, 590, 83–93, <ext-link xlink:href="https://doi.org/10.1016/j.tecto.2013.01.015" ext-link-type="DOI">10.1016/j.tecto.2013.01.015</ext-link>, 2013.</mixed-citation></ref>
      <ref id="bib1.bibx33"><label>Li et al.(2017)Li, Xu, Taylor, Studer, and Goldstein</label><mixed-citation>Li, H., Xu, Z., Taylor, G., Studer, C., and Goldstein, T.: Visualizing the Loss Landscape of Neural Nets, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1712.09913" ext-link-type="DOI">10.48550/arXiv.1712.09913</ext-link>, 2017.</mixed-citation></ref>
      <ref id="bib1.bibx34"><label>Lindi et al.(2024)Lindi, Aladejare, Ozoji, and Ranta</label><mixed-citation>Lindi, O. T., Aladejare, A. E., Ozoji, T. M., and Ranta, J.-P.: Uncertainty Quantification in Mineral Resource Estimation, Natural Resources Research, 33, 2503–2526, <ext-link xlink:href="https://doi.org/10.1007/s11053-024-10394-6" ext-link-type="DOI">10.1007/s11053-024-10394-6</ext-link>, 2024.</mixed-citation></ref>
      <ref id="bib1.bibx35"><label>Lindsay et al.(2013)Lindsay, Jessell, Ailleres, Perrouty, De Kemp, and Betts</label><mixed-citation>Lindsay, M., Jessell, M., Ailleres, L., Perrouty, S., De Kemp, E., and Betts, P.: Geodiversity: Exploration of 3D geological model space, Tectonophysics, 594, 27–37, <ext-link xlink:href="https://doi.org/10.1016/j.tecto.2013.03.013" ext-link-type="DOI">10.1016/j.tecto.2013.03.013</ext-link>, 2013.</mixed-citation></ref>
      <ref id="bib1.bibx36"><label>Mallet(1989)</label><mixed-citation>Mallet, J.-L.: Discrete smooth interpolation, ACM T. Graphic., 8, 121–144, <ext-link xlink:href="https://doi.org/10.1145/62054.62057" ext-link-type="DOI">10.1145/62054.62057</ext-link>, 1989.</mixed-citation></ref>
      <ref id="bib1.bibx37"><label>Mantoglou and Wilson(1982)</label><mixed-citation>Mantoglou, A. and Wilson, J. L.: The Turning Bands Method for simulation of random fields using line generation by a spectral method, Water Resour. Res., 18, 1379–1394, <ext-link xlink:href="https://doi.org/10.1029/WR018i005p01379" ext-link-type="DOI">10.1029/WR018i005p01379</ext-link>, 1982.</mixed-citation></ref>
      <ref id="bib1.bibx38"><label>Margossian(2019)</label><mixed-citation>Margossian, C. C.: A Review of automatic differentiation and its efficient implementation, WIREs, 9, e1305, <ext-link xlink:href="https://doi.org/10.1002/WIDM.1305" ext-link-type="DOI">10.1002/WIDM.1305</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx39"><label>Misra(2019)</label><mixed-citation>Misra, D.: Mish: A Self Regularized Non-Monotonic Activation Function, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1908.08681" ext-link-type="DOI">10.48550/arXiv.1908.08681</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx40"><label>Osher and Sethian(1988)</label><mixed-citation>Osher, S. and Sethian, J. A.: Fronts propagating with curvature-dependent speed: Algorithms based on Hamilton-Jacobi formulations, J. Comput. Phys., 79, 12–49, <ext-link xlink:href="https://doi.org/10.1016/0021-9991(88)90002-2" ext-link-type="DOI">10.1016/0021-9991(88)90002-2</ext-link>, 1988.</mixed-citation></ref>
      <ref id="bib1.bibx41"><label>Paszke et al.(2019)Paszke, Gross, Massa, Lerer, Bradbury, Chanan, Killeen, Lin, Gimelshein, Antiga, Desmaison, Köpf, Yang, DeVito, Raison, Tejani, Chilamkurthy, Steiner, Fang, Bai, and Chintala</label><mixed-citation>Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., Köpf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J., and Chintala, S.: PyTorch: An Imperative Style, High-Performance Deep Learning Library, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1912.01703" ext-link-type="DOI">10.48550/arXiv.1912.01703</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx42"><label>Pérez-Díaz et al.(2020)Pérez-Díaz, Alcalde, and Bond</label><mixed-citation>Pérez-Díaz, L., Alcalde, J., and Bond, C. E.: Introduction: Handling uncertainty in the geosciences: identification, mitigation and communication, Solid Earth, 11, 889–897, <ext-link xlink:href="https://doi.org/10.5194/se-11-889-2020" ext-link-type="DOI">10.5194/se-11-889-2020</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx43"><label>Rahaman et al.(2019)Rahaman, Baratin, Arpit, Draxler, Lin, Hamprecht, Bengio, and Courville</label><mixed-citation>Rahaman, N., Baratin, A., Arpit, D., Draxler, F., Lin, M., Hamprecht, F. A., Bengio, Y., and Courville, A.: On the Spectral Bias of Neural Networks, in: Proceedings of the 36th International Conference on Machine Learning (ICML), Proceedings of Machine Learning Research, vol. 97, 5301–5310, <ext-link xlink:href="https://doi.org/10.48550/arXiv.1806.08734" ext-link-type="DOI">10.48550/arXiv.1806.08734</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx44"><label>Rahimi and Recht(2007)</label><mixed-citation>Rahimi, A. and Recht, B.: Random Features for Large-Scale Kernel Machines, in: Advances in Neural Information Processing Systems, edited by: Platt, J., Koller, D., Singer, Y., and Roweis, S., vol. 20, Curran Associates, Inc., <uri>https://proceedings.neurips.cc/paper_files/paper/2007/file/013a006f03dbc5392effeb8f18fda755-Paper.pdf</uri>, 2007.</mixed-citation></ref>
      <ref id="bib1.bibx45"><label>Raissi et al.(2019)Raissi, Perdikaris, and Karniadakis</label><mixed-citation>Raissi, M., Perdikaris, P., and Karniadakis, G. E.: Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations, J. Comput. Phys., 378, 686–707, <ext-link xlink:href="https://doi.org/10.1016/j.jcp.2018.10.045" ext-link-type="DOI">10.1016/j.jcp.2018.10.045</ext-link>, 2019.</mixed-citation></ref>
      <ref id="bib1.bibx46"><label>Ramachandran et al.(2017)Ramachandran, Zoph, and Le</label><mixed-citation>Ramachandran, P., Zoph, B., and Le, Q. V.: Searching for Activation Functions, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.1710.05941" ext-link-type="DOI">10.48550/arXiv.1710.05941</ext-link>, 2017.</mixed-citation></ref>
      <ref id="bib1.bibx47"><label>Rasmussen and Williams(2005)</label><mixed-citation>Rasmussen, C. E. and Williams, C. K. I.: Gaussian Processes for Machine Learning, The MIT Press, <ext-link xlink:href="https://doi.org/10.7551/mitpress/3206.001.0001" ext-link-type="DOI">10.7551/mitpress/3206.001.0001</ext-link>, 2005.</mixed-citation></ref>
      <ref id="bib1.bibx48"><label>Rudin et al.(1992)Rudin, Osher, and Fatemi</label><mixed-citation>Rudin, L. I., Osher, S., and Fatemi, E.: Nonlinear total variation based noise removal algorithms, Physica D, 60, 259–268, <ext-link xlink:href="https://doi.org/10.1016/0167-2789(92)90242-F" ext-link-type="DOI">10.1016/0167-2789(92)90242-F</ext-link>, 1992.</mixed-citation></ref>
      <ref id="bib1.bibx49"><label>Sandwell(1987)</label><mixed-citation>Sandwell, D. T.: Biharmonic spline interpolation of GEOS-3 and SEASAT altimeter data, Geophys. Res. Lett., 14, 139–142, <ext-link xlink:href="https://doi.org/10.1029/gl014i002p00139" ext-link-type="DOI">10.1029/gl014i002p00139</ext-link>, 1987.</mixed-citation></ref>
      <ref id="bib1.bibx50"><label>Shannon(1948)</label><mixed-citation>Shannon, C. E.: A Mathematical Theory of Communication, Bell Syst. Tech. J., 27, 379–423, <ext-link xlink:href="https://doi.org/10.1002/j.1538-7305.1948.tb01338.x" ext-link-type="DOI">10.1002/j.1538-7305.1948.tb01338.x</ext-link>, 1948.</mixed-citation></ref>
      <ref id="bib1.bibx51"><label>Sitzmann et al.(2020)Sitzmann, Martel, Bergman, Lindell, and Wetzstein</label><mixed-citation>Sitzmann, V., Martel, J. N. P., Bergman, A. W., Lindell, D. B., and Wetzstein, G.: Implicit Neural Representations with Periodic Activation Functions, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2006.09661" ext-link-type="DOI">10.48550/arXiv.2006.09661</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx52"><label>Smith et al.(2025)Smith, Horrocks, Akhtar, Holden, and Wedge</label><mixed-citation>Smith, L. T., Horrocks, T., Akhtar, N., Holden, E.-J., and Wedge, D.: Implicit neural representation for potential field geophysics, Sci. Rep., 15, <ext-link xlink:href="https://doi.org/10.1038/s41598-024-83979-z" ext-link-type="DOI">10.1038/s41598-024-83979-z</ext-link>, 2025.</mixed-citation></ref>
      <ref id="bib1.bibx53"><label>Smith and Wessel(1990)</label><mixed-citation>Smith, W. H. F. and Wessel, P.: Gridding with continuous curvature splines in tension, Geophysics, 55, 293–305, <ext-link xlink:href="https://doi.org/10.1190/1.1442837" ext-link-type="DOI">10.1190/1.1442837</ext-link>, 1990.</mixed-citation></ref>
      <ref id="bib1.bibx54"><label>Steno and Oldenburg(1671)</label><mixed-citation>Steno, N. and Oldenburg, H.: The prodromus to a dissertation concerning solids naturally contained within solids: laying a foundation for the rendering a rational accompt both of the frame and the several changes of the masse of the Earth, as also of the various productions in the same, Printed by F. Winter, and are to be sold by Moses Pitt, <ext-link xlink:href="https://doi.org/10.5962/bhl.title.145115" ext-link-type="DOI">10.5962/bhl.title.145115</ext-link>, 1671.</mixed-citation></ref>
      <ref id="bib1.bibx55"><label>Tacher et al.(2006)Tacher, Pomian-Srzednicki, and Parriaux</label><mixed-citation>Tacher, L., Pomian-Srzednicki, I., and Parriaux, A.: Geological uncertainties associated with 3-D subsurface models, Comput. Geosci., 32, 212–221, <ext-link xlink:href="https://doi.org/10.1016/j.cageo.2005.06.010" ext-link-type="DOI">10.1016/j.cageo.2005.06.010</ext-link>, 2006.</mixed-citation></ref>
      <ref id="bib1.bibx56"><label>Tancik et al.(2020)Tancik, Srinivasan, Mildenhall, Fridovich-Keil, Raghavan, Singhal, Ramamoorthi, Barron, and Ng</label><mixed-citation>Tancik, M., Srinivasan, P. P., Mildenhall, B., Fridovich-Keil, S., Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J. T., and Ng, R.: Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2006.10739" ext-link-type="DOI">10.48550/arXiv.2006.10739</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx57"><label>Thiele and Kamath(2025)</label><mixed-citation>Thiele, S. and Kamath, A.: samthiele/curlew: Curlew 1.00, Zenodo [code], <ext-link xlink:href="https://doi.org/10.5281/ZENODO.17187731" ext-link-type="DOI">10.5281/ZENODO.17187731</ext-link>, 2025.</mixed-citation></ref>
      <ref id="bib1.bibx58"><label>Thiele et al.(2016a)Thiele, Jessell, Lindsay, Ogarko, Wellmann, and Pakyuz-Charrier</label><mixed-citation>Thiele, S. T., Jessell, M. W., Lindsay, M., Ogarko, V., Wellmann, J. F., and Pakyuz-Charrier, E.: The topology of geology 1: Topological analysis, J. Struct. Geol., 91, 27–38, <ext-link xlink:href="https://doi.org/10.1016/j.jsg.2016.08.009" ext-link-type="DOI">10.1016/j.jsg.2016.08.009</ext-link>, 2016a.</mixed-citation></ref>
      <ref id="bib1.bibx59"><label>Thiele et al.(2016b)Thiele, Jessell, Lindsay, Wellmann, and Pakyuz-Charrier</label><mixed-citation>Thiele, S. T., Jessell, M. W., Lindsay, M., Wellmann, J. F., and Pakyuz-Charrier, E.: The topology of geology 2: Topological uncertainty, J. Struct. Geol., 91, 74–87, <ext-link xlink:href="https://doi.org/10.1016/j.jsg.2016.08.010" ext-link-type="DOI">10.1016/j.jsg.2016.08.010</ext-link>, 2016b.</mixed-citation></ref>
      <ref id="bib1.bibx60"><label>Thiele et al.(2017)Thiele, Grose, Samsu, Micklethwaite, Vollgger, and Cruden</label><mixed-citation>Thiele, S. T., Grose, L., Samsu, A., Micklethwaite, S., Vollgger, S. A., and Cruden, A. R.: Rapid, semi-automatic fracture and contact mapping for point clouds, images and geophysical data, Solid Earth, 8, 1241–1253, <ext-link xlink:href="https://doi.org/10.5194/se-8-1241-2017" ext-link-type="DOI">10.5194/se-8-1241-2017</ext-link>, 2017.</mixed-citation></ref>
      <ref id="bib1.bibx61"><label>Wellmann and Caumon(2018)</label><mixed-citation>Wellmann, F. and Caumon, G.: 3-D Structural geological models: Concepts, methods, and uncertainties, Adv. Geophys., 59, 1–121, <ext-link xlink:href="https://doi.org/10.1016/bs.agph.2018.09.001" ext-link-type="DOI">10.1016/bs.agph.2018.09.001</ext-link>, 2018.</mixed-citation></ref>
      <ref id="bib1.bibx62"><label>Wellmann and Regenauer-Lieb(2012)</label><mixed-citation>Wellmann, J. F. and Regenauer-Lieb, K.: Uncertainties have a meaning: Information entropy as a quality measure for 3-D geological models, Tectonophysics, 526–529, 207–216, <ext-link xlink:href="https://doi.org/10.1016/j.tecto.2011.05.001" ext-link-type="DOI">10.1016/j.tecto.2011.05.001</ext-link>, 2012.</mixed-citation></ref>
      <ref id="bib1.bibx63"><label>Wellmann et al.(2010)Wellmann, Horowitz, Schill, and Regenauer-Lieb</label><mixed-citation>Wellmann, J. F., Horowitz, F. G., Schill, E., and Regenauer-Lieb, K.: Towards incorporating uncertainty of structural data in 3D geological inversion, Tectonophysics, 490, 141–151, <ext-link xlink:href="https://doi.org/10.1016/j.tecto.2010.04.022" ext-link-type="DOI">10.1016/j.tecto.2010.04.022</ext-link>, 2010.</mixed-citation></ref>
      <ref id="bib1.bibx64"><label>Wellmann et al.(2014)Wellmann, Lindsay, Poh, and Jessell</label><mixed-citation>Wellmann, J. F., Lindsay, M., Poh, J., and Jessell, M.: Validating 3-D Structural Models with Geological Knowledge for Improved Uncertainty Evaluations, Energ. Proced., 59, 374–381, <ext-link xlink:href="https://doi.org/10.1016/j.egypro.2014.10.391" ext-link-type="DOI">10.1016/j.egypro.2014.10.391</ext-link>, 2014.</mixed-citation></ref>
      <ref id="bib1.bibx65"><label>Wendland(1995)</label><mixed-citation>Wendland, H.: Piecewise polynomial, positive definite and compactly supported radial functions of minimal degree, Adv. Comput. Math., 4, 389–396, <ext-link xlink:href="https://doi.org/10.1007/bf02123482" ext-link-type="DOI">10.1007/bf02123482</ext-link>, 1995.</mixed-citation></ref>
      <ref id="bib1.bibx66"><label>Xie et al.(2022)Xie, Takikawa, Saito, Litany, Yan, Khan, Tombari, Tompkin, Sitzmann, and Sridhar</label><mixed-citation>Xie, Y., Takikawa, T., Saito, S., Litany, O., Yan, S., Khan, N., Tombari, F., Tompkin, J., Sitzmann, V., and Sridhar, S.: Neural Fields in Visual Computing and Beyond, Comput. Graph. Forum, 41, 641–676, <ext-link xlink:href="https://doi.org/10.1111/cgf.14505" ext-link-type="DOI">10.1111/cgf.14505</ext-link>, 2022.</mixed-citation></ref>
      <ref id="bib1.bibx67"><label>Xu and Heagy(2025)</label><mixed-citation>Xu, A. and Heagy, L. J.: Toward Understanding the Benefits of Neural Network Parameterizations in Geophysical Inversions: A Study With Neural Fields, IEEE T. Geosci. Remote, 63, 1–14, <ext-link xlink:href="https://doi.org/10.1109/tgrs.2025.3583970" ext-link-type="DOI">10.1109/tgrs.2025.3583970</ext-link>, 2025.</mixed-citation></ref>
      <ref id="bib1.bibx68"><label>Yu et al.(2020)Yu, Kumar, Gupta, Levine, Hausman, and Finn</label><mixed-citation>Yu, T., Kumar, S., Gupta, A., Levine, S., Hausman, K., and Finn, C.: Gradient Surgery for Multi-Task Learning, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2001.06782" ext-link-type="DOI">10.48550/arXiv.2001.06782</ext-link>, 2020.</mixed-citation></ref>
      <ref id="bib1.bibx69"><label>Zhang et al.(2024)Zhang, Zhao, Yu, Lin, Chen, Zhao, and Zhang</label><mixed-citation>Zhang, X., Zhao, L., Yu, Y., Lin, X., Chen, Y., Zhao, H., and Zhang, Q.: LibMOON: A Gradient-based MultiObjective OptimizatioN Library in PyTorch, arXiv [preprint], <ext-link xlink:href="https://doi.org/10.48550/arXiv.2409.02969" ext-link-type="DOI">10.48550/arXiv.2409.02969</ext-link>, 2024.</mixed-citation></ref>

  </ref-list></back>
    <!--<article-title-html>Curlew 1.0: Spatio-temporal implicit geological modelling with neural fields in python</article-title-html>
<abstract-html/>
<ref-html id="bib1.bib1"><label>Beatson et al.(2001)Beatson, Light, and Billings</label><mixed-citation>
       Beatson, R. K., Light, W. A., and
Billings, S.: Fast Solution of the Radial Basis Function Interpolation Equations: Domain Decomposition Methods, SIAM
J. Sci. Comput., 22, 1717–1740, <a href="https://doi.org/10.1137/s1064827599361771" target="_blank">https://doi.org/10.1137/s1064827599361771</a>, 2001.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib2"><label>Bjerre et al.(2020)Bjerre, Kristensen, Engesgaard, and Højberg</label><mixed-citation>
       Bjerre, E.,
Kristensen, L. S., Engesgaard, P., and Højberg, A. L.: Drivers and barriers for taking account of geological
uncertainty in decision making for groundwater protection, Sci. Total Environ., 746, 141045,
<a href="https://doi.org/10.1016/j.scitotenv.2020.141045" target="_blank">https://doi.org/10.1016/j.scitotenv.2020.141045</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib3"><label>Bochner(1955)</label><mixed-citation>
       Bochner, S.: Harmonic Analysis and the Theory of Probability, University of
California Press, <a href="https://doi.org/10.1525/9780520345294" target="_blank">https://doi.org/10.1525/9780520345294</a>, 1955.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib4"><label>Bond(2015)</label><mixed-citation>
       Bond, C. E.: Uncertainty in structural interpretation: Lessons to be
learnt, J. Struct. Geol., 74, 185–200, <a href="https://doi.org/10.1016/j.jsg.2015.03.003" target="_blank">https://doi.org/10.1016/j.jsg.2015.03.003</a>, 2015.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib5"><label>Briggs(1974)</label><mixed-citation>
       Briggs, I. C.: Machine contouring using minimum curvature, Geophysics, 39, 39–48,
<a href="https://doi.org/10.1190/1.1440410" target="_blank">https://doi.org/10.1190/1.1440410</a>, 1974.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib6"><label>Calcagno et al.(2008)Calcagno, Chilès, Courrioux, and Guillen</label><mixed-citation>
       Calcagno, P.,
Chilès, J., Courrioux, G., and Guillen, A.: Geological modelling from field data and geological knowledge, Phys. Earth
Planet. In., 171, 147–157, <a href="https://doi.org/10.1016/j.pepi.2008.06.013" target="_blank">https://doi.org/10.1016/j.pepi.2008.06.013</a>, 2008.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib7"><label>Caumon et al.(2013)Caumon, Gray, Antoine, and Titeux</label><mixed-citation>
       Caumon, G., Gray, G.,
Antoine, C., and Titeux, M.-O.: Three-Dimensional Implicit Stratigraphic Model Building From Remote
Sensing Data on Tetrahedral Meshes: Theory and Application to a Regional Model of La Popa Basin,
NE Mexico, IEEE T. Geosci. Remote, 51, 1613–1621, <a href="https://doi.org/10.1109/tgrs.2012.2207727" target="_blank">https://doi.org/10.1109/tgrs.2012.2207727</a>, 2013.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib8"><label>Cavoretto et al.(2016)Cavoretto, De Rossi, and Perracchione</label><mixed-citation>
      
Cavoretto, R., De Rossi, A., and Perracchione, E.: Efficient computation of partition of unity interpolants through a block-based searching technique, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.1604.04585" target="_blank">https://doi.org/10.48550/arXiv.1604.04585</a>, 2016.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib9"><label>Czarnecki et al.(2017)Czarnecki, Osindero, Jaderberg, Świrszcz, and Pascanu</label><mixed-citation>
      
Czarnecki, W. M., Osindero, S., Jaderberg, M., Świrszcz, G., and Pascanu, R.: Sobolev Training for Neural Networks, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.1706.04859" target="_blank">https://doi.org/10.48550/arXiv.1706.04859</a>, 2017.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib10"><label>de la Varga and Wellmann(2016)</label><mixed-citation>
       de la Varga, M. and Wellmann, J. F.: Structural
geologic modeling as an inference problem: A Bayesian perspective, Interpretation, 4, SM1–SM16,
<a href="https://doi.org/10.1190/int-2015-0188.1" target="_blank">https://doi.org/10.1190/int-2015-0188.1</a>, 2016.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib11"><label>de la Varga et al.(2019)de la Varga, Schaaf, and Wellmann</label><mixed-citation>
       de la Varga, M.,
Schaaf, A., and Wellmann, F.: GemPy 1.0: open-source stochastic geological modeling and inversion, Geosci. Model Dev.,
12, 1–32, <a href="https://doi.org/10.5194/gmd-12-1-2019" target="_blank">https://doi.org/10.5194/gmd-12-1-2019</a>, 2019.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib12"><label>Emery and Lantuéjoul(2006)</label><mixed-citation>
       Emery, X. and Lantuéjoul, C.: TBSIM: A computer program for
conditional simulation of three-dimensional Gaussian random fields via the turning bands method, Comput. Geosci., 32,
1615–1628, <a href="https://doi.org/10.1016/j.cageo.2006.03.001" target="_blank">https://doi.org/10.1016/j.cageo.2006.03.001</a>, 2006.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib13"><label>Frank et al.(2007)Frank, Tertois, and Mallet</label><mixed-citation>
       Frank, T., Tertois, A.-L., and
Mallet, J.-L.: 3D-reconstruction of complex geological interfaces from irregularly distributed and noisy point data,
Comput. Geosci., 33, 932–943, <a href="https://doi.org/10.1016/j.cageo.2006.11.014" target="_blank">https://doi.org/10.1016/j.cageo.2006.11.014</a>, 2007.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib14"><label>Gao and Wellmann(2025)</label><mixed-citation>
       Gao, K. and Wellmann, F.: Fault representation in structural
modelling with implicit neural representations, Comput. Geosci., 199, 105911, <a href="https://doi.org/10.1016/j.cageo.2025.105911" target="_blank">https://doi.org/10.1016/j.cageo.2025.105911</a>, 2025.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib15"><label>Gao et al.(2026)Gao, Hillier, and Wellmann</label><mixed-citation>
       Gao, K., Hillier, M., and Wellmann, F.: Uncertainty
quantification using Hamiltonian Monte Carlo for structural geological modelling with implicit neural representations
(INR), Comput. Geosci., 209, 106123, <a href="https://doi.org/10.1016/j.cageo.2026.106123" target="_blank">https://doi.org/10.1016/j.cageo.2026.106123</a>, 2026.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib16"><label>Goan and Fookes(2020)</label><mixed-citation>
       Goan, E. and Fookes, C.: Bayesian Neural Networks: An Introduction and
Survey, Springer International Publishing, 45–87, <a href="https://doi.org/10.1007/978-3-030-42553-1_3" target="_blank">https://doi.org/10.1007/978-3-030-42553-1_3</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib17"><label>Godefroy et al.(2018)Godefroy, Caumon, Ford, Laurent, and Jackson</label><mixed-citation>
       Godefroy, G.,
Caumon, G., Ford, M., Laurent, G., and Jackson, C. A.-L.: A parametric fault displacement model to introduce kinematic
control into modeling faults from sparse data, Interpretation, 6, B1–B13, <a href="https://doi.org/10.1190/int-2017-0059.1" target="_blank">https://doi.org/10.1190/int-2017-0059.1</a>, 2018.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib18"><label>Gropp et al.(2020)Gropp, Yariv, Haim, Atzmon, and Lipman</label><mixed-citation>
      
Gropp, A., Yariv, L., Haim, N., Atzmon, M., and Lipman, Y.: Implicit Geometric Regularization for Learning Shapes, arXiv [preprint],
<a href="https://doi.org/10.48550/arXiv.2002.10099" target="_blank">https://doi.org/10.48550/arXiv.2002.10099</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib19"><label>Grose et al.(2017)Grose, Laurent, Aillères, Armit, Jessell, and Caumon</label><mixed-citation>
       Grose, L.,
Laurent, G., Aillères, L., Armit, R., Jessell, M., and Caumon, G.: Structural data constraints for implicit modeling
of folds, J. Struct. Geol., 104, 80–92, <a href="https://doi.org/10.1016/j.jsg.2017.09.013" target="_blank">https://doi.org/10.1016/j.jsg.2017.09.013</a>, 2017.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib20"><label>Grose et al.(2021a)Grose, Ailleres, Laurent, Caumon, Jessell, and Armit</label><mixed-citation>
       Grose, L.,
Ailleres, L., Laurent, G., Caumon, G., Jessell, M., and Armit, R.: Modelling of faults in LoopStructural 1.0,
Geosci. Model Dev., 14, 6197–6213, <a href="https://doi.org/10.5194/gmd-14-6197-2021" target="_blank">https://doi.org/10.5194/gmd-14-6197-2021</a>, 2021a.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib21"><label>Grose et al.(2021b)Grose, Ailleres, Laurent, and Jessell</label><mixed-citation>
       Grose, L.,
Ailleres, L., Laurent, G., and Jessell, M.: LoopStructural 1.0: time-aware geological modelling, Geosci. Model Dev.,
14, 3915–3937, <a href="https://doi.org/10.5194/gmd-14-3915-2021" target="_blank">https://doi.org/10.5194/gmd-14-3915-2021</a>, 2021b.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib22"><label>Hasan et al.(2022)Hasan, Khosravi, Hossain, Rahman, and Nahavandi</label><mixed-citation>
      
Hasan, M., Khosravi, A., Hossain, I., Rahman, A., and Nahavandi, S.: Controlled Dropout for Uncertainty Estimation, arXiv [preprint],
<a href="https://doi.org/10.48550/arXiv.2205.03109" target="_blank">https://doi.org/10.48550/arXiv.2205.03109</a>, 2022.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib23"><label>Heydari et al.(2019)Heydari, Thompson, and Mehmood</label><mixed-citation>
      
Heydari, A. A., Thompson, C. A., and Mehmood, A.: SoftAdapt: Techniques for Adaptive Loss Weighting of Neural Networks with Multi-Part Loss Functions, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.1912.12355" target="_blank">https://doi.org/10.48550/arXiv.1912.12355</a>, 2019.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib24"><label>Hillier et al.(2023)Hillier, Wellmann, De Kemp, Brodaric, Schetselaar, and Bédard</label><mixed-citation>
      
Hillier, M., Wellmann, F., de Kemp, E. A., Brodaric, B., Schetselaar, E., and Bédard, K.: GeoINR 1.0: an implicit
neural network approach to three-dimensional geological modelling, Geosci. Model Dev., 16, 6987–7012,
<a href="https://doi.org/10.5194/gmd-16-6987-2023" target="_blank">https://doi.org/10.5194/gmd-16-6987-2023</a>, 2023.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib25"><label>Hillier et al.(2014)Hillier, Schetselaar, de Kemp, and Perron</label><mixed-citation>
       Hillier, M. J.,
Schetselaar, E. M., de Kemp, E. A., and Perron, G.: Three-Dimensional Modelling of Geological Surfaces Using
Generalized Interpolation with Radial Basis Functions, Math. Geosci., 46, 931–953, <a href="https://doi.org/10.1007/s11004-014-9540-3" target="_blank">https://doi.org/10.1007/s11004-014-9540-3</a>,
2014.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib26"><label>Irakarama et al.(2018)Irakarama, Laurent, Renaudeau, and Caumon</label><mixed-citation>
      
Irakarama, M., Laurent, G., Renaudeau, J., and Caumon, G.: Finite Difference Implicit Modeling of Geological Structures, in: Proceedings, EAGE Publications BV, Copenhagen, Denmark, <a href="https://doi.org/10.3997/2214-4609.201800794" target="_blank">https://doi.org/10.3997/2214-4609.201800794</a>, 2018.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib27"><label>Kamath and Thiele(2026)</label><mixed-citation>
      
Kamath, A. and Thiele, S.: k4m4th/curlew_examples: curlew_examples, Zenodo [code], <a href="https://doi.org/10.5281/ZENODO.19002735" target="_blank">https://doi.org/10.5281/ZENODO.19002735</a>, 2026.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib28"><label>Kamath et al.(2025)Kamath, Thiele, Ugalde, Morris, Tolosana-Delgado, Kirsch, and
Gloaguen</label><mixed-citation>
       Kamath, A. V., Thiele, S. T., Ugalde, H., Morris, B., Tolosana-Delgado, R.,
Kirsch, M., and Gloaguen, R.: TensorWeave 1.0: Interpolating geophysical tensor fields with spatial neural networks,
EGUsphere [preprint], <a href="https://doi.org/10.5194/egusphere-2025-2345" target="_blank">https://doi.org/10.5194/egusphere-2025-2345</a>, 2025.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib29"><label>Lajaunie et al.(1997)Lajaunie, Courrioux, and Manuel</label><mixed-citation>
       Lajaunie, C., Courrioux, G.,
and Manuel, L.: Foliation fields and 3D cartography in geology: Principles of a method based on potential
interpolation, Math. Geol., 29, 571–584, <a href="https://doi.org/10.1007/BF02775087" target="_blank">https://doi.org/10.1007/BF02775087</a>, 1997.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib30"><label>Landgraf et al.(2022)Landgraf, Hornung, and Cabral</label><mixed-citation>
      
Landgraf, Z., Hornung, A. S., and Cabral, R. S.: PINs: Progressive Implicit Networks for Multi-Scale Neural Representations, arXiv [preprint],
<a href="https://doi.org/10.48550/arXiv.2202.04713" target="_blank">https://doi.org/10.48550/arXiv.2202.04713</a>, 2022.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib31"><label>Laurent(2016)</label><mixed-citation>
       Laurent, G.: Iterative Thickness Regularization of Stratigraphic
Layers in Discrete Implicit Modeling, Math. Geosci., 48, 811–833, <a href="https://doi.org/10.1007/s11004-016-9637-y" target="_blank">https://doi.org/10.1007/s11004-016-9637-y</a>,
publisher: Springer Science and Business Media LLC, 2016.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib32"><label>Laurent et al.(2013)Laurent, Caumon, Bouziat, and Jessell</label><mixed-citation>
       Laurent, G., Caumon, G.,
Bouziat, A., and Jessell, M.: A parametric method to model 3D displacements around faults with volumetric vector
fields, Tectonophysics, 590, 83–93, <a href="https://doi.org/10.1016/j.tecto.2013.01.015" target="_blank">https://doi.org/10.1016/j.tecto.2013.01.015</a>, 2013.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib33"><label>Li et al.(2017)Li, Xu, Taylor, Studer, and Goldstein</label><mixed-citation>
      
Li, H., Xu, Z., Taylor, G., Studer, C., and Goldstein, T.: Visualizing the Loss Landscape of Neural Nets, arXiv [preprint],
<a href="https://doi.org/10.48550/arXiv.1712.09913" target="_blank">https://doi.org/10.48550/arXiv.1712.09913</a>, 2017.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib34"><label>Lindi et al.(2024)Lindi, Aladejare, Ozoji, and Ranta</label><mixed-citation>
       Lindi, O. T., Aladejare, A. E.,
Ozoji, T. M., and Ranta, J.-P.: Uncertainty Quantification in Mineral Resource Estimation, Natural Resources
Research, 33, 2503–2526, <a href="https://doi.org/10.1007/s11053-024-10394-6" target="_blank">https://doi.org/10.1007/s11053-024-10394-6</a>, 2024.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib35"><label>Lindsay et al.(2013)Lindsay, Jessell, Ailleres, Perrouty, De Kemp, and Betts</label><mixed-citation>
      
Lindsay, M., Jessell, M., Ailleres, L., Perrouty, S., De Kemp, E., and Betts, P.: Geodiversity: Exploration of 3D
geological model space, Tectonophysics, 594, 27–37, <a href="https://doi.org/10.1016/j.tecto.2013.03.013" target="_blank">https://doi.org/10.1016/j.tecto.2013.03.013</a>, 2013.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib36"><label>Mallet(1989)</label><mixed-citation>
       Mallet, J.-L.: Discrete smooth interpolation, ACM T. Graphic., 8,
121–144, <a href="https://doi.org/10.1145/62054.62057" target="_blank">https://doi.org/10.1145/62054.62057</a>, 1989.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib37"><label>Mantoglou and Wilson(1982)</label><mixed-citation>
       Mantoglou, A. and Wilson, J. L.: The Turning Bands
Method for simulation of random fields using line generation by a spectral method, Water Resour. Res., 18,
1379–1394, <a href="https://doi.org/10.1029/WR018i005p01379" target="_blank">https://doi.org/10.1029/WR018i005p01379</a>, 1982.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib38"><label>Margossian(2019)</label><mixed-citation>
      
Margossian, C. C.: A Review of automatic differentiation and its efficient implementation, WIREs, 9, e1305, <a href="https://doi.org/10.1002/WIDM.1305" target="_blank">https://doi.org/10.1002/WIDM.1305</a>, 2019.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib39"><label>Misra(2019)</label><mixed-citation>
      
Misra, D.: Mish: A Self Regularized Non-Monotonic Activation Function, arXiv
[preprint], <a href="https://doi.org/10.48550/arXiv.1908.08681" target="_blank">https://doi.org/10.48550/arXiv.1908.08681</a>, 2019.


    </mixed-citation></ref-html>
<ref-html id="bib1.bib40"><label>Osher and Sethian(1988)</label><mixed-citation>
       Osher, S. and Sethian, J. A.: Fronts propagating with
curvature-dependent speed: Algorithms based on Hamilton-Jacobi formulations, J. Comput. Phys., 79, 12–49,
<a href="https://doi.org/10.1016/0021-9991(88)90002-2" target="_blank">https://doi.org/10.1016/0021-9991(88)90002-2</a>, 1988.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib41"><label>Paszke et al.(2019)Paszke, Gross, Massa, Lerer, Bradbury, Chanan, Killeen, Lin, Gimelshein, Antiga, Desmaison,
Köpf, Yang, DeVito, Raison, Tejani, Chilamkurthy, Steiner, Fang, Bai, and Chintala</label><mixed-citation>
      
Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., Köpf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B.,
Fang, L., Bai, J., and Chintala, S.: PyTorch: An Imperative Style, High-Performance Deep Learning Library, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.1912.01703" target="_blank">https://doi.org/10.48550/arXiv.1912.01703</a>, 2019.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib42"><label>Pérez-Díaz et al.(2020)Pérez-Díaz, Alcalde, and Bond</label><mixed-citation>
       Pérez-Díaz, L.,
Alcalde, J., and Bond, C. E.: Introduction: Handling uncertainty in the geosciences: identification, mitigation and
communication, Solid Earth, 11, 889–897, <a href="https://doi.org/10.5194/se-11-889-2020" target="_blank">https://doi.org/10.5194/se-11-889-2020</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib43"><label>Rahaman et al.(2019)Rahaman, Baratin, Arpit, Draxler, Lin, Hamprecht, Bengio, and
Courville</label><mixed-citation>
      
Rahaman, N., Baratin, A., Arpit, D., Draxler, F., Lin, M., Hamprecht, F. A.,
Bengio, Y., and Courville, A.: On the Spectral Bias of Neural Networks, in: Proceedings of the 36th International Conference on Machine Learning (ICML), Proceedings of Machine Learning Research, vol. 97, 5301–5310, <a href="https://doi.org/10.48550/arXiv.1806.08734" target="_blank">https://doi.org/10.48550/arXiv.1806.08734</a>, 2019.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib44"><label>Rahimi and Recht(2007)</label><mixed-citation>
       Rahimi, A. and Recht, B.: Random Features for Large-Scale
Kernel Machines, in: Advances in Neural Information Processing Systems, edited by: Platt, J., Koller, D.,
Singer, Y., and Roweis, S., vol. 20, Curran Associates, Inc.,
<a href="https://proceedings.neurips.cc/paper_files/paper/2007/file/013a006f03dbc5392effeb8f18fda755-Paper.pdf" target="_blank">https://proceedings.neurips.cc/paper_files/paper/2007/file/013a006f03dbc5392effeb8f18fda755-Paper.pdf</a>, 2007.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib45"><label>Raissi et al.(2019)Raissi, Perdikaris, and Karniadakis</label><mixed-citation>
       Raissi, M.,
Perdikaris, P., and Karniadakis, G. E.: Physics-informed neural networks: A deep learning framework for solving
forward and inverse problems involving nonlinear partial differential equations, J. Comput. Phys., 378, 686–707,
<a href="https://doi.org/10.1016/j.jcp.2018.10.045" target="_blank">https://doi.org/10.1016/j.jcp.2018.10.045</a>, 2019.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib46"><label>Ramachandran et al.(2017)Ramachandran, Zoph, and Le</label><mixed-citation>
      
Ramachandran, P., Zoph, B., and Le, Q. V.: Searching for Activation Functions, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.1710.05941" target="_blank">https://doi.org/10.48550/arXiv.1710.05941</a>, 2017.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib47"><label>Rasmussen and Williams(2005)</label><mixed-citation>
       Rasmussen, C. E. and Williams, C. K. I.: Gaussian Processes for
Machine Learning, The MIT Press, <a href="https://doi.org/10.7551/mitpress/3206.001.0001" target="_blank">https://doi.org/10.7551/mitpress/3206.001.0001</a>, 2005.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib48"><label>Rudin et al.(1992)Rudin, Osher, and Fatemi</label><mixed-citation>
       Rudin, L. I., Osher, S., and Fatemi, E.:
Nonlinear total variation based noise removal algorithms, Physica D, 60, 259–268,
<a href="https://doi.org/10.1016/0167-2789(92)90242-F" target="_blank">https://doi.org/10.1016/0167-2789(92)90242-F</a>, 1992.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib49"><label>Sandwell(1987)</label><mixed-citation>
       Sandwell, D. T.: Biharmonic spline interpolation of GEOS-3 and SEASAT altimeter
data, Geophys. Res. Lett., 14, 139–142, <a href="https://doi.org/10.1029/gl014i002p00139" target="_blank">https://doi.org/10.1029/gl014i002p00139</a>, 1987.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib50"><label>Shannon(1948)</label><mixed-citation>
       Shannon, C. E.: A Mathematical Theory of Communication, Bell
Syst. Tech. J., 27, 379–423, <a href="https://doi.org/10.1002/j.1538-7305.1948.tb01338.x" target="_blank">https://doi.org/10.1002/j.1538-7305.1948.tb01338.x</a>, 1948.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib51"><label>Sitzmann et al.(2020)Sitzmann, Martel, Bergman, Lindell, and Wetzstein</label><mixed-citation>
      
Sitzmann, V., Martel, J. N. P., Bergman, A. W., Lindell, D. B., and Wetzstein, G.: Implicit Neural Representations with Periodic Activation Functions, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.2006.09661" target="_blank">https://doi.org/10.48550/arXiv.2006.09661</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib52"><label>Smith et al.(2025)Smith, Horrocks, Akhtar, Holden, and Wedge</label><mixed-citation>
       Smith, L. T.,
Horrocks, T., Akhtar, N., Holden, E.-J., and Wedge, D.: Implicit neural representation for potential field geophysics,
Sci. Rep., 15, <a href="https://doi.org/10.1038/s41598-024-83979-z" target="_blank">https://doi.org/10.1038/s41598-024-83979-z</a>, 2025.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib53"><label>Smith and Wessel(1990)</label><mixed-citation>
       Smith, W. H. F. and Wessel, P.: Gridding with continuous curvature splines
in tension, Geophysics, 55, 293–305, <a href="https://doi.org/10.1190/1.1442837" target="_blank">https://doi.org/10.1190/1.1442837</a>, 1990.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib54"><label>Steno and Oldenburg(1671)</label><mixed-citation>
       Steno, N. and Oldenburg, H.: The prodromus to a dissertation concerning
solids naturally contained within solids: laying a foundation for the rendering a rational accompt both of the frame
and the several changes of the masse of the Earth, as also of the various productions in the same, Printed
by F. Winter, and are to be sold by Moses Pitt, <a href="https://doi.org/10.5962/bhl.title.145115" target="_blank">https://doi.org/10.5962/bhl.title.145115</a>, 1671.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib55"><label>Tacher et al.(2006)Tacher, Pomian-Srzednicki, and Parriaux</label><mixed-citation>
       Tacher, L.,
Pomian-Srzednicki, I., and Parriaux, A.: Geological uncertainties associated with 3-D subsurface models,
Comput. Geosci., 32, 212–221, <a href="https://doi.org/10.1016/j.cageo.2005.06.010" target="_blank">https://doi.org/10.1016/j.cageo.2005.06.010</a>, 2006.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib56"><label>Tancik et al.(2020)Tancik, Srinivasan, Mildenhall, Fridovich-Keil, Raghavan, Singhal, Ramamoorthi, Barron, and
Ng</label><mixed-citation>
      
Tancik, M., Srinivasan, P. P., Mildenhall, B., Fridovich-Keil, S., Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J. T., and Ng, R.: Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.2006.10739" target="_blank">https://doi.org/10.48550/arXiv.2006.10739</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib57"><label>Thiele and Kamath(2025)</label><mixed-citation>
      
Thiele, S. and Kamath, A.: samthiele/curlew: Curlew 1.00, Zenodo [code], <a href="https://doi.org/10.5281/ZENODO.17187731" target="_blank">https://doi.org/10.5281/ZENODO.17187731</a>, 2025.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib58"><label>Thiele et al.(2016a)Thiele, Jessell, Lindsay, Ogarko, Wellmann, and
Pakyuz-Charrier</label><mixed-citation>
       Thiele, S. T., Jessell, M. W., Lindsay, M., Ogarko, V., Wellmann, J. F.,
and Pakyuz-Charrier, E.: The topology of geology 1: Topological analysis, J. Struct. Geol., 91, 27–38,
<a href="https://doi.org/10.1016/j.jsg.2016.08.009" target="_blank">https://doi.org/10.1016/j.jsg.2016.08.009</a>, 2016a.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib59"><label>Thiele et al.(2016b)Thiele, Jessell, Lindsay, Wellmann, and Pakyuz-Charrier</label><mixed-citation>
      
Thiele, S. T., Jessell, M. W., Lindsay, M., Wellmann, J. F., and Pakyuz-Charrier, E.: The topology of geology 2:
Topological uncertainty, J. Struct. Geol., 91, 74–87, <a href="https://doi.org/10.1016/j.jsg.2016.08.010" target="_blank">https://doi.org/10.1016/j.jsg.2016.08.010</a>, 2016b.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib60"><label>Thiele et al.(2017)Thiele, Grose, Samsu, Micklethwaite, Vollgger, and Cruden</label><mixed-citation>
      
Thiele, S. T., Grose, L., Samsu, A., Micklethwaite, S., Vollgger, S. A., and Cruden, A. R.: Rapid, semi-automatic
fracture and contact mapping for point clouds, images and geophysical data, Solid Earth, 8, 1241–1253,
<a href="https://doi.org/10.5194/se-8-1241-2017" target="_blank">https://doi.org/10.5194/se-8-1241-2017</a>, 2017.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib61"><label>Wellmann and Caumon(2018)</label><mixed-citation>
       Wellmann, F. and Caumon, G.: 3-D Structural geological
models: Concepts, methods, and uncertainties, Adv. Geophys., 59, 1–121, <a href="https://doi.org/10.1016/bs.agph.2018.09.001" target="_blank">https://doi.org/10.1016/bs.agph.2018.09.001</a>, 2018.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib62"><label>Wellmann and Regenauer-Lieb(2012)</label><mixed-citation>
       Wellmann, J. F. and Regenauer-Lieb, K.:
Uncertainties have a meaning: Information entropy as a quality measure for 3-D geological models, Tectonophysics,
526–529, 207–216, <a href="https://doi.org/10.1016/j.tecto.2011.05.001" target="_blank">https://doi.org/10.1016/j.tecto.2011.05.001</a>, 2012.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib63"><label>Wellmann et al.(2010)Wellmann, Horowitz, Schill, and Regenauer-Lieb</label><mixed-citation>
       Wellmann, J. F.,
Horowitz, F. G., Schill, E., and Regenauer-Lieb, K.: Towards incorporating uncertainty of structural data in 3D
geological inversion, Tectonophysics, 490, 141–151, <a href="https://doi.org/10.1016/j.tecto.2010.04.022" target="_blank">https://doi.org/10.1016/j.tecto.2010.04.022</a>, 2010.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib64"><label>Wellmann et al.(2014)Wellmann, Lindsay, Poh, and Jessell</label><mixed-citation>
       Wellmann, J. F.,
Lindsay, M., Poh, J., and Jessell, M.: Validating 3-D Structural Models with Geological Knowledge for
Improved Uncertainty Evaluations, Energ. Proced., 59, 374–381, <a href="https://doi.org/10.1016/j.egypro.2014.10.391" target="_blank">https://doi.org/10.1016/j.egypro.2014.10.391</a>, 2014.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib65"><label>Wendland(1995)</label><mixed-citation>
       Wendland, H.: Piecewise polynomial, positive definite and compactly
supported radial functions of minimal degree, Adv. Comput. Math., 4, 389–396,
<a href="https://doi.org/10.1007/bf02123482" target="_blank">https://doi.org/10.1007/bf02123482</a>, 1995.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib66"><label>Xie et al.(2022)Xie, Takikawa, Saito, Litany, Yan, Khan, Tombari, Tompkin, Sitzmann, and
Sridhar</label><mixed-citation>
       Xie, Y., Takikawa, T., Saito, S., Litany, O., Yan, S., Khan, N., Tombari, F.,
Tompkin, J., Sitzmann, V., and Sridhar, S.: Neural Fields in Visual Computing and Beyond,
Comput. Graph. Forum, 41, 641–676, <a href="https://doi.org/10.1111/cgf.14505" target="_blank">https://doi.org/10.1111/cgf.14505</a>, 2022.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib67"><label>Xu and Heagy(2025)</label><mixed-citation>
       Xu, A. and Heagy, L. J.: Toward Understanding the Benefits of
Neural Network Parameterizations in Geophysical Inversions: A Study With Neural Fields, IEEE
T. Geosci. Remote, 63, 1–14, <a href="https://doi.org/10.1109/tgrs.2025.3583970" target="_blank">https://doi.org/10.1109/tgrs.2025.3583970</a>, 2025.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib68"><label>Yu et al.(2020)Yu, Kumar, Gupta, Levine, Hausman, and Finn</label><mixed-citation>
      
Yu, T., Kumar, S., Gupta, A., Levine, S., Hausman, K., and Finn, C.: Gradient Surgery for Multi-Task Learning, arXiv [preprint],
<a href="https://doi.org/10.48550/arXiv.2001.06782" target="_blank">https://doi.org/10.48550/arXiv.2001.06782</a>, 2020.

    </mixed-citation></ref-html>
<ref-html id="bib1.bib69"><label>Zhang et al.(2024)Zhang, Zhao, Yu, Lin, Chen, Zhao, and Zhang</label><mixed-citation>
      
Zhang, X., Zhao, L., Yu, Y., Lin, X., Chen, Y., Zhao, H., and Zhang, Q.: LibMOON: A Gradient-based MultiObjective OptimizatioN Library in PyTorch, arXiv [preprint], <a href="https://doi.org/10.48550/arXiv.2409.02969" target="_blank">https://doi.org/10.48550/arXiv.2409.02969</a>, 2024.

    </mixed-citation></ref-html></article>
