diff --git a/.gitattributes b/.gitattributes index 77a616165898f4788082f580f1b51327ecc5d8be..1ef325f1b111266a6b26e0196871bd78baa8c2f3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,5 +1,59 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.lz4 filter=lfs diff=lfs merge=lfs -text +*.mds filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +# Audio files - uncompressed +*.pcm filter=lfs diff=lfs merge=lfs -text +*.sam filter=lfs diff=lfs merge=lfs -text +*.raw filter=lfs diff=lfs merge=lfs -text +# Audio files - compressed +*.aac filter=lfs diff=lfs merge=lfs -text +*.flac filter=lfs diff=lfs merge=lfs -text +*.mp3 filter=lfs diff=lfs merge=lfs -text +*.ogg filter=lfs diff=lfs merge=lfs -text +*.wav filter=lfs diff=lfs merge=lfs -text +# Image files - uncompressed +*.bmp filter=lfs diff=lfs merge=lfs -text *.gif filter=lfs diff=lfs merge=lfs -text -*.webp filter=lfs diff=lfs merge=lfs -text *.png filter=lfs diff=lfs merge=lfs -text +*.tiff filter=lfs diff=lfs merge=lfs -text +# Image files - compressed *.jpg filter=lfs diff=lfs merge=lfs -text *.jpeg filter=lfs diff=lfs merge=lfs -text +*.webp filter=lfs diff=lfs merge=lfs -text +# Video files - compressed +*.mp4 filter=lfs diff=lfs merge=lfs -text +*.webm filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 45e71b254015b6ac179415301d410a977fcd7549..0000000000000000000000000000000000000000 --- a/.gitignore +++ /dev/null @@ -1,210 +0,0 @@ -# Ignore W&B -wandb/** - -# Mac os files -.DS_Store - -# Ignore .specstory directory -.specstory/ - -# Local data store -**.npz -**.json - -# Ignore model files -**.pt -**.pth - -# Ignore local scripts (and local images) -local_scripts/** -tests/test-images -# **.png -**.jpeg -**.pdf - - -#ignore runner scripts -runner* -slurm* -# Ignore local directories -notebooks/** -local-scripts/** -.vscode/** - -# Logging folders -test-images/** -logs/** -wandb/** -outputs/** - -# wandb artifacts containing model checkpoints -artifacts/** - -# Byte-compiled / optimized / DLL files -__pycache__/ 
-*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints -temp.ipynb - -# Model checkpoints -*ckpt - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ -# images/*.gif -# images/*.png -# images/*.gif -# images/*.png diff --git a/DATA_GUIDE.md b/DATA_GUIDE.md deleted file mode 100644 index 39c94c3c345a656eee50079a35df0291c2fde704..0000000000000000000000000000000000000000 --- a/DATA_GUIDE.md +++ /dev/null @@ -1,825 +0,0 @@ -# PDEInvBench Data Guide -Data guide for the dataset accompanying PDEInvBench. 
- - -## Table of Contents - -1. [Dataset Link](#1-dataset-link) -2. [Downloading Data](#2-downloading-data) -3. [Overview](#3-overview) - - [3.1 Data Format](#31-data-format) - - [3.2 Parameter Extraction from Filenames](#32-parameter-extraction-from-filenames) - - [3.3 Working with High-Resolution Data](#33-working-with-high-resolution-data) - - [3.4 Data Loading Parameters](#34-data-loading-parameters) - - [3.5 Parameter Normalization](#35-parameter-normalization) -4. [Datasets](#4-datasets) - - [4a. 2D Reaction Diffusion](#4a-2d-reaction-diffusion) - - [4b. 2D Navier Stokes (Unforced)](#4b-2d-navier-stokes-unforced) - - [4c. 2D Turbulent Flow (Forced Navier Stokes)](#4c-2d-turbulent-flow-forced-navier-stokes) - - [4d. 1D Korteweg-De Vries](#4d-1d-korteweg-de-vries) - - [4e. 2D Darcy Flow](#4e-2d-darcy-flow) -5. [Adding a New Dataset](#5-adding-a-new-dataset) - - -## 1. Dataset Link - -The dataset used in this project can be found here: -https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main - -## 2. Downloading Data - -We provide a python script: [`huggingface_pdeinv_download.py`](huggingface_pdeinv_download.py) to batch download our hugging-face data. We will update the readme of our hugging-face dataset and our github repo to reflect this addition. To run this: - -```bash -pip install huggingface_hub -python3 huggingface_pdeinv_download.py [--dataset DATASET_NAME] [--split SPLIT] [--local-dir PATH] -``` - -**Available datasets:** `darcy-flow-241`, `darcy-flow-421`, `korteweg-de-vries-1d`, `navier-stokes-forced-2d-2048`, `navier-stokes-forced-2d`, `navier-stokes-unforced-2d`, `reaction-diffusion-2d-du-512`, `reaction-diffusion-2d-du`, `reaction-diffusion-2d-k-512`, `reaction-diffusion-2d-k` - -**Available splits:** `*` (all), `train`, `validation`, `test`, `out_of_distribution`, `out_of_distribution_extreme` - - -## 3. Overview - -The PDEInvBench dataset contains five PDE systems spanning parabolic, hyperbolic, and elliptic classifications, designed for benchmarking inverse parameter estimation. 
- -### Dataset Scale and Scope - -The dataset encompasses **over 1.2 million individual simulations** across five PDE systems, with varying spatial and temporal resolutions: - -- **2D Reaction Diffusion**: 28×28×27 = 21,168 parameter combinations × 5 trajectories = 105,840 simulations -- **2D Navier Stokes**: 101 parameter values × 192 trajectories = 19,392 simulations -- **2D Turbulent Flow**: 120 parameter values × 108 trajectories = 12,960 simulations -- **1D Korteweg-De Vries**: 100 parameter values × 100 trajectories = 10,000 simulations -- **2D Darcy Flow**: 2,048 unique coefficient fields - -### Multi-Resolution Architecture - -The dataset provides multiple spatial resolutions for each system, enabling studies on resolution-dependent generalization: - -- **Low Resolution**: 64×64 (2D systems), 256 (1D KdV), 241×241 (Darcy Flow) -- **Medium Resolution**: 128×128 (2D systems), 256×256 (Turbulent Flow) -- **High Resolution**: 256×256, 512×512, 2048x2048 (2D systems), 421×421 (Darcy Flow) - -### Physical and Mathematical Diversity - -**Parabolic Systems** (Time-dependent, diffusive): -- **2D Reaction Diffusion**: Chemical pattern formation with Fitzhugh-Nagumo dynamics -- **2D Navier Stokes**: Fluid flow without external forcing -- **2D Turbulent Flow**: Forced fluid dynamics with Kolmogorov forcing - -**Hyperbolic Systems** (Wave propagation): -- **1D Korteweg-De Vries**: Soliton dynamics in shallow water waves - -**Elliptic Systems** (Steady-state): -- **2D Darcy Flow**: Groundwater flow through porous media - -### Parameter Space Coverage - -The dataset systematically explores parameter spaces across different physical regimes: - -- **Reaction Diffusion**: k ∈ [0.005,0.1], Du ∈ [0.01,0.5], Dv ∈ [0.01,0.5] (Turing bifurcations) -- **Navier Stokes**: ν ∈ [10⁻⁴,10⁻²] (Reynolds: 80-8000, laminar to transitional) -- **Turbulent Flow**: ν ∈ [10⁻⁵,10⁻²] (fully developed turbulence) -- **Korteweg-De Vries**: δ ∈ [0.8,5] (dispersion strength in shallow water) -- **Darcy Flow**: Piecewise constant diffusion coefficients (porous media heterogeneity) - -### Evaluation Framework - -The dataset implements a sophisticated three-tier evaluation system for comprehensive generalization testing: - -1. **In-Distribution (ID)**: Parameters within training ranges for baseline performance -2. **Out-of-Distribution (Non-Extreme)**: Middle-range parameters excluded from training -3. **Out-of-Distribution (Extreme)**: Extremal parameter values for stress testing - -This framework enables systematic evaluation of model robustness across parameter space, critical for real-world deployment where models must generalize beyond training distributions. - - -### Data Organization and Accessibility - -The dataset is organized in a standardized HDF5 format with: - -- **Hierarchical Structure**: Train/validation/test splits with consistent naming conventions -- **Parameter Encoding**: Filenames encode parameter values for easy parsing -- **Multi-Channel Support**: 2D systems support multiple solution channels (velocity components, chemical species) -- **Grid Information**: Complete spatial and temporal coordinate information -- **Normalization Statistics**: Pre-computed parameter normalization for consistent preprocessing - -### Key Features for Inverse Problem Benchmarking - -1. **Multi-Physics Coverage**: Spans chemical, fluid, wave, and porous media physics -2. **Resolution Scalability**: Enables studies on resolution-dependent model behavior -3. 
**Parameter Diversity**: Systematic exploration of parameter spaces across physical regimes -4. **Generalization Testing**: Built-in evaluation framework for out-of-distribution performance -5. **Computational Efficiency**: Optimized data loading and preprocessing pipelines -6. **Reproducibility**: Complete documentation of generation parameters and solver configurations - -This comprehensive dataset provides researchers with a unified platform for developing and evaluating inverse problem solving methods across diverse scientific domains, enabling systematic comparison of approaches and identification of fundamental limitations in current methodologies. - -### 3.1 Data Format - -All datasets are stored in HDF5 format with specific structure depending on the PDE system. - -#### Directory Structure - -Datasets should be organized in the following directory structure: - -``` -/path/to/data/ -├── train/ -│ ├── param_file_1.h5 -│ ├── param_file_2.h5 -│ └── ... -├── validation/ -│ ├── param_file_3.h5 -│ └── ... -└── test/ - ├── param_file_4.h5 - └── ... -``` - -### 3.2 Parameter Extraction from Filenames - -Parameters are extracted from filenames using pattern matching. For example: - -- **2D Reaction Diffusion**: `Du=0.1_Dv=0.2_k=0.05.h5` - - Du = 0.1, Dv = 0.2, k = 0.05 - -- **2D Navier Stokes**: `83.0.h5` - - Reynolds number = 83.0 - -- **1D KdV**: `delta=3.5_ic=42.h5` - - δ = 3.5 - -### 3.3 Working with High-Resolution Data - -For high-resolution datasets, we provide configurations for downsampling: - -| PDE System | Original Resolution | High-Resolution | -|------------|:-------------------:|:---------------:| -| 2D Reaction Diffusion | 128×128 | 512×512 | -| 2D Navier Stokes | 64×64 | 256×256 | -| 2D Turbulent Flow | 64x64 | 2048x2048 | -| Darcy Flow | 241×241 | 421×421 | - -When working with high-resolution data, set the following parameters: - -```bash -high_resolution=True -data.downsample_factor=4 # e.g., for 512×512 → 128×128 -data.batch_size=2 # Reduce batch size for GPU memory -``` - -### 3.4 Data Loading Parameters - -Key parameters for loading data: - -- `data.every_nth_window`: Controls sampling frequency of time windows -- `data.frac_ics_per_param`: Fraction of initial conditions per parameter to use -- `data.frac_param_combinations`: Fraction of parameter combinations to use -- `data.train_window_end_percent`: Percentage of trajectory used for training -- `data.test_window_start_percent`: Percentage where test window starts - -### 3.5 Parameter Normalization - -Parameters are normalized using the following statistics, where the mean and standard deviation are computed using the span of the parameters in the dataset: - -```python -PARAM_NORMALIZATION_STATS = { - PDE.ReactionDiffusion2D: { - "k": (0.06391126306498819, 0.029533048151465856), # (mean, std) - "Du": (0.3094992685910578, 0.13865605073673604), # (mean, std) - "Dv": (0.259514500345804, 0.11541850276902947), # (mean, std) - }, - PDE.NavierStokes2D: {"re": (1723.425, 1723.425)}, # (mean, std) - PDE.TurbulentFlow2D: {"nu": (0.001372469573118451, 0.002146258280849241)}, - PDE.KortewegDeVries1D: {"delta": (2.899999997019768, 1.2246211546444339)}, - # Add more as needed -} -``` - -## 4. Datasets - -This section provides detailed information about each PDE system in the dataset. Each subsection includes visualizations, descriptions, and technical specifications. - -### 4a. 
2D Reaction Diffusion - -2DRD-Activator -2DRD-Inhibitor - -**Description:** The 2D Reaction-Diffusion system models chemical reactions with spatial diffusion using the Fitzhugh-Nagumo equations. This dataset contains two-channel solutions (activator u and inhibitor v) with parameters k (threshold for excitement), Du (activator diffusivity), and Dv (inhibitor diffusivity). The system exhibits complex pattern formation including spots, stripes, and labyrinthine structures, spanning from dissipative to Turing bifurcations. - -**Mathematical Formulation:** -The activator u and inhibitor v coupled system follows: - -``` -∂tu = Du∂xxu + Du∂yyu + Ru -∂tv = Dv∂xxv + Dv∂yyv + Rv -``` - -where Ru and Rv are defined by the Fitzhugh-Nagumo equations: - -``` -Ru(u,v) = u - u³ - k - v -Rv(u,v) = u - v -``` - -**Parameters of Interest:** -- **Du**: Activator diffusion coefficient -- **Dv**: Inhibitor diffusion coefficient -- **k**: Threshold for excitement - -**Data Characteristics:** -- Partial Derivatives: 5 -- Time-dependent: Yes (parabolic) -- Spatial Resolutions: 128×128, 512x512 -- Parameters: k ∈ [0.005,0.1], Du ∈ [0.01,0.5], Dv ∈ [0.01,0.5] -- Temporal Resolution: 0.049/5 seconds -- Parameter Values: k - 28, Du - 28, Dv - 27 -- Initial Conditions/Trajectories: 5 - -**Evaluation Splits:** -- **Test (ID)**: k ∈ [0.01,0.04] ∪ [0.08,0.09], Du ∈ [0.08,0.2] ∪ [0.4,0.49], Dv ∈ [0.08,0.2] ∪ [0.4,0.49] -- **OOD (Non-Extreme)**: k ∈ [0.04,0.08], Du ∈ [0.2,0.4], Dv ∈ [0.2,0.4] -- **OOD (Extreme)**: k ∈ [0.001,0.01] ∪ [0.09,0.1], Du ∈ [0.02,0.08] ∪ [0.49,0.5], Dv ∈ [0.02,0.08] ∪ [0.49,0.5] - -**Generation Parameters:** -- **Solver**: Explicit Runge-Kutta method of order 5(4) (RK45) -- **Error Tolerance**: Relative error tolerance of 10⁻⁶ -- **Spatial Discretization**: Finite Volume Method (FVM) with uniform 128×128 grid -- **Domain**: [-1,1] × [-1,1] with cell size Δx = Δy = 0.015625 -- **Burn-in Period**: 1 simulation second -- **Dataset Simulation Time**: [0,5] seconds, 101 time steps -- **Nominal Time Step**: Δt ≈ 0.05 seconds (adaptive) -- **Generation Time**: ≈ 1 week on CPU - -**File Structure:** -``` -filename: Du=0.1_Dv=0.2_k=0.05.h5 -``` -Contents: -- `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels] -- `0001/grid/x`: x-coordinate grid points -- `0001/grid/y`: y-coordinate grid points -- `0001/grid/t`: Time points - -### 4b. 2D Navier Stokes (Unforced) - -2DNS - -**Description:** The 2D Navier-Stokes equations describe incompressible fluid flow without external forcing. This dataset contains velocity field solutions with varying Reynolds numbers, showcasing different flow regimes from laminar to transitional flows. 
-
-**Mathematical Formulation:**
-We consider the vorticity form of the unforced Navier-Stokes equations:
-
-```
-∂w(t,x,y)/∂t + u(t,x,y)·∇w(t,x,y) = νΔw(t,x,y)
-```
-
-for t ∈ [0,T] and (x,y) ∈ (0,1)², with auxiliary conditions:
-- w = ∇ × u
-- ∇ · u = 0
-- w(0,x,y) = w₀(x,y) (Initial Condition)
-
-**Parameters of Interest:**
-- **ν**: The physical parameter of interest, representing viscosity
-
-**Data Characteristics:**
-- Partial Derivatives: 3
-- Time-dependent: Yes (parabolic)
-- Spatial Resolutions: 64×64, 256×256
-- Parameters: ν ∈ [10⁻⁴,10⁻²] (Reynolds: 80-8000)
-- Temporal Resolution: 0.0468/3 seconds
-- Parameter Values: 101
-- Initial Conditions/Trajectories: 192
-
-The files are stored at a spatial resolution of 256×256 and are later downsampled to 64×64 using SciPy's decimate.
-
-
-**Evaluation Splits:**
-- **Test (ID)**: ν ∈ [10⁻³·⁸, 10⁻³·²] ∪ [10⁻²·⁸, 10⁻²·²]
-- **OOD (Non-Extreme)**: ν ∈ [10⁻³·², 10⁻²·⁸]
-- **OOD (Extreme)**: ν ∈ [10⁻⁴, 10⁻³·⁸] ∪ [10⁻²·², 10⁻²]
-
-**Generation Parameters:**
-- **Solver**: Pseudo-spectral solver with Crank-Nicolson time-stepping
-- **Implementation**: Written in Jax and GPU-accelerated
-- **Generation Time**: ≈ 3.5 GPU days (batch size=32)
-- **Burn-in Period**: 15 simulation seconds
-- **Saved Data**: Next 3 simulation seconds saved as dataset
-- **Initial Conditions**: Sampled according to Gaussian random field (length scale=0.8)
-- **Recording**: Solution recorded every 1 simulation second
-- **Simulation dt**: 1e-4
-- **Resolution**: 256×256
-
-**File Structure:**
-```
-filename: 83.0.h5
-```
-Contents:
-- `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels]
-- `0001/grid/x`: x-coordinate grid points
-- `0001/grid/y`: y-coordinate grid points
-- `0001/grid/t`: Time points
-
-### 4c. 2D Turbulent Flow (Forced Navier Stokes)
-
-2DTF
-
-**Description:** The 2D Turbulent Flow dataset represents forced Navier-Stokes equations that generate fully developed turbulent flows. This dataset is particularly valuable for studying complex, multi-scale fluid dynamics and turbulent phenomena. All solutions exhibit turbulence across various Reynolds numbers.
-
-**Mathematical Formulation:**
-The forced Navier-Stokes equations with the Kolmogorov forcing function are similar to the unforced case with an additional forcing term:
-
-```
-∂ₜw + u·∇w = νΔw + f(k,y) - αw
-```
-
-where the forcing function f(k,y) is defined as:
-```
-f(k,y) = -kcos(ky)
-```
-
-**Parameters of Interest:**
-- **ν**: Kinematic viscosity (similar to unforced NS)
-- **α**: Drag coefficient (fixed at α = 0.1)
-- **k**: Forced wavenumber (fixed at k = 2)
-
-The drag coefficient α primarily serves to keep the total energy of the system constant, acting as drag. The task is to predict ν.
-
-**Numerical Convergence**
-We examine convergence across all solutions we generated. However, at the spatial and temporal resolution used to produce this dataset, simulations with kinematic viscosity ν < 5e-4 may not be fully converged due to the fine-scale turbulence dynamics. We include all generated trajectories in the training set to maximize coverage of the parameter space and to expose models to a broader range of flow regimes. Nevertheless, we recommend restricting quantitative evaluation and model selection to runs with ν >= 5e-4. For more details, please see our paper.
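A minimal sketch of how this cutoff could be applied when assembling an evaluation file list is shown below, assuming the `nu=<value>.h5` filename convention described under File Structure; the helper function itself is illustrative and not part of the released scripts:

```python
import glob
import os

NU_MIN = 5e-4  # recommended lower bound on kinematic viscosity for evaluation


def converged_turbulent_flow_files(data_dir: str, nu_min: float = NU_MIN) -> list[str]:
    """Return the turbulent-flow HDF5 files whose viscosity meets the cutoff."""
    kept = []
    for path in sorted(glob.glob(os.path.join(data_dir, "*.h5"))):
        name = os.path.basename(path)
        if not name.startswith("nu="):
            continue  # skip files that do not follow the nu=<value>.h5 convention
        nu = float(name[len("nu="):-len(".h5")])  # e.g. "nu=0.001.h5" -> 0.001
        if nu >= nu_min:
            kept.append(path)
    return kept
```

Files excluded this way can still be used for training, in line with the recommendation above.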
- -**Data Characteristics:** -- Partial Derivatives: 3 -- Time-dependent: Yes (parabolic) -- Spatial Resolutions: 64x64, 2048x2048 -- Parameters: ν ∈ [10⁻⁵,10⁻²] -- Temporal Resolution: 0.23/14.75 seconds -- Parameter Values: 120 -- Initial Conditions/Trajectories: 108 - -**Evaluation Splits:** -- **Test (ID)**: ν ∈ [10⁻⁴·⁷, 10⁻³·⁸] ∪ [10⁻³·², 10⁻²·³] -- **OOD (Non-Extreme)**: ν ∈ [10⁻³·⁸, 10⁻³·²] -- **OOD (Extreme)**: ν ∈ [10⁻⁵, 10⁻⁴·⁷] ∪ [10⁻²·³, 10⁻²] - -**Generation Parameters:** -- **Solver**: Pseudo-spectral solver with Crank-Nicolson time-stepping -- **Implementation**: Written in Jax (leveraging Jax-CFD), similar to 2D NS -- **Generation Time**: ≈ 4 GPU days (A100) -- **Burn-in Period**: 40 simulation seconds -- **Saved Data**: Next 15 simulation seconds saved as dataset -- **Simulator Resolution**: 256×256 -- **Downsampling**: Downsamples to 64×64 before saving -- **Temporal Resolution (Saved)**: ∂t = 0.25 simulation seconds - -**File Structure:** -``` -filename: nu=0.001.h5 -``` -Contents: -- `0001/data`: Solution field [time, spatial_dim_1, spatial_dim_2, channels] -- `0001/grid/x`: x-coordinate grid points -- `0001/grid/y`: y-coordinate grid points -- `0001/grid/t`: Time points - -### 4d. 1D Korteweg-De Vries - -KdV - -**Description:** The Korteweg-De Vries (KdV) equation is a nonlinear partial differential equation that describes shallow water waves and solitons. This 1D dataset contains soliton solutions with varying dispersion parameters, demonstrating wave propagation and interaction phenomena. - -**Mathematical Formulation:** -KdV is a 1D PDE representing waves on a shallow-water surface. The governing equation follows the form: - -``` -0 = ∂ₜu + u·∂ₓu + δ²∂ₓₓₓu -``` - -**Parameters of Interest:** -- **δ**: The physical parameter representing the strength of the dispersive effect on the system -- In shallow water wave theory, δ is a unit-less quantity roughly indicating the relative depth of the water - -**Data Characteristics:** -- Partial Derivatives: 3 -- Time-dependent: Yes (hyperbolic) -- Spatial Resolution: 256 -- Parameters: δ ∈ [0.8,5] -- Temporal Resolution: 0.73/102 seconds -- Parameter Values: 100 -- Initial Conditions/Trajectories: 100 - -**Evaluation Splits:** -- **Test (ID)**: δ ∈ [1.22, 2.48] ∪ [3.32, 4.58] -- **OOD (Non-Extreme)**: δ ∈ [2.48, 3.32] -- **OOD (Extreme)**: δ ∈ [0.8, 1.22] ∪ [4.58, 5] - -**Generation Parameters:** -- **Domain**: Periodic domain [0,L] -- **Spatial Discretization**: Pseudospectral method with Fourier basis (Nₓ = 256 grid points) -- **Time Integration**: Implicit Runge-Kutta method (Radau IIA, order 5) -- **Implementation**: SciPy's `solve_ivp` on CPU -- **Generation Time**: ≈ 12 hours -- **Burn-in Period**: 40 simulation seconds - -**Initial Conditions:** -Initial conditions are sampled from a distribution over a truncated Fourier Series: - -``` -u₀(x) = Σ_{k=1}^K A_k sin(2πl_k x/L + φ_k) -``` - -where: -- A_k, φ_k ~ U(0,1) -- l_k ~ U(1,3) - -**File Structure:** -``` -filename: delta=3.5_ic=42.h5 -``` -Contents: -- `tensor`: Solution field with shape [time, spatial_dim] -- `x-coordinate`: Spatial grid points -- `t-coordinate`: Time points - -### 4e. 2D Darcy Flow - -2DDF - -**Description:** The 2D Darcy Flow dataset represents steady-state flow through porous media with piecewise constant diffusion coefficients. This time-independent system is commonly used in groundwater flow modeling and subsurface transport problems. All solutions converge to a non-trivial steady-state solution based on the diffusion coefficient field. 
- -**Mathematical Formulation:** -The 2D steady-state Darcy flow equation on a unit box Ω = (0,1)² is a second-order linear elliptic PDE with Dirichlet boundary conditions: - -``` --∇·(a(x)∇u(x)) = f(x), for x ∈ Ω -u(x) = 0, for x ∈ ∂Ω -``` - -where: -- a ∈ L∞((0,1)²;R⁺) is a piecewise constant diffusion coefficient -- u(x) is the pressure field -- f(x) = 1 is a fixed forcing function - -**Parameters of Interest:** -- **a(x)**: Piecewise constant diffusion coefficient field (spatially varying parameter) - -**Data Characteristics:** -- Partial Derivatives: 2 -- Time-dependent: No (elliptic) -- Spatial Resolutions: 241×241, 421×421 -- Parameters: Piecewise constant diffusion coefficient a ∈ L∞((0,1)²;R⁺) -- Temporal Resolution: N/A (steady-state) -- Parameter Values: 2048 -- Initial Conditions/Trajectories: N/A - -**Evaluation Splits:** - -Unlike time-dependent systems with scalar parameters, Darcy Flow does not admit parameter splits based on numeric ranges. Instead, splits are defined using a derived statistic of the coefficient field. - -Let \( r(a) \) denote the fraction of grid points in the coefficient field \( a(x) \) that take the maximum value (12). -This statistic is approximately normally distributed across coefficient fields. - -Splits are defined as: - -- **Test (ID):** Coefficient fields whose \( r(a) \) lies within the central mass of the distribution -- **OOD (Non-Extreme):** Not applicable -- **OOD (Extreme):** Coefficient fields whose \( r(a) \) lies in the tails beyond \( \pm 1.5\sigma \) - - -**Generation Parameters:** -- **Solver**: Second-order finite difference method -- **Implementation**: Originally written in Matlab, runs on CPU -- **Resolution**: 421×421 (original), with lower resolution dataset generated by downsampling -- **Coefficient Field Sampling**: a(x) is sampled from μ = Γ(N(0, -Δ + 9I)⁻²) -- **Gamma Mapping**: Element-wise map where a_i ~ N(0, -Δ + 9I)⁻² → {3,12} - - a_i → 12 when a_i ≥ 0 - - a_i → 3 when a_i < 0 -- **Boundary Conditions**: Zero Neumann boundary conditions on the Laplacian over the coefficient field - -**File Structure:** -``` -filename: sample_1024.h5 -``` -Contents: -- `coeff`: Piecewise constant coefficient field -- `sol`: Solution field - - -## 5. Adding a New Dataset - -The PDEInvBench framework is designed to be modular, allowing you to add new PDE systems. This section describes how to add a new dataset to the repository. For information about data format requirements, see [Section 4.1](#41-data-format). - -### Table of Contents - - [Step 1: Add PDE Type to Utils](#step-1-add-pde-type-to-utils) - - [Step 2: Add PDE Attributes](#step-2-add-pde-attributes) - - [Step 3: Add Parameter Normalization Stats](#step-3-add-parameter-normalization-stats) - - [Step 4: Add Parameter Extraction Logic](#step-4-add-parameter-extraction-logic) - - [Step 5: Create a Dataset Handler](#step-5-create-a-dataset-handler-if-needed) - - [Step 6: Create a Data Configuration](#step-6-create-a-data-configuration) - - [Step 7: Add Residual Functions](#step-7-add-residual-functions) - - [Step 8: Create a Combined Configuration](#step-8-create-a-combined-configuration) - - [Step 9: Generate and Prepare Data](#step-9-generate-and-prepare-data) - - [Step 10: Run Experiments](#step-10-run-experiments) - - [Data Format Requirements](#data-format-requirements) - -### Step 1: Add PDE Type to Utils - -First, add your new PDE system to `pdeinvbench/utils/types.py`: - -```python -class PDE(enum.Enum): - """ - Describes which PDE system currently being used. 
- """ - # Existing PDEs... - ReactionDiffusion1D = "Reaction Diffusion 1D" - ReactionDiffusion2D = "Reaction Diffusion 2D" - NavierStokes2D = "Navier Stokes 2D" - # Add your new PDE - YourNewPDE = "Your New PDE Description" -``` - -### Step 2: Add PDE Attributes - -Update the attribute dictionaries in `pdeinvbench/utils/types.py` with information about your new PDE: - -```python -# Number of partial derivatives -PDE_PARTIALS = { - # Existing PDEs... - PDE.YourNewPDE: 3, # Number of partial derivatives needed -} - -# Number of spatial dimensions -PDE_NUM_SPATIAL = { - # Existing PDEs... - PDE.YourNewPDE: 2, # 1 for 1D PDEs, 2 for 2D PDEs -} - -# Spatial size of the grid -PDE_SPATIAL_SIZE = { - # Existing PDEs... - PDE.YourNewPDE: [128, 128], # Spatial dimensions of your dataset -} - -# High-resolution spatial size (if applicable) -HIGH_RESOLUTION_PDE_SPATIAL_SIZE = { - # Existing PDEs... - PDE.YourNewPDE: [512, 512], # High-res dimensions -} - -# Number of parameters -PDE_NUM_PARAMETERS = { - # Existing PDEs... - PDE.YourNewPDE: 2, # Number of parameters in your PDE -} - -# Parameter values -PDE_PARAM_VALUES = { - # Existing PDEs... - PDE.YourNewPDE: { - "param1": [0.1, 0.2, 0.3], # List of possible values for param1 - "param2": [1.0, 2.0, 3.0], # List of possible values for param2 - }, -} - -# Number of data channels -PDE_NUM_CHANNELS = { - # Existing PDEs... - PDE.YourNewPDE: 2, # Number of channels in your solution field -} - -# Number of timesteps in the trajectory -PDE_TRAJ_LEN = { - # Existing PDEs... - PDE.YourNewPDE: 100, # Number of timesteps in your trajectories -} -``` - -### Step 3: Add Parameter Normalization Stats - -Update `pdeinvbench/data/utils.py` with normalization statistics for your PDE parameters: - -```python -PARAM_NORMALIZATION_STATS = { - # Existing PDEs... - PDE.YourNewPDE: { - "param1": (0.2, 0.05), # (mean, std) for param1 - "param2": (2.0, 0.5), # (mean, std) for param2 - }, -} -``` - -### Step 4: Add Parameter Extraction Logic - -Add logic to extract parameters from your dataset files in `extract_params_from_path` function inside the dataset class: - -```python -def extract_params_from_path(path: str, pde: PDE) -> dict: - # Existing code... - elif pde == PDE.YourNewPDE: - # Parse the filename to extract parameters - name = os.path.basename(path) - # Example: extract parameters from filename format "param1=X_param2=Y.h5" - param1 = torch.Tensor([float(name.split("param1=")[1].split("_")[0])]) - param2 = torch.Tensor([float(name.split("param2=")[1].split(".")[0])]) - param_dict = {"param1": param1, "param2": param2} - # Existing code... - return param_dict -``` - -### Step 5: Create a Dataset Handler (if needed) - -If your PDE requires special handling beyond what `PDE_MultiParam` provides, create a new dataset class in `pdeinvbench/data/`: - -```python -# Example: pdeinvbench/data/your_new_pde_dataset.py -import torch -from torch.utils.data import Dataset - -class YourNewPDEDataset(Dataset): - """ - Custom dataset class for your new PDE system. - """ - def __init__( - self, - data_root: str, - pde: PDE, - n_past: int, - n_future: int, - mode: str, - train: bool, - # Other parameters... - ): - # Initialization code... - pass - - def __len__(self): - # Implementation... - pass - - def __getitem__(self, index: int): - # Implementation... 
- pass -``` - -Add your new dataset to `pdeinvbench/data/__init__.py`: - -```python -from .pde_multiparam import PDE_MultiParam -from .your_new_pde_dataset import YourNewPDEDataset - -__all__ = ["PDE_MultiParam", "YourNewPDEDataset"] -``` - -```markdown -### Step 6: Create System Configuration - -Create `configs/system_params/your_new_pde.yaml`: - -```yaml -# configs/system_params/your_new_pde.yaml -defaults: - - base - -# ============ Data Parameters ============ -name: "your_new_pde_inverse" -data_root: "/path/to/your/data" -pde_name: "Your New PDE Description" # Must match PDE enum value -num_channels: 2 # Number of solution channels (e.g., u and v) -cutoff_first_n_frames: 0 # How many initial frames to skip - -# ============ Model Parameters ============ -downsampler_input_dim: 2 # 1 for 1D systems, 2 for 2D systems -params_to_predict: ["param1", "param2"] # What parameters to predict -normalize: True # Whether to normalize predicted parameters -``` - -Then create the top-level config `configs/your_new_pde.yaml`: - -```yaml -# configs/your_new_pde.yaml -name: your_new_pde -defaults: - - _self_ - - base - - override system_params: your_new_pde -``` - -The existing configs/data/base.yaml automatically references ${system_params.*} so data loading works out of the box. Run experiments with: - - -```yaml - python train_inverse.py --config-name=your_new_pde - python train_inverse.py --config-name=your_new_pde model=fno - python train_inverse.py --config-name=your_new_pde model=resnet -``` - -### Step 7: Add Residual Functions - -Implement residual functions for your PDE in `pdeinvbench/losses/pde_residuals.py`: - -```python -def your_new_pde_residual( - sol: torch.Tensor, - params: Dict[str, torch.Tensor], - spatial_grid: Tuple[torch.Tensor, ...], - t: torch.Tensor, - return_partials: bool = False, -) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: - """ - Compute the residual for your new PDE. - - Args: - sol: Solution field - params: Dictionary of PDE parameters - spatial_grid: Spatial grid coordinates - t: Time coordinates - return_partials: Whether to return partial derivatives - - Returns: - Residual tensor or (residual, partials) if return_partials=True - """ - # Implementation... - pass -``` - -Register your residual function in `get_pde_residual_function`: - -```python -def get_pde_residual_function(pde: PDE) -> Callable: - """Return the appropriate residual function for the given PDE.""" - if pde == PDE.ReactionDiffusion2D: - return reaction_diffusion_2d_residual - # Add your PDE - elif pde == PDE.YourNewPDE: - return your_new_pde_residual - # Other PDEs... - else: - raise ValueError(f"Unknown PDE type: {pde}") -``` - -### Step 8: Create a Combined Configuration - -Create a combined configuration that uses your dataset: - -```yaml -# configs/your_new_pde.yaml -name: "your_new_pde" -defaults: - - _self_ - - base - - override data: your_new_pde -``` - -### Step 9: Generate and Prepare Data - -Make sure your data is properly formatted and stored in the expected directory structure: - -``` -/path/to/your/data/ -├── train/ -│ ├── param1=0.1_param2=1.0.h5 -│ ├── param1=0.2_param2=2.0.h5 -│ └── ... -├── validation/ -│ ├── param1=0.15_param2=1.5.h5 -│ └── ... -└── test/ - ├── param1=0.25_param2=2.5.h5 - └── ... 
-``` - -Each HDF5 file should contain: -- Solution trajectories -- Grid information (x, y, t) -- Any other metadata needed for your PDE - -### Step 10: Run Experiments - -You can now run experiments with your new dataset: - -```bash -python train_inverse.py --config-name=your_new_pde -``` - -### Data Format Requirements - -The primary dataset class `PDE_MultiParam` expects data in HDF5 format with specific structure: - -- **1D PDEs**: Each HDF5 file contains a single trajectory with keys: - - `tensor`: The solution field with shape `[time, spatial_dim]` - - `x-coordinate`: Spatial grid points - - `t-coordinate`: Time points - -- **2D PDEs**: Each HDF5 file contains multiple trajectories (one per IC): - - `0001/data`: Solution field with shape `[time, spatial_dim_1, spatial_dim_2, channels]` - - `0001/grid/x`: x-coordinates - - `0001/grid/y`: y-coordinates - - `0001/grid/t`: Time points - -- **File naming**: The filename should encode the PDE parameters, following the format expected by `extract_params_from_path` - diff --git a/MODEL_GUIDE.md b/MODEL_GUIDE.md deleted file mode 100644 index 371980cd3523e199df4efe57fa361058ec434e9c..0000000000000000000000000000000000000000 --- a/MODEL_GUIDE.md +++ /dev/null @@ -1,352 +0,0 @@ -# PDEInvBench -## Adding a New Model - -The PDEInvBench framework is designed to be modular, allowing you to easily add new model architectures. This section describes how to add a new encoder architecture to the repository. - -## Table of Contents -- [Model Architecture Components](#model-architecture-components) -- [Adding a new model](#adding-a-new-model) - - [Step 1: Create a New Encoder Class](#step-1-create-a-new-encoder-class) - - [Step 2: Import and Register Your Model](#step-2-import-and-register-your-model) - - [Step 3: Create a Configuration File](#step-3-create-a-configuration-file) - - [Step 4: Run Experiments with Your Model](#step-4-run-experiments-with-your-model) - -## Model Architecture Components - -The inverse model architecture in PDEInvBench consists of three main components: - - -``` -Input Solution Field → Encoder → Downsampler → Parameter Network → PDE Parameters -``` - -1. **Encoder**: Extracts features from the input solution field (e.g., FNO, ResNet, ScOT) -2. **Downsampler**: Reduces the spatial dimensions of the features (e.g., ConvDownsampler) -3. **Parameter Network**: Predicts PDE parameters from the downsampled features - - -## Adding a new model - -When creating a new model, you typically only need to modify one of these components while keeping the others the same. - -### Step 1: Create a New Encoder Class - -First, create a new encoder class in `pdeinvbench/models/encoder.py`. Your new encoder should follow the interface of existing encoders like `FNOEncoder`, `ResnetEncoder`, or `SwinEncoder`: - -```python -import torch -import torch.nn as nn -from pdeinvbench.utils.types import PDE -from pdeinvbench.models.encoder import resolve_number_input_channels - -class YourEncoder(nn.Module): - """ - Your custom encoder for PDE inverse problems. 
- """ - - def __init__( - self, - n_modes: int, # Or equivalent parameter for your architecture - n_layers: int, - n_past: int, - n_future: int, - pde: PDE, - data_channels: int, - hidden_channels: int, - use_partials: bool, - mode: str, - batch_size: int - # Add any architecture-specific parameters - ): - super(YourEncoder, self).__init__() - - # Store essential parameters - self.n_past = n_past - self.n_future = n_future - self.pde = pde - self.data_channels = data_channels - self.hidden_channels = hidden_channels - self.use_partials = use_partials - self.mode = mode - self.batch_size = batch_size - - - # Calculate input channels similar to existing encoders - in_channels = resolve_number_input_channels( - n_past=n_past, - data_channels=data_channels, - use_partials=use_partials, - pde=pde, - ) - - # Define your model architecture - # Example: Custom neural network layers - self.encoder_layers = nn.ModuleList([ - # Your custom layers here - nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1), - nn.ReLU(), - # Add more layers as needed - ]) - - # Output layer to match expected output dimensions - self.output_layer = nn.Conv2d(hidden_channels, hidden_channels, kernel_size=1) - - def forward(self, x, **kwargs): - """ - Forward pass of your encoder. - - Args: - x: Input tensor of shape [batch, channels, height, width] - **kwargs: Additional arguments (may include 't' for time-dependent models) - - Returns: - Output tensor of shape [batch, hidden_channels, height, width] - """ - # Implement your forward pass - for layer in self.encoder_layers: - x = layer(x) - - x = self.output_layer(x) - return x -``` - -#### Creating Custom Downsamplers - -If you need a custom downsampler, create it in `pdeinvbench/models/downsampler.py`: - -```python -import torch -import torch.nn as nn - -class YourDownsampler(nn.Module): - """ - Your custom downsampler for reducing spatial dimensions. - """ - - def __init__( - self, - input_dimension: int, - n_layers: int, - in_channels: int, - out_channels: int, - kernel_size: int, - stride: int, - padding: int, - dropout: float, - ): - super(YourDownsampler, self).__init__() - - # Define your downsampling layers - self.layers = nn.ModuleList([ - # Your custom downsampling layers here - nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding), - nn.ReLU(), - nn.Dropout(dropout), - ]) - - def forward(self, x): - """ - Forward pass of your downsampler. - - Args: - x: Input tensor of shape [batch, channels, height, width] - - Returns: - Downsampled tensor - """ - for layer in self.layers: - x = layer(x) - return x -``` - -#### Creating Custom MLPs - -If you need a custom MLP, create it in `pdeinvbench/models/mlp.py`: - -```python -import torch -import torch.nn as nn - -class YourMLP(nn.Module): - """ - Your custom MLP for parameter prediction. - """ - - def __init__( - self, - in_dim: int, - hidden_size: int, - dropout: float, - out_dim: int, - num_layers: int, - activation: str, - ): - super(YourMLP, self).__init__() - - # Define your MLP layers - layers = [] - current_dim = in_dim - - for i in range(num_layers): - layers.append(nn.Linear(current_dim, hidden_size)) - layers.append(nn.ReLU() if activation == "relu" else nn.Tanh()) - layers.append(nn.Dropout(dropout)) - current_dim = hidden_size - - layers.append(nn.Linear(current_dim, out_dim)) - self.layers = nn.Sequential(*layers) - - def forward(self, x): - """ - Forward pass of your MLP. 
- - Args: - x: Input tensor of shape [batch, features] - - Returns: - Output tensor of shape [batch, out_dim] - """ - return self.layers(x) -``` - -### Step 2: Import and Register Your Model - -Make sure your encoder is imported in `pdeinvbench/models/__init__.py`: - -```python -from .encoder import FNOEncoder, ResnetEncoder, ScOTEncoder, YourEncoder -``` - -This makes your encoder available for use in configuration files. - -### Step 3: Create a Configuration File - -The configuration system has three levels: - -#### 3.1: Create Model Architecture Config - -Create `configs/model/yourmodel.yaml`: - -```yaml -# configs/model/yourmodel.yaml -name: "${system_params.name}_yourmodel" -dropout: ${system_params.yourmodel_dropout} -predict_variance: False -hidden_channels: ${system_params.yourmodel_hidden_channels} -encoder_layers: ${system_params.yourmodel_encoder_layers} -downsampler_layers: ${system_params.yourmodel_downsampler_layers} -mlp_layers: ${system_params.yourmodel_mlp_layers} - -model_config: - _target_: pdeinvbench.models.inverse_model.InverseModel - paramnet: - _target_: pdeinvbench.models.inverse_model.ParameterNet - pde: ${data.pde} - normalize: ${system_params.normalize} - logspace: ${system_params.logspace} - params_to_predict: ${system_params.params_to_predict} - predict_variance: ${model.predict_variance} - mlp_type: ${system_params.mlp_type} - encoder: - _target_: pdeinvbench.models.encoder.YourEncoder - n_modes: ${system_params.yourmodel_n_modes} - n_past: ${n_past} - n_future: ${n_future} - n_layers: ${model.encoder_layers} - data_channels: ${data.num_channels} - hidden_channels: ${model.hidden_channels} - use_partials: True - pde: ${data.pde} - mode: ${mode} - batch_size: ${data.batch_size} - use_cn: false - task: inverse - downsampler: ${system_params.yourmodel_downsampler} - mlp_hidden_size: ${model.hidden_channels} - mlp_layers: ${model.mlp_layers} - mlp_activation: "relu" - mlp_dropout: ${model.dropout} - downsample_factor: ${data.downsample_factor} -``` - -#### 3.2: Add Defaults to `configs/system_params/base.yaml` - -Add architecture defaults that work across all PDE systems: - -```yaml -# configs/system_params/base.yaml - -# ============ YourModel Architecture ============ -yourmodel_hidden_channels: 64 -yourmodel_encoder_layers: 4 -yourmodel_downsampler_layers: 4 -yourmodel_dropout: 0 -yourmodel_mlp_layers: 1 -yourmodel_n_modes: 16 - -yourmodel_downsampler: - _target_: pdeinvbench.models.downsampler.ConvDownsampler - input_dimension: ${system_params.downsampler_input_dim} - n_layers: ${model.downsampler_layers} - in_channels: ${model.hidden_channels} - out_channels: ${model.hidden_channels} - kernel_size: 3 - stride: 1 - padding: 2 - dropout: ${model.dropout} -``` - -#### 3.3: (Optional) Add System-Specific Overrides - -Override defaults for specific systems in `configs/system_params/{system}.yaml`: - -```yaml -# configs/system_params/2dtf.yaml -defaults: - - base - -# ... existing system config ... 
-
-# Override architecture for this system
-yourmodel_hidden_channels: 128 # Needs larger model
-yourmodel_encoder_layers: 6
-```
-
-**That's it!** Your model now works with all PDE systems:
-```bash
-python train_inverse.py --config-name=1dkdv model=yourmodel
-python train_inverse.py --config-name=2dtf model=yourmodel
-```
-
-
-#### Important Notes
-
-- **System-specific parameters** (like `params_to_predict`, `normalize`, `downsampler_input_dim`) go in `configs/system_params/{system}.yaml`
-- **Architecture defaults** go in `configs/system_params/base.yaml`
-- **Model structure** goes in `configs/model/{architecture}.yaml`
-- For special cases like Darcy Flow, override the downsampler in the system_params file:
-  ```yaml
-  # configs/system_params/2ddf.yaml
-  yourmodel_downsampler:
-    _target_: pdeinvbench.models.downsampler.IdentityMap
-  ```
-
-### Step 4: Run Experiments with Your Model
-
-You can now run experiments with your custom model on **any** PDE system:
-
-```bash
-# Use your model with different PDE systems
-python train_inverse.py --config-name=1dkdv model=yourmodel
-python train_inverse.py --config-name=2dtf model=yourmodel
-python train_inverse.py --config-name=2dns model=yourmodel
-
-# Use model variants if you created them
-python train_inverse.py --config-name=2drdk model=yourmodel_large
-
-# Override parameters from command line
-python train_inverse.py --config-name=2dtf model=yourmodel model.hidden_channels=96
-
-# Combine multiple overrides
-python train_inverse.py --config-name=2ddf model=yourmodel data.batch_size=16 model.encoder_layers=6
-```
diff --git a/README.md b/README.md
deleted file mode 100644
index 51785ab808bf02db504e52f29ed0c53874bd4182..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,515 +0,0 @@
-# PDEInvBench
-
-A one-stop-shop repository for benchmarking Neural Operators on inverse problems in partial differential equations.
-
-
-
-## Overview
-
-Inverse problems in partial differential equations (PDEs) involve recovering unknown physical parameters of a system—such as viscosity, diffusivity, or reaction coefficients—from observed spatiotemporal solution fields. Formally, given a PDE
-
-$$F_{\phi}(u(x,t)) = 0$$
-
-where *u(x,t)* is the solution field and *φ* represents physical parameters, the **forward problem** maps *φ ↦ u*, while the **inverse problem** seeks the reverse mapping *u ↦ φ*.
-
-Inverse problems are inherently ill-posed and highly sensitive to noise, making them a challenging yet foundational task in scientific computing and engineering. They arise in diverse applications such as geophysical exploration, fluid mechanics, biomedical imaging, and materials design, where estimating hidden parameters from observed dynamics is essential.
-
-**PDEInvBench** provides a comprehensive benchmark for inverse problems in partial differential equations (PDEs). The codebase supports multiple PDE systems, training strategies, and neural network architectures.
-
-## Dataset Link
-The datasets used in this project can be found here:
-https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main
-
-
-## Table of Contents
-1. [Overview](#overview)
-2. [Supported Systems](#supported-systems)
-3. [Supported Inverse Methods](#supported-inverse-methods)
-4. [Models Implemented](#models-implemented)
-5. [Directory Structure](#directory-structure)
-6. [Environment Setup](#environment-setup)
-7. [Downloading Data](#downloading-data)
-8.
[Running Experiments](#running-experiments) - - [How Configs Work](#how-configs-work) - - [Basic Commands](#basic-commands) - - [Common Overrides](#common-overrides) - - [Multi-GPU and Distributed Training](#multi-gpu-and-distributed-training) - - [Experiment Patterns Along Core Design Axes](#-experiment-patterns-along-core-design-axes) - - [Training/Optimization Strategies](#1️⃣-trainingoptimization-strategies) - - [Problem Representation and Inductive Bias](#2️⃣-problem-representation-and-inductive-bias) - - [Scaling Experiments](#3️⃣-scaling-experiments) - -9. [Testing](#Testing) -10. [Shape Checking](#Shape-Checking) -11. [Adding a New Model](#adding-a-new-model) -12. [Adding a New Dataset](#adding-a-new-dataset) - -## Supported Systems - -- **[1D Korteweg–De Vries (KdV) Equation](DATA_GUIDE.md#4d-1d-korteweg-de-vries)** -- **[2D Reaction Diffusion](DATA_GUIDE.md#4a-2d-reaction-diffusion)** -- **[2D Unforced Navier Stokes](DATA_GUIDE.md#4b-2d-navier-stokes-unforced)** -- **[2D Forced Navier Stokes](DATA_GUIDE.md#4c-2d-turbulent-flow-forced-navier-stokes)** -- **[2D Darcy Flow](DATA_GUIDE.md#4e-2d-darcy-flow)** - -For detailed technical information on each PDE system — including governing equations, parameter ranges, and dataset download instructions — refer to the [Data Guide](DATA_GUIDE.md). - -## Supported Inverse Methods - -- **Fully data-driven** -- **PDE Residual Loss** -- **Test-Time Tailoring (TTT)** - -## Models Implemented - -- **[FNO (Fourier Neural Operator)](https://arxiv.org/pdf/2010.08895)** -- **[scOT (scalable Operator Transformer)](https://proceedings.neurips.cc/paper_files/paper/2024/file/84e1b1ec17bb11c57234e96433022a9a-Paper-Conference.pdf)** -- **[ResNet](https://arxiv.org/pdf/1512.03385)** - -For detailed technical information on the model architecture, refer to the [Model Guide](MODEL_GUIDE.md). 
- - -## Directory Structure - -``` -PDEInvBench -├── configs/ # Inverse problem Hydra configuration files -│ ├── callbacks/ # Training callbacks (checkpointing, logging) -│ ├── data/ # Dataset and data loading configurations -│ ├── lightning_module/ # PyTorch Lightning module configurations -│ ├── logging/ # Weights & Biases logging configurations -│ ├── loss/ # Loss function configurations -│ ├── lr_scheduler/ # Learning rate scheduler configurations -│ ├── model/ # Neural network model configurations -│ ├── optimizer/ # Optimizer configurations -| ├── system_params # PDE-specific model and experiment parameters -│ ├── tailoring_optimizer/ # Test-time tailoring optimizer configs -│ └── trainer/ # PyTorch Lightning trainer configurations -├── scripts/ # Utility and data processing scripts -│ ├── darcy-flow-scripts/ # Darcy flow specific data processing -│ ├── parameter-perturb/ # Parameter perturbation utilities -│ ├── reaction-diffusion-scripts/ # Reaction-diffusion data processing -│ ├── data_splitter.py # Splits datasets into train/validation sets -│ └── process_navier_stokes.py # Processes raw Navier-Stokes data -├── pdeinvbench/ # Main package source code -│ ├── data/ # Data loading and preprocessing modules -│ ├── lightning_modules/ # PyTorch Lightning training modules -│ ├── losses/ # Loss function implementations -│ ├── models/ # Neural network model implementations -│ │ ├── __init__.py # Package initialization -│ │ ├── conv_head.py # Convolutional head for parameter prediction -│ │ ├── downsampler.py # Spatial downsampling layers -│ │ ├── encoder.py # FNO and other encoder architectures -│ │ ├── inverse_model.py # Main inverse problem model -│ │ └── mlp.py # Multi-layer perceptron components -│ └── utils/ # Utility functions and type definitions -│ ├── __init__.py # Package initialization -│ ├── config_utils.py # Hydra configuration utilities -│ ├── types.py # Type definitions and PDE system constants -│ └── ... # Additional utility modules -└── train_inverse.py # Main training script for inverse problems -``` - -## Environment Setup - -This project requires **Python 3.11** with PyTorch 2.7, PyTorch Lightning, and several scientific computing libraries. - -### Quick Setup (Recommended) - -Using the provided `environment.yml`: - -```bash -# Create environment (use micromamba or conda) -conda env create -f environment.yml -conda activate inv-env-tmp - -# Install the package in editable mode -pip install -e . -``` - -### Manual Setup - -Alternatively, use the `build_env.sh` script: - -```bash -chmod +x build_env.sh -./build_env.sh -``` - -### Key Dependencies - -- **Deep Learning**: PyTorch 2.7, PyTorch Lightning 2.5 -- **Neural Operators**: neuraloperator 0.3.0, scOT (Poseidon fork) -- **Scientific Computing**: scipy, numpy, h5py, torch-harmonics -- **Configuration**: Hydra 1.3, OmegaConf 2.3 -- **Logging**: Weights & Biases (wandb) -- **Type Checking**: jaxtyping 0.3.2, typeguard 2.13.3 - -**Note**: The scOT architecture requires a custom fork installed from GitHub (automatically handled in setup scripts). - -### Verify Installation - -```bash -python -c "import torch; import lightning; import pdeinvbench; print('Setup successful!')" -``` - -## Downloading Data - -We provide datasets on [HuggingFace](https://huggingface.co/datasets/DabbyOWL/PDE_Inverse_Problem_Benchmarking/tree/main) with a convenient download script. 
Use `huggingface_pdeinv_download.py` to batch download specific datasets and splits: - -```bash -pip install huggingface_hub -python3 huggingface_pdeinv_download.py --dataset darcy-flow-241 --split train --local-dir ./data -``` - -Available datasets include `darcy-flow-241`, `korteweg-de-vries-1d`, `navier-stokes-forced-2d`, `reaction-diffusion-2d-du`, and more. For complete dataset documentation, parameter ranges, and detailed download instructions, see the [Data Guide](DATA_GUIDE.md#2-downloading-data). - -## Running Experiments - -We use `hydra` to manage experiment configurations. The repository supports all combinations of: -- **PDE Systems**: `1dkdv`, `2drd`, `2dns`, `2dtf`, `2ddf` -- **Problem Representation**: - - **Derivative conditioning** - - **Temporal conditioning**: conditioning on 2, 5,10,15,20,25 - - **Model architectures**: FNO, ResNet, scOT (scalable Operator Transformer) -- **Training / Optimization strategies**: - - **Fully data-driven supervision** — standard supervised training using paired parameter–solution data - - **Physics-informed (residual) training** — includes a PDE residual loss term for self-supervised regularization - - **Test-Time Tailoring (TTT)** — post-training fine-tuning using the PDE residual at inference time to adapt to new parameter regimes -- **Scaling**: - - **Model Scaling**: 500k parameters, 5 million parameters, 50 million parameters - - **Data scaling**: parameter, initial condition, temporal horizon - - **Resolution scaling**: 64×64, 128×128, 256×256, 512×512 - -### How Configs Work - -#### Base Configs - -Base configs are located in `configs` and provide starting points for experiments: - -- Top-level configs (e.g., `1dkdv.yaml`, `2drd.yaml`) combine specific options for datasets, models, and training settings -- Individual component configs are in subdirectories (e.g., `configs/data/`, `configs/model/`) - -#### Hydra Override Mechanism - -Hydra allows you to override any configuration parameter via command line: - -1. **Simple parameter override**: `parameter=value` -2. **Nested parameter override**: `group.parameter=value` -3. **Adding new parameters**: `+new_parameter=value` - -All overrides are automatically appended to the W&B experiment name for easy tracking. 
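For example, several overrides can be combined in a single command; the combination below is illustrative and uses only keys documented in the sections that follow:

```bash
# Illustrative only: pick a config, swap the model, point at local data,
# shrink the batch size, fix the seed, and run in evaluation-only mode.
python3 train_inverse.py --config-name=2dns \
    model=fno \
    data.data_root=/path/to/data \
    data.batch_size=8 \
    seed=0 \
    +test_run=true
```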
- -### Basic Commands - -```bash -# Run with a predefined config -python3 train_inverse.py --config-name={pde_system} - -# Run in test mode (evaluation only) -python3 train_inverse.py --config-name={pde_system} +test_run=true - -# Load a model from W&B -python3 train_inverse.py --config-name={pde_system} +inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version} -``` -pde_system: `1dkdv`, `2dtf`, `2dns`, `2drdk`, `2drddu`, `2ddf` -### Common Overrides - -#### Data-related Overrides -```bash -# Specify data root directory -data.data_root=/path/to/data - -# Control time window sampling -data.every_nth_window=10 - -# Control fraction of data used -data.frac_ics_per_param=0.5 -data.frac_param_combinations=0.5 - -# Control train/test temporal split -data.train_window_end_percent=0.5 -data.test_window_start_percent=0.76 - -# High-resolution data processing -high_resolution=True -data.downsample_factor=4 # Downsample from 512x512 to 128x128 -data.downsample_factor=2 # Downsample from 256x256 to 128x128 -data.batch_size=2 # Reduce batch size for higher resolution data -``` - -#### Model-related Overrides -```bash -# Choose a model -model=fno -model=scot -model=resnet -model=fno_50mil # Higher capacity model -model=fno_500k # Lower capacity model - -# Configure model parameters -model.model_config.paramnet.encoder.use_partials=False - -# Specify which parameters to predict (e.g., for ablation studies) -model.paramnet.params_to_predict=['Du'] -model.paramnet.params_to_predict=['Dv'] -``` - -#### Training Overrides -```bash -# Control distributed training -+trainer.num_nodes=2 - -# Set random seed -seed=0 - -# Load a pre-trained model -+inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version} - -# Enable test-only mode (no training) -+test_run=true -``` - -#### Loss-related Overrides -```bash -# Adjust loss weights -loss.inverse_param_loss_weight=0 -loss.inverse_residual_loss_weight=1 -``` - -#### Logging Overrides -```bash -# Set W&B project and directory -logging.project=my_project -logging.save_dir=/path/to/wandb/cache -``` - -### Multi-GPU and Distributed Training - -```bash -# Single GPU -CUDA_VISIBLE_DEVICES=0 python3 train_inverse.py --config-name={pde_system} - -# Multi-node with SLURM -srun python3 train_inverse.py --config-name={pde_system} +trainer.num_nodes={num_nodes} -# num_nodes = 1, 2, 4, etc. -``` - -### 🧪 Experiment Patterns Along Core Design Axes - -This section provides ready-to-run experiment templates organized by the core research dimensions of the benchmark. Each pattern includes concrete commands and parameter sweep recommendations. - ---- - -#### 1️⃣ Training/Optimization Strategies - -Experiments exploring different supervision and optimization approaches for inverse problems. - -##### 1.1 Fully Data-Driven vs Physics-Informed Training - -```bash -# Fully data-driven (no residual loss) -python3 train_inverse.py --config-name={pde_system} \ - loss.inverse_residual_loss_weight=0 - -# Physics-informed with varying residual weights -python3 train_inverse.py --config-name={pde_system} \ - loss.inverse_residual_loss_weight={weight} -# Recommended sweep: weight = 1.0, 0.1, 0.01, 0.001, 0.0001 -``` - -This allows you to control the balance between data-driven supervision and physics-based regularization for parameter identification. - -##### 1.2 Test-Time Tailoring (TTT) - -At test time, given an observed PDE trajectory `u_{t-k}, ..., u_t`, the inverse model `f_θ` predicts parameters `φ̂ = f_θ(u_{t-k}, ..., u_t)`. 
-Test-Time Tailoring (TTT) adapts `f_θ` by minimizing a physics-based self-supervised loss derived from the PDE residual:
-
-`L_Tailor = ||F_{φ̂}(u_{t-k}, ..., u_t)||² + α * ( ||f_θ(u_{t-k}, ..., u_t) - f_{θ_frozen}(u_{t-k}, ..., u_t)||² / ||f_{θ_frozen}(u_{t-k}, ..., u_t)||² )`
-
-Here `F_{φ̂}` is a discrete approximation of the PDE operator, and `α` controls the strength of the *anchor loss* that stabilizes adaptation. Optimization is performed for a small number of gradient steps on `L_Tailor`, allowing the model to specialize to new or out-of-distribution parameter regimes at inference time.
-
-```bash
-# Basic TTT with pre-trained model
-python3 train_inverse.py --config-name={pde_system}_ttt \
-    inverse_model_wandb_run={project_id}/{project_name}/model-{model_id}:{version} \
-    tailor_anchor_loss_weight={alpha} \
-    num_tailoring_steps={steps} \
-    tailoring_optimizer_lr={lr}
-
-```
-
----
-
-#### 2️⃣ Problem Representation and Inductive Bias
-
-Experiments exploring how to encode physical problems and what architectural inductive biases work best.
-
-##### 2.1 Conditioning Strategy: Derivatives vs Raw Solutions
-
-```bash
-# Derivative conditioning (spatial/temporal derivatives as input)
-python3 train_inverse.py --config-name={pde_system} \
-    model.model_config.paramnet.encoder.use_partials=True
-
-# Temporal conditioning (raw solution snapshots only)
-python3 train_inverse.py --config-name={pde_system} \
-    model.model_config.paramnet.encoder.use_partials=False
-```
-
-Derivative conditioning provides explicit spatial and temporal gradient information from the input frames.
-
-##### 2.2 Model Architecture: Inductive Biases
-
-```bash
-# Fourier Neural Operator (spectral bias)
-python3 train_inverse.py --config-name={pde_system} model=fno
-
-# ResNet (convolutional locality bias)
-python3 train_inverse.py --config-name={pde_system} model=resnet
-
-# scOT - Scalable Operator Transformer (attention-based)
-python3 train_inverse.py --config-name={pde_system} model=scot
-```
-
-##### 2.3 Temporal Conditioning Frames
-
-```bash
-# Vary the number of past frames used for conditioning
-python3 train_inverse.py --config-name={pde_system} n_past={num_frames}
-
-# num_frames = 2, 5, 10, 15, 20
-```
-
-
----
-
-#### 3️⃣ Scaling Experiments
-
-Experiments investigating how performance scales with model capacity, data quantity, and spatial resolution.
-
-##### 3.1 Model Capacity Scaling
-
-```bash
-# Small model: ~500k parameters
-python3 train_inverse.py --config-name={pde_system} model=fno_50k
-
-# Base model: ~5M parameters
-python3 train_inverse.py --config-name={pde_system} model=fno
-
-# Large model: ~50M parameters
-python3 train_inverse.py --config-name={pde_system} model=fno_50mil
-```
-
-
-##### 3.2 Data Quantity Scaling
-
-###### 3.2.1 Initial Condition Diversity Scaling
-```bash
-python3 train_inverse.py --config-name={pde_system} \
-    data.frac_ics_per_param={frac}
-# Recommended sweep: frac = 0.2, 0.35, 0.5, 0.75
-```
-
-Only a `frac_ics_per_param` fraction of the initial conditions per parameter combination is sampled during training, allowing you to control the amount of initial condition diversity and study data efficiency across different initial states.
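-
-A minimal sketch of how the recommended sweep could be scripted is shown below, assuming the `2drdk` config and the default data paths; both are placeholders to adjust for your setup:
-
-```bash
-# Sweep initial-condition diversity for one PDE system
-for frac in 0.2 0.35 0.5 0.75; do
-    python3 train_inverse.py --config-name=2drdk data.frac_ics_per_param=${frac}
-done
-```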
-
-###### 3.2.2 Parameter Space Coverage Scaling
-```bash
-python3 train_inverse.py --config-name={pde_system} \
-    data.frac_param_combinations={frac}
-# Recommended sweep: frac = 0.2, 0.35, 0.5, 0.75
-```
-
-Only a `frac_param_combinations` fraction of the parameter combinations in the training set is sampled, allowing you to control parameter space coverage and understand how model performance scales with the diversity of parameter combinations in the training data.
-
-###### 3.2.3 Temporal Horizon Scaling
-```bash
-python3 train_inverse.py --config-name={pde_system} \
-    data.train_window_end_percent={train_end} \
-    data.test_window_start_percent={test_start}
-# Recommended sweeps:
-# train_end = 0.25, 0.5, 0.76, 1.0
-# test_start = 0.76
-```
-
-Only the first `train_window_end_percent` fraction of each trajectory is used for training, while evaluation uses the window from `test_window_start_percent` onward. This enables studies on temporal extrapolation and how much of the temporal dynamics is needed for accurate parameter identification.
-
-##### 3.3 Spatial Resolution Scaling
-
-```bash
-# High-resolution experiments with online downsampling
-python3 train_inverse.py --config-name={pde_system} \
-    high_resolution=True \
-    data.downsample_factor={factor} \
-    data.batch_size={batch_size}
-
-# Example configurations:
-# factor=1, 512×512 (full resolution)
-# factor=2, 256×256
-# factor=4, 128×128
-# factor=8, 64×64
-```
-
-The `HIGH_RESOLUTION_PDE_SPATIAL_SIZE` in `pdeinvbench/utils/types.py` defines the maximum resolution (typically 512×512), and the downsampling factor reduces from this maximum. These experiments help determine how resolution affects identifiability of parameters and whether models trained on low-resolution data can generalize to high-resolution inputs.
-
-## Testing
-
-The `tests/` directory contains validation scripts to verify the correctness of PDE residual computations and numerical implementations.
-
-### Test Structure
-
-- **`test_fluids.py`**: Validates turbulent flow and Navier-Stokes residual computations by comparing PyTorch implementations against NumPy reference implementations
-- **`fluids_numpy_reference.py`**: NumPy reference implementations for fluid dynamics operators (stream function, advection, Laplacian)
-- **`reaction-diffusion-residuals.py`**: Validates reaction-diffusion residual computations and generates visualization GIFs
-
-### Running Tests
-
-**Standard pytest (skips tests requiring external data):**
-```bash
-pytest tests/ -v
-```
-
-### Test Validation
-
-The validation tests verify:
-1. **Numerical accuracy**: Finite difference operators match reference implementations (error < 1e-3)
-2. **PDE residuals**: Ground-truth solutions produce near-zero residuals (typically < 1e-4)
-3. **Operator correctness**: Stream function, advection, Laplacian, and gradient computations
-4. 
**Batch independence**: No cross-contamination between batch elements - -### Data Requirements - -Some tests require external HDF5 datasets: -- Tests automatically **skip** (not fail) when data is unavailable -- Suitable for CI/CD environments without large datasets -- For full validation, download datasets following the [Data Guide](DATA_GUIDE.md) - -### Visualization - -Residual validation scripts generate animated GIFs in `test-images/` showing: -- Temporal evolution of PDE residuals -- Spatial distribution of numerical errors -- Threshold-based error highlighting - - -## Shape-Checking - -This codebase uses [jaxtyping](https://github.com/google/jaxtyping) for runtime tensor shape validation, which helps catch dimension mismatches. - -**To disable shape checking for faster execution:** -```bash -# Disable for production runs -export JAXTYPING_DISABLE=1 -python train_inverse.py --config-name=2dtf model=fno - -# Or inline -JAXTYPING_DISABLE=1 python train_inverse.py --config-name=2dtf model=fno -``` - -## Adding a New Dataset - -To add a new PDE system to the benchmark, follow the guide in [Data Guide - Section 5: Adding a New Dataset](DATA_GUIDE.md#5-adding-a-new-dataset). - -## Adding a New Model - -To add a new encoder architecture (e.g., Transformer, U-Net), follow the guide in [Model Guide - Adding a New Model](MODEL_GUIDE.md#adding-a-new-model). diff --git a/configs/1dkdv.yaml b/configs/1dkdv.yaml deleted file mode 100644 index 87dca00287d145f11cc633e7c7e973ef32b18d21..0000000000000000000000000000000000000000 --- a/configs/1dkdv.yaml +++ /dev/null @@ -1,5 +0,0 @@ -name: 1dkdv -defaults: - - _self_ - - base - - override system_params: 1dkdv diff --git a/configs/1dkdv_ttt.yaml b/configs/1dkdv_ttt.yaml deleted file mode 100644 index 6d117b63b261c32723bbb129fb2cda94ade2e288..0000000000000000000000000000000000000000 --- a/configs/1dkdv_ttt.yaml +++ /dev/null @@ -1,8 +0,0 @@ -name: 1dkdv -defaults: - - _self_ - - ttt_base - - override system_params: 1dkdv - -inverse_model_wandb_run: ml-pdes/1dkdv_test_time_tuning/model-4j475b9v:v199 -# inverse_model_wandb_run: ml-pdes/time_logging_test/model-tw4k8e8h:best diff --git a/configs/2ddf.yaml b/configs/2ddf.yaml deleted file mode 100644 index 4c446649b0d1fc7e1c8932c210ce7b366c8a3927..0000000000000000000000000000000000000000 --- a/configs/2ddf.yaml +++ /dev/null @@ -1,8 +0,0 @@ -name: 2ddf -defaults: - - base - - _self_ - - override callbacks: 2ddf - - override system_params: 2ddf - -n_past: 1 diff --git a/configs/2ddf_ttt.yaml b/configs/2ddf_ttt.yaml deleted file mode 100644 index 4ce22f6be31465072bd9bdc5a764bfaf99a18510..0000000000000000000000000000000000000000 --- a/configs/2ddf_ttt.yaml +++ /dev/null @@ -1,9 +0,0 @@ -name: 2ddf -defaults: - - ttt_base # Load base first - - _self_ # Then override with this file's values - - override callbacks: 2ddf - - override system_params: 2ddf - -inverse_model_wandb_run: ml-pdes/2ddf_compilation_folded/model-r5fj8hr1:best -n_past: 1 diff --git a/configs/2dns.yaml b/configs/2dns.yaml deleted file mode 100644 index f3dbee1c77ba8176b6685150f8df90476850bf93..0000000000000000000000000000000000000000 --- a/configs/2dns.yaml +++ /dev/null @@ -1,5 +0,0 @@ -name: 2dns -defaults: - - _self_ - - base - - override system_params: 2dns diff --git a/configs/2dns_ttt.yaml b/configs/2dns_ttt.yaml deleted file mode 100644 index b13a29498366ad435970432cf5e2d117d14d1ad5..0000000000000000000000000000000000000000 --- a/configs/2dns_ttt.yaml +++ /dev/null @@ -1,10 +0,0 @@ -name: 2dns -defaults: - - _self_ - - ttt_base - - 
override system_params: 2dns - - -test_run: true -inverse_model_wandb_run: ml-pdes/tailoring_redone/model-wuhbdlqr:v200 -# inverse_model_wandb_run: ml-pdes/time_logging_test/model-8mwjk5v0:best \ No newline at end of file diff --git a/configs/2drddu.yaml b/configs/2drddu.yaml deleted file mode 100644 index ba4c4a94719b2cb163ac41212bb068ef10bd0fc6..0000000000000000000000000000000000000000 --- a/configs/2drddu.yaml +++ /dev/null @@ -1,7 +0,0 @@ -name: 2drd -defaults: - - _self_ - - base - - override system_params: 2drddu - -# Note: params_to_predict is already set to ["Du"] in system_params/2drddu.yaml \ No newline at end of file diff --git a/configs/2drddu_ttt.yaml b/configs/2drddu_ttt.yaml deleted file mode 100644 index c382d8861a80babed4af835d9f3b2e9320d3564d..0000000000000000000000000000000000000000 --- a/configs/2drddu_ttt.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: 2drd -defaults: - - _self_ - - ttt_base - - override system_params: 2drddu - - -test_run: true -inverse_model_wandb_run: ml-pdes/2drddu_compilation/model-lslyzo92:v184 # 100 % ics -# inverse_model_wandb_run: ml-pdes/2drddu_compilation/model-jupsos6p:best # 20 % ics -# inverse_model_wandb_run: ml-pdes/time_logging_test/model-71xuth62:best - diff --git a/configs/2drdk.yaml b/configs/2drdk.yaml deleted file mode 100644 index e393d62843a4e95c4325c329403fd2f396d2a7b9..0000000000000000000000000000000000000000 --- a/configs/2drdk.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: 2drd -defaults: - - _self_ - - base - - override system_params: 2drdk - diff --git a/configs/2drdk_ttt.yaml b/configs/2drdk_ttt.yaml deleted file mode 100644 index 177556592eab791d14784cfccc1f3ddf25c9c1b8..0000000000000000000000000000000000000000 --- a/configs/2drdk_ttt.yaml +++ /dev/null @@ -1,10 +0,0 @@ -name: 2drd -defaults: - - _self_ - - ttt_base - - override system_params: 2drdk - - -test_run: true -inverse_model_wandb_run: ml-pdes/2drdk_compilation/model-30801ssy:v189 -# inverse_model_wandb_run: ml-pdes/time_logging_test/model-o2v1e8oa:best \ No newline at end of file diff --git a/configs/2dtf.yaml b/configs/2dtf.yaml deleted file mode 100644 index d27bdafb50e8e958b567ed78467f0969fddf4bad..0000000000000000000000000000000000000000 --- a/configs/2dtf.yaml +++ /dev/null @@ -1,6 +0,0 @@ -name: 2dtf -defaults: - - _self_ - - base - - override system_params: 2dtf - diff --git a/configs/2dtf_ttt.yaml b/configs/2dtf_ttt.yaml deleted file mode 100644 index 63ba997bf0ed5291420310d7b4ba04b49cdde3d0..0000000000000000000000000000000000000000 --- a/configs/2dtf_ttt.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: 2dtf -defaults: - - _self_ - - ttt_base - - override system_params: 2dtf - - -test_run: true -# inverse_model_wandb_run: ml-pdes/2dtf_compilation/model-kjskfseu:v172 -inverse_model_wandb_run: ml-pdes/tailoring_redone/model-h6cc91c4:v182 -# inverse_model_wandb_run: ml-pdes/time_logging_test/model-irns4x30:best diff --git a/configs/base.yaml b/configs/base.yaml deleted file mode 100644 index d8ecc9242b2f7f9cb3428f931e3e6f3cc7672378..0000000000000000000000000000000000000000 --- a/configs/base.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: base -defaults: - - _self_ - - callbacks: base - - model: fno - - lightning_module: base - - logging: base - - loss: relative - - optimizer: adam - - trainer: trainer - - lr_scheduler: cosine - - system_params: Null - - data: base - -n_past: 2 -n_future: -1 #doesn't matter for inverse problems -mode: "inverse" -seed: 0 -high_resolution: false diff --git a/configs/callbacks/2ddf.yaml b/configs/callbacks/2ddf.yaml deleted file mode 100644 index 
a57212eaad228cc5d44e0c682f4889e034821f19..0000000000000000000000000000000000000000 --- a/configs/callbacks/2ddf.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- _target_: lightning.pytorch.callbacks.ModelCheckpoint # save model checkpoints - monitor: validation/loss - mode: min - save_last: True -- _target_: lightning.pytorch.callbacks.LearningRateMonitor # log learning rate - logging_interval: epoch -- _target_: pdeinvbench.lightning_modules.logging_callbacks.InverseErrorByTailoringStepCallback # log error by tailoring step -- _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorTestTimeTailoringCallback -- _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorPlottingCallback # stratify error by PDE parameter diff --git a/configs/callbacks/base.yaml b/configs/callbacks/base.yaml deleted file mode 100644 index d871d5ef1cc4003d7a2d58ab4b25cc8233e8c413..0000000000000000000000000000000000000000 --- a/configs/callbacks/base.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorPlottingCallback # stratify error by PDE parameter -- _target_: lightning.pytorch.callbacks.ModelCheckpoint # save model checkpoints - monitor: validation/loss - mode: min - save_last: True - save_top_k: 1 -- _target_: lightning.pytorch.callbacks.LearningRateMonitor # log learning rate - logging_interval: epoch -- _target_: pdeinvbench.lightning_modules.logging_callbacks.InverseErrorByTailoringStepCallback # log error by tailoring step -- _target_: pdeinvbench.lightning_modules.logging_callbacks.PDEParamErrorTestTimeTailoringCallback diff --git a/configs/data/base.yaml b/configs/data/base.yaml deleted file mode 100644 index 7e34f0fd9c2627e459bd3eb7e775eb57bfee3f15..0000000000000000000000000000000000000000 --- a/configs/data/base.yaml +++ /dev/null @@ -1,112 +0,0 @@ -# These will be overridden by child configs -name: "placeholder_inverse" -data_root: "placeholder_path" -train_data_root: ${system_params.train_data_root} -val_data_root: ${system_params.val_data_root} -ood_data_root: ${system_params.ood_data_root} -ood_data_root_extreme: ${system_params.ood_data_root_extreme} -test_data_root: ${system_params.test_data_root} -num_channels: ${system_params.num_channels} -batch_size: 8 -dilation: 1 -cutoff_first_n_frames: ${system_params.cutoff_first_n_frames} -frac_param_combinations: 1 -frac_ics_per_param: 1 -random_sample_param: True -downsample_factor: 0 -every_nth_window: 10 -train_window_start_percent: 0 -train_window_end_percent: 1 -test_window_start_percent: 0 -test_window_end_percent: 1 - -pde: - _target_: pdeinvbench.utils.types.PDE - value: ${system_params.pde_name} - -train_dataloader: - _target_: torch.utils.data.DataLoader - dataset: - _target_: pdeinvbench.data.PDE_MultiParam - data_root: ${data.train_data_root} - pde: ${data.pde} - n_past: ${n_past} - train: True - dilation: ${data.dilation} - cutoff_first_n_frames: ${data.cutoff_first_n_frames} - frac_param_combinations: ${data.frac_param_combinations} - frac_ics_per_param: ${data.frac_ics_per_param} - random_sample_param: ${data.random_sample_param} - downsample_factor: ${data.downsample_factor} - every_nth_window: ${data.every_nth_window} - window_start_percent: ${data.train_window_start_percent} - window_end_percent: ${data.train_window_end_percent} - batch_size: ${data.batch_size} - shuffle: True - -val_dataloader: - _target_: torch.utils.data.DataLoader - dataset: - _target_: pdeinvbench.data.PDE_MultiParam - data_root: ${data.val_data_root} - pde: ${data.pde} - n_past: 
${n_past} - train: False - dilation: ${data.dilation} - cutoff_first_n_frames: ${data.cutoff_first_n_frames} - frac_param_combinations: ${data.frac_param_combinations} - frac_ics_per_param: ${data.frac_ics_per_param} - random_sample_param: ${data.random_sample_param} - downsample_factor: ${data.downsample_factor} - every_nth_window: ${data.every_nth_window} - window_start_percent: ${data.train_window_start_percent} - window_end_percent: ${data.train_window_end_percent} - batch_size: ${data.batch_size} - shuffle: False - -ood_dataloader: - _target_: torch.utils.data.DataLoader - dataset: - _target_: pdeinvbench.data.PDE_MultiParam - data_root: ${data.ood_data_root} - pde: ${data.pde} - n_past: ${n_past} - train: False - dilation: ${data.dilation} - cutoff_first_n_frames: ${data.cutoff_first_n_frames} - downsample_factor: ${data.downsample_factor} - every_nth_window: ${data.every_nth_window} - batch_size: ${data.batch_size} - shuffle: False - -ood_dataloader_extreme: - _target_: torch.utils.data.DataLoader - dataset: - _target_: pdeinvbench.data.PDE_MultiParam - data_root: ${data.ood_data_root_extreme} - pde: ${data.pde} - n_past: ${n_past} - train: False - dilation: ${data.dilation} - cutoff_first_n_frames: ${data.cutoff_first_n_frames} - downsample_factor: ${data.downsample_factor} - every_nth_window: ${data.every_nth_window} - batch_size: ${data.batch_size} - shuffle: False - -test_dataloader: - _target_: torch.utils.data.DataLoader - dataset: - _target_: pdeinvbench.data.PDE_MultiParam - data_root: ${data.test_data_root} - pde: ${data.pde} - n_past: ${n_past} - train: False - dilation: ${data.dilation} - cutoff_first_n_frames: ${data.cutoff_first_n_frames} - downsample_factor: ${data.downsample_factor} - every_nth_window: ${data.every_nth_window} - window_start_percent: ${data.test_window_start_percent} - window_end_percent: ${data.test_window_end_percent} - batch_size: ${data.batch_size} - shuffle: False diff --git a/configs/lightning_module/base.yaml b/configs/lightning_module/base.yaml deleted file mode 100644 index e6c1f82ce694eeea37e5e8805f6821c4e23877ae..0000000000000000000000000000000000000000 --- a/configs/lightning_module/base.yaml +++ /dev/null @@ -1,10 +0,0 @@ -_target_: pdeinvbench.lightning_modules.InverseModule -pde: ${data.pde} -n_past: ${n_past} -batch_size: ${data.batch_size} -use_partials: ${model.model_config.paramnet.encoder.use_partials} -params_to_predict: ${model.model_config.paramnet.params_to_predict} -param_loss_metric: ${loss.param_loss_metric} -inverse_residual_loss_weight: ${loss.inverse_residual_loss_weight} -inverse_param_loss_weight: ${loss.inverse_param_loss_weight} -residual_filter: False \ No newline at end of file diff --git a/configs/lightning_module/ttt.yaml b/configs/lightning_module/ttt.yaml deleted file mode 100644 index 1ccf23a8e544326085cf8375e5782db19c93c98f..0000000000000000000000000000000000000000 --- a/configs/lightning_module/ttt.yaml +++ /dev/null @@ -1,8 +0,0 @@ -defaults: - - base - -_target_: pdeinvbench.lightning_modules.InverseTestTimeTailoringModule -num_tailoring_steps: ${num_tailoring_steps} -tailor_per_batch: ${tailor_per_batch} -tailor_anchor_loss_weight: ${tailor_anchor_loss_weight} -tailor_residual_loss_weight: ${tailor_residual_loss_weight} diff --git a/configs/logging/base.yaml b/configs/logging/base.yaml deleted file mode 100644 index 0f89924c7dc563ff90d5ab878745ca5d63a43514..0000000000000000000000000000000000000000 --- a/configs/logging/base.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: 
pdeinvbench.utils.logging_utils.CustomWandbLogger -entity: "ml-pdes" -save_dir: "logs" -project: ${data.name} \ No newline at end of file diff --git a/configs/loss/mse.yaml b/configs/loss/mse.yaml deleted file mode 100644 index 453306280236dc8cc25fec66bc1e32ee27f0ad75..0000000000000000000000000000000000000000 --- a/configs/loss/mse.yaml +++ /dev/null @@ -1,5 +0,0 @@ -param_loss_metric: - _target_: pdeinvbench.utils.types.ParamMetrics - value: "Mean Squared Error" -inverse_residual_loss_weight: 0 -inverse_param_loss_weight: 1 \ No newline at end of file diff --git a/configs/loss/relative.yaml b/configs/loss/relative.yaml deleted file mode 100644 index 7e4c7a0b6087de071fc6a28c7209a2fa8d6ff37f..0000000000000000000000000000000000000000 --- a/configs/loss/relative.yaml +++ /dev/null @@ -1,5 +0,0 @@ -param_loss_metric: - _target_: pdeinvbench.utils.types.ParamMetrics - value: "Relative Error" -inverse_residual_loss_weight: 0 -inverse_param_loss_weight: 1 \ No newline at end of file diff --git a/configs/lr_scheduler/cosine.yaml b/configs/lr_scheduler/cosine.yaml deleted file mode 100644 index 1186106e83595b197e0b7169e5ac3807bc73a4c8..0000000000000000000000000000000000000000 --- a/configs/lr_scheduler/cosine.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: torch.optim.lr_scheduler.CosineAnnealingLR -T_max: ${trainer.max_epochs} diff --git a/configs/model/fno.yaml b/configs/model/fno.yaml deleted file mode 100644 index ab2439c46035e089936dadd316f61861f5c25922..0000000000000000000000000000000000000000 --- a/configs/model/fno.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Shared FNO model configuration -# Interpolates ALL parameters from system_params -name: "${system_params.name}_fno" -dropout: ${system_params.fno_dropout} -hidden_channels: ${system_params.fno_hidden_channels} -encoder_layers: ${system_params.fno_encoder_layers} -downsampler_layers: ${system_params.fno_downsampler_layers} -mlp_layers: ${system_params.fno_mlp_layers} - -model_config: - _target_: pdeinvbench.models.inverse_model.InverseModel - paramnet: - _target_: pdeinvbench.models.inverse_model.ParameterNet - pde: ${data.pde} - normalize: ${system_params.normalize} - logspace: ${system_params.logspace} - params_to_predict: ${system_params.params_to_predict} - mlp_type: ${system_params.mlp_type} - encoder: - _target_: pdeinvbench.models.encoder.FNOEncoder - n_modes: ${system_params.fno_n_modes} - n_past: ${n_past} - n_future: ${n_future} - n_layers: ${model.encoder_layers} - data_channels: ${data.num_channels} - hidden_channels: ${model.hidden_channels} - use_partials: True - pde: ${data.pde} - batch_size: ${data.batch_size} - downsampler: ${system_params.fno_downsampler} - mlp_hidden_size: ${model.hidden_channels} - mlp_layers: ${model.mlp_layers} - mlp_activation: "relu" - mlp_dropout: ${model.dropout} - downsample_factor: ${data.downsample_factor} - diff --git a/configs/model/fno_50k.yaml b/configs/model/fno_50k.yaml deleted file mode 100644 index 086dfe1fab520578c5dce017463e0679170512de..0000000000000000000000000000000000000000 --- a/configs/model/fno_50k.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Small FNO variant (500k params) -# Inherits structure from fno.yaml, only overrides size parameters -defaults: - - fno - -name: "${system_params.name}_fno_50k" -hidden_channels: ${system_params.fno_hidden_channels_50k} -encoder_layers: ${system_params.fno_encoder_layers_50k} - diff --git a/configs/model/fno_50mil.yaml b/configs/model/fno_50mil.yaml deleted file mode 100644 index 
c08aa84d8fe3689794414e76fe8ebbf2b407f704..0000000000000000000000000000000000000000 --- a/configs/model/fno_50mil.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Large FNO variant (50 million params) -# Inherits structure from fno.yaml, only overrides size parameters -defaults: - - fno - -name: "${system_params.name}_fno_50mil" -hidden_channels: ${system_params.fno_hidden_channels_50mil} -encoder_layers: ${system_params.fno_encoder_layers_50mil} - diff --git a/configs/model/resnet.yaml b/configs/model/resnet.yaml deleted file mode 100644 index 1e6a0237fc8cf1f9fc9787720c95c93015ce2406..0000000000000000000000000000000000000000 --- a/configs/model/resnet.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Shared ResNet model configuration -# Interpolates ALL parameters from system_params -name: "${system_params.name}_resnet" -dropout: ${system_params.resnet_dropout} -hidden_channels: ${system_params.resnet_hidden_channels} -encoder_layers: ${system_params.resnet_encoder_layers} -downsampler_layers: ${system_params.resnet_downsampler_layers} -mlp_layers: ${system_params.resnet_mlp_layers} - -model_config: - _target_: pdeinvbench.models.inverse_model.InverseModel - paramnet: - _target_: pdeinvbench.models.inverse_model.ParameterNet - pde: ${data.pde} - normalize: ${system_params.normalize} - logspace: ${system_params.logspace} - params_to_predict: ${system_params.params_to_predict} - mlp_type: ${system_params.mlp_type} - encoder: - _target_: pdeinvbench.models.encoder.ResnetEncoder - n_past: ${n_past} - n_future: ${n_future} - n_layers: ${model.encoder_layers} - data_channels: ${data.num_channels} - hidden_channels: ${model.hidden_channels} - use_partials: True - pde: ${data.pde} - batch_size: ${data.batch_size} - downsampler: ${system_params.resnet_downsampler} - mlp_hidden_size: ${model.hidden_channels} - mlp_layers: ${model.mlp_layers} - mlp_activation: "relu" - mlp_dropout: ${model.dropout} - downsample_factor: ${data.downsample_factor} - diff --git a/configs/model/scot.yaml b/configs/model/scot.yaml deleted file mode 100644 index 2112d0fb31874b70cf5bc8c732bf29d770d3be65..0000000000000000000000000000000000000000 --- a/configs/model/scot.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Shared ScOT model configuration -# Interpolates ALL parameters from system_params -name: "${system_params.name}_scot" -dropout: ${system_params.scot_dropout} -hidden_channels: ${system_params.scot_hidden_channels} -encoder_layers: ${system_params.scot_encoder_layers} -downsampler_layers: ${system_params.scot_downsampler_layers} -mlp_layers: ${system_params.scot_mlp_layers} - -model_config: - _target_: pdeinvbench.models.inverse_model.InverseModel - paramnet: - _target_: pdeinvbench.models.inverse_model.ParameterNet - pde: ${data.pde} - normalize: ${system_params.normalize} - logspace: ${system_params.logspace} - params_to_predict: ${system_params.params_to_predict} - mlp_type: ${system_params.mlp_type} - encoder: - _target_: pdeinvbench.models.encoder.ScOTEncoder - embed_dim: ${system_params.scot_embed_dim} - n_layers: ${model.encoder_layers} - hidden_size: ${system_params.scot_hidden_size} - patch_size: ${system_params.scot_patch_size} - num_heads: ${system_params.scot_num_heads} - skip_connections: ${system_params.scot_skip_connections} - depths: ${system_params.scot_depths} - n_past: ${n_past} - n_future: ${n_future} - use_partials: True - data_channels: ${data.num_channels} - pde: ${data.pde} - batch_size: ${data.batch_size} - downsampler: ${system_params.scot_downsampler} - mlp_hidden_size: ${system_params.scot_mlp_hidden_size} - 
mlp_layers: ${model.mlp_layers} - mlp_activation: "relu" - mlp_dropout: ${model.dropout} - condition_on_time: ${system_params.scot_condition_on_time} - downsample_factor: ${data.downsample_factor} - diff --git a/configs/optimizer/adam.yaml b/configs/optimizer/adam.yaml deleted file mode 100644 index a605109ff378144d7091c4ce1d49cd584855039a..0000000000000000000000000000000000000000 --- a/configs/optimizer/adam.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: torch.optim.Adam -lr: 0.0001 diff --git a/configs/system_params/1dkdv.yaml b/configs/system_params/1dkdv.yaml deleted file mode 100644 index 49b35d89f4be8ca034e8439ece1a2dfbb51cfbe0..0000000000000000000000000000000000000000 --- a/configs/system_params/1dkdv.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# ============================================ -# 1DKDV SYSTEM PARAMETERS -# ============================================ -defaults: - - base - -# ============ Data Parameters ============ -name: "1dkdv_inverse" -data_root: "/data/shared/meta-pde/folded_data/kdv/fold_2" -pde_name: "Korteweg-de Vries 1D" -num_channels: 1 -cutoff_first_n_frames: 0 - -# ============ Model Parameters ============ -downsampler_input_dim: 1 # 1D system -params_to_predict: ["delta"] -normalize: True diff --git a/configs/system_params/2ddf.yaml b/configs/system_params/2ddf.yaml deleted file mode 100644 index 460c41b9e018210ba3ed7db6bdf980cfa404d260..0000000000000000000000000000000000000000 --- a/configs/system_params/2ddf.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# ============================================ -# 2DDF SYSTEM PARAMETERS -# ============================================ -defaults: - - base - -# ============ Data Parameters ============ -name: "2ddf_inverse" -data_root: "/data/shared/meta-pde/darcy-flow/r241_folded/" -pde_name: "Darcy Flow 2D" -num_channels: 1 -cutoff_first_n_frames: 0 - -# ============ Model Parameters ============ -params_to_predict: ["coeff"] -normalize: False -mlp_type: "conv" # Special: 2ddf uses conv MLP - -# Override downsamplers: 2ddf uses IdentityMap instead of ConvDownsampler -fno_downsampler: - _target_: pdeinvbench.models.downsampler.IdentityMap - -resnet_downsampler: - _target_: pdeinvbench.models.downsampler.IdentityMap - -scot_downsampler: - _target_: pdeinvbench.models.downsampler.IdentityMap diff --git a/configs/system_params/2dns.yaml b/configs/system_params/2dns.yaml deleted file mode 100644 index 20408c18acc3e2a5a4d75123b2e1c10d6cff4753..0000000000000000000000000000000000000000 --- a/configs/system_params/2dns.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# ============================================ -# 2DNS SYSTEM PARAMETERS -# ============================================ -defaults: - - base - -# ============ Data Parameters ============ -name: "2dns_inverse" -data_root: "/data/shared/meta-pde/sampled_parameters_split/navierstokes64" -pde_name: "Navier Stokes 2D" -num_channels: 1 -cutoff_first_n_frames: 0 - -# ============ Model Parameters ============ -params_to_predict: ["re"] -normalize: False diff --git a/configs/system_params/2drddu.yaml b/configs/system_params/2drddu.yaml deleted file mode 100644 index 0c36c6fbec825906674807111d59f8131992d2a2..0000000000000000000000000000000000000000 --- a/configs/system_params/2drddu.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# ============================================ -# 2DRD-DU SYSTEM PARAMETERS -# ============================================ -defaults: - - 2drdk -data_root: "/data/shared/meta-pde/folded_data/reaction-diffusion-2d/Du_fold_2" - -# ============ Model Parameters ============ 
-params_to_predict: ["Du"] diff --git a/configs/system_params/2drdk.yaml b/configs/system_params/2drdk.yaml deleted file mode 100644 index 6c017d112a1434f86053e1456f245230fb36ea38..0000000000000000000000000000000000000000 --- a/configs/system_params/2drdk.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# ============================================ -# 2DRD-K SYSTEM PARAMETERS -# ============================================ -defaults: - - base - -# ============ Data Parameters ============ -name: "2drdk_inverse" -data_root: "/data/shared/meta-pde/folded_data/reaction-diffusion-2d/k_fold_2" -pde_name: "Reaction Diffusion 2D" -num_channels: 2 -cutoff_first_n_frames: 2 -# Special override for corner extreme OOD -ood_data_root_extreme: ${system_params.data_root}/out_of_distribution_corner_extreme - -# ============ Model Parameters ============ -params_to_predict: ["k"] -normalize: False diff --git a/configs/system_params/2dtf.yaml b/configs/system_params/2dtf.yaml deleted file mode 100644 index fa5a9d6e6b55a7bf87a253ce353b35ddba0775ec..0000000000000000000000000000000000000000 --- a/configs/system_params/2dtf.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# ============================================ -# 2DTF SYSTEM PARAMETERS -# ============================================ -defaults: - - base - -# ============ Data Parameters ============ -name: "2dtf_inverse" -data_root: "/data/shared/meta-pde/folded_data/turbulent-flow-2d/fold_2" -pde_name: "Turbulent Flow 2D" -num_channels: 1 -cutoff_first_n_frames: 0 - -# ============ Model Parameters ============ -params_to_predict: ["nu"] -normalize: True diff --git a/configs/system_params/base.yaml b/configs/system_params/base.yaml deleted file mode 100644 index f28613ed1086f4e07c73afbfca272a6c384d30d6..0000000000000000000000000000000000000000 --- a/configs/system_params/base.yaml +++ /dev/null @@ -1,91 +0,0 @@ -# Base system parameters -# Defines common structure and defaults for BOTH data AND model -# Each system inherits this and overrides specific values - -# ============ Data Parameters ============ -name: "placeholder_inverse" -data_root: "placeholder_path" -train_data_root: ${system_params.data_root}/train -val_data_root: ${system_params.data_root}/validation -ood_data_root: ${system_params.data_root}/out_of_distribution -ood_data_root_extreme: ${system_params.data_root}/out_of_distribution_extreme -test_data_root: ${system_params.data_root}/test -pde_name: "placeholder_pde" -num_channels: 1 -cutoff_first_n_frames: 0 - -# ============ Model - System-Specific Parameters ============ -params_to_predict: [] -normalize: False -logspace: False -mlp_type: "mlp" # Default to standard MLP (2ddf overrides to "conv") -downsampler_input_dim: 2 # 1 for 1D systems, 2 for 2D systems - -# ============ FNO Architecture ============ -fno_hidden_channels: 64 -fno_encoder_layers: 4 -fno_downsampler_layers: 4 -fno_dropout: 0 -fno_mlp_layers: 1 -fno_n_modes: 16 - -fno_hidden_channels_50k: 16 -fno_encoder_layers_50k: 6 - -fno_hidden_channels_50mil: 200 -fno_encoder_layers_50mil: 4 - -fno_downsampler: - _target_: pdeinvbench.models.downsampler.ConvDownsampler - input_dimension: ${system_params.downsampler_input_dim} - n_layers: ${model.downsampler_layers} - in_channels: ${model.hidden_channels} - out_channels: ${model.hidden_channels} - kernel_size: 3 - stride: 1 - padding: 2 - dropout: ${model.dropout} - -# ============ ResNet Architecture ============ -resnet_hidden_channels: 128 -resnet_encoder_layers: 13 -resnet_downsampler_layers: 4 -resnet_dropout: 0 -resnet_mlp_layers: 1 - 
-resnet_downsampler: - _target_: pdeinvbench.models.downsampler.ConvDownsampler - input_dimension: ${system_params.downsampler_input_dim} - n_layers: ${model.downsampler_layers} - in_channels: ${model.hidden_channels} - out_channels: ${model.hidden_channels} - kernel_size: 3 - stride: 1 - padding: 2 - dropout: ${model.dropout} - -# ============ ScOT Architecture ============ -scot_hidden_channels: 32 -scot_encoder_layers: 4 -scot_downsampler_layers: 4 -scot_dropout: 0 -scot_mlp_layers: 1 -scot_mlp_hidden_size: 32 -scot_condition_on_time: False -scot_embed_dim: 36 -scot_hidden_size: 32 -scot_patch_size: 4 -scot_num_heads: [3, 6, 12, 24] -scot_skip_connections: [2, 2, 2, 2] -scot_depths: [1, 1, 1, 1] - -scot_downsampler: - _target_: pdeinvbench.models.downsampler.ConvDownsampler - input_dimension: ${system_params.downsampler_input_dim} - n_layers: ${model.downsampler_layers} - in_channels: ${model.hidden_channels} - out_channels: ${model.hidden_channels} - kernel_size: 3 - stride: 1 - padding: 2 - dropout: ${model.dropout} diff --git a/configs/tailoring_optimizer/adam.yaml b/configs/tailoring_optimizer/adam.yaml deleted file mode 100644 index e60fd3211411dab0882ff3bedc709d70df30a7b1..0000000000000000000000000000000000000000 --- a/configs/tailoring_optimizer/adam.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: torch.optim.Adam -lr: ${tailoring_optimizer_lr} diff --git a/configs/tailoring_optimizer/sgd.yaml b/configs/tailoring_optimizer/sgd.yaml deleted file mode 100644 index 5e1fc9b111dd2e73fda53c86e4ac6d560bcb7f6f..0000000000000000000000000000000000000000 --- a/configs/tailoring_optimizer/sgd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: torch.optim.SGD -lr: ${tailoring_optimizer_lr} \ No newline at end of file diff --git a/configs/trainer/trainer.yaml b/configs/trainer/trainer.yaml deleted file mode 100644 index 07adb95fb390a342b7b56329410aff593c7d9e8a..0000000000000000000000000000000000000000 --- a/configs/trainer/trainer.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: lightning.Trainer -max_epochs: 200 -log_every_n_steps: 10 -callbacks: ${callbacks} \ No newline at end of file diff --git a/configs/ttt_base.yaml b/configs/ttt_base.yaml deleted file mode 100644 index d0e2f9af31ff1313a2d13afb5606416c3af3eba6..0000000000000000000000000000000000000000 --- a/configs/ttt_base.yaml +++ /dev/null @@ -1,14 +0,0 @@ -name: ttt_base -defaults: - - _self_ - - base - - tailoring_optimizer: adam - - override lightning_module: ttt - -test_run: true - -tailor_anchor_loss_weight: 1 -tailor_residual_loss_weight: 1 -tailor_per_batch: True -num_tailoring_steps: 50 -tailoring_optimizer_lr: 0.00001 diff --git a/environment.yml b/environment.yml deleted file mode 100644 index 03ecd2f54870e74008e628607cecb01b23fc15ba..0000000000000000000000000000000000000000 --- a/environment.yml +++ /dev/null @@ -1,158 +0,0 @@ -name: inv-env -channels: - - defaults - - conda-forge -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h5eee18b_6 - - ca-certificates=2025.2.25=h06a4308_0 - - ld_impl_linux-64=2.40=h12ee557_0 - - libffi=3.4.4=h6a678d5_1 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=1.1.1w=h7f8727e_0 - - pip=25.1=pyhc872135_2 - - python=3.11.0=h7a1cb2a_3 - - readline=8.2=h5eee18b_0 - - setuptools=78.1.1=py311h06a4308_0 - - sqlite=3.45.3=h5eee18b_0 - - tk=8.6.14=h39e8969_0 - - wheel=0.45.1=py311h06a4308_0 - - xz=5.6.4=h5eee18b_1 - - zlib=1.2.13=h5eee18b_1 - - pip: - - 
accelerate==0.31.0 - - aiohappyeyeballs==2.6.1 - - aiohttp==3.11.18 - - aiosignal==1.3.2 - - annotated-types==0.7.0 - - antlr4-python3-runtime==4.9.3 - - appdirs==1.4.4 - - attrs==25.3.0 - - black==25.1.0 - - certifi==2025.4.26 - - charset-normalizer==3.4.2 - - click==8.2.0 - - cmake==4.2.1 - - configmypy==0.2.0 - - contourpy==1.3.2 - - crc32c==2.7.1 - - cycler==0.12.1 - - decorator==5.2.1 - - docker-pycreds==0.4.0 - - donfig==0.8.1.post1 - - filelock==3.18.0 - - fonttools==4.58.0 - - frozenlist==1.6.0 - - fsspec==2025.3.2 - - gitdb==4.0.12 - - gitpython==3.1.44 - - h5py==3.13.0 - - huggingface-hub==0.31.2 - - hydra-core==1.3.2 - - idna==3.10 - - imageio==2.37.0 - - imageio-ffmpeg==0.6.0 - - iniconfig==2.1.0 - - jaxtyping==0.3.2 - - jinja2==3.1.6 - - kiwisolver==1.4.8 - - lightning==2.5.1.post0 - - lightning-utilities==0.14.3 - - lit==18.1.8 - - markupsafe==3.0.2 - - matplotlib==3.10.3 - - moviepy==2.1.2 - - mpmath==1.3.0 - - multidict==6.4.3 - - mypy-extensions==1.1.0 - - narwhals==1.39.1 - - networkx==3.4.2 - - neuraloperator==0.3.0 - - numcodecs==0.16.0 - - numpy==2.2.5 - - nvidia-cublas-cu11==11.10.3.66 - - nvidia-cublas-cu12==12.6.4.1 - - nvidia-cuda-cupti-cu11==11.7.101 - - nvidia-cuda-cupti-cu12==12.6.80 - - nvidia-cuda-nvrtc-cu11==11.7.99 - - nvidia-cuda-nvrtc-cu12==12.6.77 - - nvidia-cuda-runtime-cu11==11.7.99 - - nvidia-cuda-runtime-cu12==12.6.77 - - nvidia-cudnn-cu11==8.5.0.96 - - nvidia-cudnn-cu12==9.5.1.17 - - nvidia-cufft-cu11==10.9.0.58 - - nvidia-cufft-cu12==11.3.0.4 - - nvidia-cufile-cu12==1.11.1.6 - - nvidia-curand-cu11==10.2.10.91 - - nvidia-curand-cu12==10.3.7.77 - - nvidia-cusolver-cu11==11.4.0.1 - - nvidia-cusolver-cu12==11.7.1.2 - - nvidia-cusparse-cu11==11.7.4.91 - - nvidia-cusparse-cu12==12.5.4.2 - - nvidia-cusparselt-cu12==0.6.3 - - nvidia-nccl-cu11==2.14.3 - - nvidia-nccl-cu12==2.26.2 - - nvidia-nvjitlink-cu12==12.6.85 - - nvidia-nvtx-cu11==11.7.91 - - nvidia-nvtx-cu12==12.6.77 - - omegaconf==2.3.0 - - opt-einsum==3.4.0 - - packaging==24.2 - - pandas==2.2.3 - - pathspec==0.12.1 - - pathtools==0.1.2 - - pillow==10.4.0 - - platformdirs==4.3.8 - - plotly==6.1.0 - - pluggy==1.6.0 - - proglog==0.1.12 - - propcache==0.3.1 - - protobuf==4.25.8 - - psutil==7.0.0 - - pydantic==2.11.4 - - pydantic-core==2.33.2 - - pyparsing==3.2.3 - - pytest==8.3.5 - - pytest-mock==3.14.0 - - python-dateutil==2.9.0.post0 - - python-dotenv==1.1.0 - - pytorch-lightning==2.5.1.post0 - - pytz==2025.2 - - pyyaml==6.0.2 - - regex==2024.11.6 - - requests==2.32.3 - - ruamel-yaml==0.18.10 - - ruamel-yaml-clib==0.2.12 - - safetensors==0.5.3 - - scipy==1.15.3 - - scoringrules==0.7.1 - - scot==1.0.0 - - sentry-sdk==2.28.0 - - setproctitle==1.3.6 - - six==1.17.0 - - smmap==5.0.2 - - sympy==1.14.0 - - tensorly==0.9.0 - - tensorly-torch==0.5.0 - - tokenizers==0.13.3 - - torch==2.0.1 - - torch-harmonics==0.7.3 - - torchmetrics==1.7.1 - - torchvision==0.15.2 - - tqdm==4.67.1 - - transformers==4.29.2 - - triton==2.0.0 - - typeguard==2.13.3 - - typing-extensions==4.13.2 - - typing-inspection==0.4.0 - - tzdata==2025.2 - - urllib3==2.4.0 - - wadler-lindig==0.1.6 - - wandb==0.14.2 - - yarl==1.20.0 - - zarr==3.0.7 \ No newline at end of file diff --git a/fluid_stats.py b/fluid_stats.py deleted file mode 100644 index 3634228281a738cd88cb7747bf6a76f062d1f80a..0000000000000000000000000000000000000000 --- a/fluid_stats.py +++ /dev/null @@ -1,418 +0,0 @@ -#!/usr/bin/env python3 -""" -Compute energy spectra from vorticity field data. 
- -This script loads vorticity trajectory data from a .npy file and computes -the azimuthally averaged energy spectrum E(k). It outputs both the spectrum -data as a .npz file and a visualization plot as a .png file. - -To run direct numerical simulations and get fluid fields, please use Jax-CFD: https://github.com/google/jax-cfd -Commit hash we used: 0c17e3855702f884265b97bd6ff0793c34f3155e - -Usage: - uv run python fluid_stats.py path/to/vorticity.npy --out_dir results/ -""" - -import argparse -import logging -import os -from functools import partial - -import jax -import jax.numpy as jnp -import matplotlib.pyplot as plt -import numpy as np -from jax import jit, vmap -from tqdm import tqdm - -# Configure logging -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s - %(levelname)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", -) -logger = logging.getLogger(__name__) - - -# ============================================================================= -# Core computation functions -# ============================================================================= - - -@jit -def vorticity_to_velocity(vorticity): - """ - Convert vorticity to velocity components using the streamfunction. - - Solves the Poisson equation in Fourier space: psi_hat = -vorticity_hat / k^2 - Then computes velocity from streamfunction: u_x = -d(psi)/dy, u_y = d(psi)/dx - - Parameters - ---------- - vorticity : jnp.ndarray, shape (X, Y) - 2D vorticity field on a square grid. - - Returns - ------- - u_x : jnp.ndarray, shape (X, Y) - x-component of velocity. - u_y : jnp.ndarray, shape (X, Y) - y-component of velocity. - """ - N = vorticity.shape[0] - - # Compute streamfunction from vorticity using Poisson equation - # In Fourier space: psi_hat = -vorticity_hat / k^2 - vort_hat = jnp.fft.fft2(vorticity) - - # Create wavenumber arrays - kx = jnp.fft.fftfreq(N, d=1.0) * 2 * jnp.pi - ky = jnp.fft.fftfreq(N, d=1.0) * 2 * jnp.pi - KX, KY = jnp.meshgrid(kx, ky, indexing="ij") - K2 = KX**2 + KY**2 - - # Avoid division by zero at k=0 - K2 = K2.at[0, 0].set(1.0) - psi_hat = -vort_hat / K2 - psi_hat = psi_hat.at[0, 0].set(0.0) # Set mean streamfunction to zero - - # Compute velocity components from streamfunction - # u_x = -d(psi)/dy, u_y = d(psi)/dx - u_x_hat = -1j * KY * psi_hat - u_y_hat = 1j * KX * psi_hat - - u_x = jnp.real(jnp.fft.ifft2(u_x_hat)) - u_y = jnp.real(jnp.fft.ifft2(u_y_hat)) - - return u_x, u_y - - -@partial(jit, static_argnames=["k_max"]) -def energy_spectrum_single(u_x, u_y, k_max=None): - """ - Compute azimuthally averaged energy spectrum E(k) for a single velocity field. - - The energy spectrum is computed by binning the 2D Fourier-transformed - velocity field by wavenumber magnitude |k|. - - Parameters - ---------- - u_x : jnp.ndarray, shape (X, Y) - x-component of velocity. - u_y : jnp.ndarray, shape (X, Y) - y-component of velocity. - k_max : int, optional - Maximum wavenumber to compute. If None, uses N//3 (2/3 dealiasing rule). - - Returns - ------- - E : jnp.ndarray, shape (k_max+1,) - Energy spectrum E(k) for k = 0, 1, ..., k_max. 
- """ - N = u_x.shape[0] - - # FFT, shifted so k=0 is at centre - Ux = jnp.fft.fftshift(jnp.fft.fft2(u_x)) - Ux = Ux / (N**2) - Uy = jnp.fft.fftshift(jnp.fft.fft2(u_y)) - Uy = Uy / (N**2) - - # Integer wave numbers - kx = jnp.fft.fftshift(jnp.fft.fftfreq(N)) * N - ky = kx - KX, KY = jnp.meshgrid(kx, ky) - K = jnp.hypot(KX, KY).astype(jnp.int32) - - if k_max is None: # Nyquist under 2/3 de-alias - k_max = N // 3 - - # Vectorized computation of energy spectrum - def compute_E_k(k): - mask = K == k - return 0.5 * jnp.sum(jnp.abs(Ux) ** 2 * mask + jnp.abs(Uy) ** 2 * mask) - - k_vals = jnp.arange(k_max + 1) - E = vmap(compute_E_k)(k_vals) - - return E - - -@partial(jit, static_argnames=["k_max"]) -def energy_spectrum_from_vorticity(vorticity, k_max=None): - """ - Compute energy spectrum from vorticity field using vmap. - - Suitable for moderate resolution fields (up to ~1024x1024). - For larger resolutions, use energy_spectrum_from_vorticity_lax_map. - - Parameters - ---------- - vorticity : jnp.ndarray, shape (T, X, Y) - Vorticity field over T time steps on an X x Y grid. - k_max : int, optional - Maximum wavenumber. If None, uses N//3 (2/3 dealiasing rule). - - Returns - ------- - E : jnp.ndarray, shape (T, k_max+1) - Energy spectrum for each time step. - """ - N = vorticity.shape[1] - - if k_max is None: - k_max = N // 3 - - def process_timestep(vort_t): - u_x, u_y = vorticity_to_velocity(vort_t) - return energy_spectrum_single(u_x, u_y, k_max) - - # Vectorize over time dimension - E = vmap(process_timestep)(vorticity) - - return E - - -@partial(jit, static_argnames=["k_max", "batch_size"]) -def energy_spectrum_from_vorticity_lax_map(vorticity, k_max=None, batch_size=16): - """ - Compute energy spectrum from vorticity field using jax.lax.map. - - Memory-efficient version suitable for high resolution fields (>1024x1024). - Processes timesteps sequentially to reduce memory footprint. - - Parameters - ---------- - vorticity : jnp.ndarray, shape (T, X, Y) - Vorticity field over T time steps on an X x Y grid. - k_max : int, optional - Maximum wavenumber. If None, uses N//3 (2/3 dealiasing rule). - batch_size : int, optional - Batch size for lax.map processing. Default is 16. - - Returns - ------- - E : jnp.ndarray, shape (T, k_max+1) - Energy spectrum for each time step. - """ - N = vorticity.shape[1] - - if k_max is None: - k_max = N // 3 - - def process_timestep(vort_t): - u_x, u_y = vorticity_to_velocity(vort_t) - return energy_spectrum_single(u_x, u_y, k_max) - - # Use lax.map instead of vmap for memory efficiency - E = jax.lax.map(process_timestep, vorticity, batch_size=batch_size) - - return E - - -# ============================================================================= -# Main script -# ============================================================================= - - -def parse_args(): - """Parse command line arguments.""" - parser = argparse.ArgumentParser( - description=( - "Compute energy spectra from 2D vorticity trajectory data. " - "Loads vorticity fields from a .npy file, computes the azimuthally " - "averaged energy spectrum E(k), and saves both the spectrum data " - "and a visualization plot." 
- ), - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=""" -Examples: - uv run python fluid_stats.py simulation.npy - uv run python fluid_stats.py data/vorticity.npy --out_dir results/ - -Input format: - The input .npy file should contain a 4D array with shape (batch, time, X, Y) - where batch is the number of independent trajectories, time is the number - of snapshots, and X, Y are the spatial grid dimensions. - """, - ) - - parser.add_argument( - "input_file", - type=str, - help=( - "Path to the input .npy file containing vorticity data. " - "Expected shape: (batch, time, X, Y) where X and Y are the " - "spatial grid dimensions (must be square, i.e., X == Y)." - ), - ) - - parser.add_argument( - "--out_dir", - type=str, - default=".", - help=( - "Directory to save output files. Will be created if it does not " - "exist. Output files are named based on the input filename. " - "Default: current directory." - ), - ) - - return parser.parse_args() - - -def main(): - """Main entry point for energy spectrum computation.""" - args = parse_args() - - # Setup - logger.info("JAX devices: %s", jax.devices()) - - # Validate input file - if not os.path.exists(args.input_file): - logger.error("Input file not found: %s", args.input_file) - raise FileNotFoundError(f"Input file not found: {args.input_file}") - - if not args.input_file.endswith(".npy"): - logger.warning( - "Input file does not have .npy extension: %s", args.input_file - ) - - # Create output directory - os.makedirs(args.out_dir, exist_ok=True) - - # Generate output filenames from input filename - input_basename = os.path.splitext(os.path.basename(args.input_file))[0] - data_filename = f"{input_basename}_spectrum_data.npz" - plot_filename = f"{input_basename}_spectrum.png" - data_path = os.path.join(args.out_dir, data_filename) - plot_path = os.path.join(args.out_dir, plot_filename) - - # Load data - logger.info("Loading data from: %s", args.input_file) - field = np.load(args.input_file) - logger.info("Loaded field with shape: %s", field.shape) - - # Validate shape - if field.ndim != 4: - logger.error( - "Expected 4D array (batch, time, X, Y), got %dD array", field.ndim - ) - raise ValueError( - f"Expected 4D array (batch, time, X, Y), got {field.ndim}D array" - ) - - batch_size, time_steps, height, width = field.shape - if height != width: - logger.error( - "Expected square spatial grid (X == Y), got %d x %d", height, width - ) - raise ValueError( - f"Expected square spatial grid (X == Y), got {height} x {width}" - ) - - resolution = height - k_max = resolution // 3 - logger.info( - "Processing %d trajectories with %d timesteps at %dx%d resolution", - batch_size, - time_steps, - resolution, - resolution, - ) - logger.info("Maximum wavenumber (k_max): %d", k_max) - - # Compute energy spectrum - logger.info("Computing energy spectra...") - spectra_list = [] - - for i in tqdm(range(batch_size), desc="Computing spectra"): - if resolution > 1024: - # Use memory-efficient lax.map for large resolutions - single_spectrum = energy_spectrum_from_vorticity_lax_map( - field[i], k_max - ) - else: - # Use vmap for moderate resolutions - single_spectrum = energy_spectrum_from_vorticity(field[i], k_max) - spectra_list.append(single_spectrum) - - # Stack all spectra - all_spectra = jnp.stack(spectra_list) - logger.info("All spectra shape: %s", all_spectra.shape) - - # Compute mean spectrum (over batch and time) - mean_spectrum = all_spectra.reshape(-1, all_spectra.shape[-1]).mean(axis=0) - logger.info("Mean spectrum shape: %s", 
mean_spectrum.shape) - - # Save spectrum data - logger.info("Saving spectrum data to: %s", data_path) - np.savez_compressed( - data_path, - mean_spectrum=np.array(mean_spectrum), - all_spectra=np.array(all_spectra), - k_values=np.arange(len(mean_spectrum)), - resolution=resolution, - batch_size=batch_size, - time_steps=time_steps, - ) - - # Generate plot - logger.info("Generating energy spectrum plot...") - plt.figure(figsize=(10, 6)) - - # Plot mean spectrum (skip k=0) - offset = 1 - spectrum = mean_spectrum[offset:] - k_values = np.arange(offset, len(mean_spectrum)) - plt.loglog(k_values, spectrum, "b-", linewidth=2, label="Mean spectrum") - - # Add k^{-5/3} reference line (Kolmogorov scaling for 3D turbulence) - # and k^{-3} reference line (enstrophy cascade in 2D turbulence) - k_match = min(10, len(spectrum) // 3) - if k_match > 0: - ref_value = float(spectrum[k_match - 1]) - - # k^{-3} line (2D enstrophy cascade) - scaling_k3 = ref_value * (k_match**3) - k_theory = np.logspace(0, np.log10(len(mean_spectrum)), 100) - power_law_k3 = scaling_k3 * k_theory ** (-3) - plt.loglog( - k_theory, - power_law_k3, - "k--", - alpha=0.7, - linewidth=1.5, - label=r"$k^{-3}$ (enstrophy cascade)", - ) - - # k^{-5/3} line (inverse energy cascade) - scaling_k53 = ref_value * (k_match ** (5 / 3)) - power_law_k53 = scaling_k53 * k_theory ** (-5 / 3) - plt.loglog( - k_theory, - power_law_k53, - "r--", - alpha=0.7, - linewidth=1.5, - label=r"$k^{-5/3}$ (energy cascade)", - ) - - plt.xlabel("Wavenumber k", fontsize=12) - plt.ylabel("Energy Spectrum E(k)", fontsize=12) - plt.title(f"Energy Spectrum ({resolution}x{resolution} resolution)", fontsize=14) - plt.legend() - plt.grid(True, alpha=0.3) - xlim = plt.xlim() - plt.xlim(1, xlim[1]) - plt.tight_layout() - - # Save plot - plt.savefig(plot_path, dpi=300, bbox_inches="tight") - plt.close() - logger.info("Plot saved to: %s", plot_path) - - logger.info("Done!") - - -if __name__ == "__main__": - main() diff --git a/huggingface_pdeinv_download.py b/huggingface_pdeinv_download.py deleted file mode 100644 index a9f3abd31ab676eedfe790873440c50d13d03dec..0000000000000000000000000000000000000000 --- a/huggingface_pdeinv_download.py +++ /dev/null @@ -1,60 +0,0 @@ -import argparse -from huggingface_hub import snapshot_download - -datasets = [ - "darcy-flow-241", - "darcy-flow-421", - "korteweg-de-vries-1d", - "navier-stokes-forced-2d-2048", - "navier-stokes-forced-2d", - "navier-stokes-unforced-2d", - "reaction-diffusion-2d-du-512", - "reaction-diffusion-2d-du", - "reaction-diffusion-2d-k-512", - "reaction-diffusion-2d-k", -] - -splits = [ - "*", - "train", - "validation", - "test", - "out_of_distribution", - "out_of_distribution_extreme", -] - - -def main(): - parser = argparse.ArgumentParser( - description="Download PDE Inverse Problem Benchmarking datasets" - ) - parser.add_argument( - "--dataset", - type=str, - default="darcy-flow-241", - choices=datasets, - help="Dataset to download", - ) - parser.add_argument( - "--split", type=str, default="*", choices=splits, help="Data split to download" - ) - parser.add_argument( - "--local-dir", type=str, default="", help="Local directory to save data" - ) - - args = parser.parse_args() - - data_bucket = "DabbyOWL/PDE_Inverse_Problem_Benchmarking" - - print(f"Downloading {args.dataset}/{args.split} to {args.local_dir}") - - snapshot_download( - data_bucket, - allow_patterns=[f"{args.dataset}/{args.split}/*"], - local_dir=args.local_dir, - repo_type="dataset", - ) - - -if __name__ == "__main__": - main() diff --git 
a/images/1dkdv.png b/images/1dkdv.png deleted file mode 100644 index 706416ce8e5daba69e816bf5bbdee9e43ff93de3..0000000000000000000000000000000000000000 --- a/images/1dkdv.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:80bc2c529faa8b4aff5ff2a5e7ad3543c66da8694e173aa24e598dddebcdb801 -size 24713 diff --git a/images/2d_navier_stokes_unforced_train_val_split.png b/images/2d_navier_stokes_unforced_train_val_split.png deleted file mode 100644 index e7ec98ab2fbdbf92187afe14f179e2881537830c..0000000000000000000000000000000000000000 --- a/images/2d_navier_stokes_unforced_train_val_split.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1fca27e1e0f475f207ecd1581d8dfe309148373cc4256006b6eb6e1909599b8f -size 25553 diff --git a/images/2ddf.png b/images/2ddf.png deleted file mode 100644 index 42f0caa84a7ac0f5e12cc1ea0fcee3b22085fa3f..0000000000000000000000000000000000000000 --- a/images/2ddf.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1e36b7d1a0edff49332fe1a724c7fc247d2a0c93a9dd94e5eb23f20b05dde710 -size 70413 diff --git a/images/2dns.png b/images/2dns.png deleted file mode 100644 index 8eaa3244245b56b3a5352d5a300c1652ea6a4cc7..0000000000000000000000000000000000000000 --- a/images/2dns.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f5b7940d31d01f444e7d5788251f20a525730340abc2d0ac9a191b4ef3bd7533 -size 90658 diff --git a/images/2drd_u_channel.png b/images/2drd_u_channel.png deleted file mode 100644 index 8273e90444f0ea61fa9ddb2ea67b204e672c04f1..0000000000000000000000000000000000000000 --- a/images/2drd_u_channel.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:08a78c48ecade998b2f7578769bd9c0bdd42c37bb407c4268e1cb517b5afb4b6 -size 32808 diff --git a/images/2drd_v_channel.png b/images/2drd_v_channel.png deleted file mode 100644 index 2a89a51024706535cea41bbcc04f67110a4eec74..0000000000000000000000000000000000000000 --- a/images/2drd_v_channel.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b9914887c7f546b4d82cdf5c77c9447da4fbb59c879f2a04a33cdb6c1fcfc9f5 -size 38392 diff --git a/images/2dtf.png b/images/2dtf.png deleted file mode 100644 index 1c9f568c2c217837b3388e8027189514a60ca089..0000000000000000000000000000000000000000 --- a/images/2dtf.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6506ce67970f938a64b20f576661132edfd90757e505ec41413ef3d98e1518cd -size 22342 diff --git a/images/kdv.gif b/images/kdv.gif deleted file mode 100644 index 386f179051ccbcc058d7a49c0f1076d4ed93be6f..0000000000000000000000000000000000000000 --- a/images/kdv.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6070143d622022676f92b15e742a7288d617b894ad2fc8efb94bc026b789f83e -size 1344946 diff --git a/images/kdv_distribution.png b/images/kdv_distribution.png deleted file mode 100644 index 767f9703dbcb1af347421eb8497acedd50fd2f84..0000000000000000000000000000000000000000 --- a/images/kdv_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b1bbe32f4bda7a85322b9795ba5612f8ebc81033f9d23ddcc3d5b63ef65fe67d -size 18500 diff --git a/images/low_res_turbulentflow_distribution.png b/images/low_res_turbulentflow_distribution.png deleted file mode 100644 index 06d871107023725a3766c27eca93b1959241df36..0000000000000000000000000000000000000000 --- 
a/images/low_res_turbulentflow_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9f7908752f9db0aed8dc54b036c7624667101d441234da57217f1c2a6d0122dc -size 19853 diff --git a/images/navierstokes_distribution.png b/images/navierstokes_distribution.png deleted file mode 100644 index 78d8a53618654225ba59ff852e58470b270719ed..0000000000000000000000000000000000000000 --- a/images/navierstokes_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:eeda98c88834e3a47b70c2a6804d48f91561877faeb9bb9aa2293a51b04985b0 -size 19910 diff --git a/images/pde_objectives_main_fig_1.png b/images/pde_objectives_main_fig_1.png deleted file mode 100644 index 3fbf2b3d54cb196d0c4f07fe5cf5df1fac303cea..0000000000000000000000000000000000000000 --- a/images/pde_objectives_main_fig_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6fd830d907b773b64c02623f6060e49bf75f82b58ac949dc75ac9de158f37b9e -size 604478 diff --git a/images/reaction-diffusion-2d-du_distribution.png b/images/reaction-diffusion-2d-du_distribution.png deleted file mode 100644 index 28e89f480d0e489033dae9a20e3613a2f0810ec9..0000000000000000000000000000000000000000 --- a/images/reaction-diffusion-2d-du_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a74bc09e72d764dc4a712350a544f7cb25c22885bec994b6d157cdc32e64e569 -size 17215 diff --git a/images/reaction-diffusion-2d-dv_distribution.png b/images/reaction-diffusion-2d-dv_distribution.png deleted file mode 100644 index 325d2ecf92ee2c1ce19354551e7a17fc463ba12f..0000000000000000000000000000000000000000 --- a/images/reaction-diffusion-2d-dv_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:df6e5dbf847af59e03c8b3b1d6143afca7af46d191c4fad671874699505b00d7 -size 17185 diff --git a/images/reaction-diffusion-2d-k_distribution.png b/images/reaction-diffusion-2d-k_distribution.png deleted file mode 100644 index 596df362ea9f5d9b620646a8c1c956ff479fb836..0000000000000000000000000000000000000000 --- a/images/reaction-diffusion-2d-k_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a56c3bd48b823211096d4a15525edb9168066329cc01aade2f70551566ead12b -size 17294 diff --git a/images/turbulentflow_distribution.png b/images/turbulentflow_distribution.png deleted file mode 100644 index ca95a9cedcc7404d02c9cb1b7f567caac5a1529b..0000000000000000000000000000000000000000 --- a/images/turbulentflow_distribution.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c253820d46e19d4dce6937f4702cb220d7de879c4eb25b6bf1747de92856f8a7 -size 20044 diff --git a/pdeinvbench/__init__.py b/pdeinvbench/__init__.py deleted file mode 100644 index 87eeec6838c24c85bc45909d3ce50521fbe7a6d1..0000000000000000000000000000000000000000 --- a/pdeinvbench/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from pdeinvbench import data -from pdeinvbench import lightning_modules -from pdeinvbench import losses -from pdeinvbench import losses -from pdeinvbench import utils diff --git a/pdeinvbench/data/__init__.py b/pdeinvbench/data/__init__.py deleted file mode 100644 index 36205cc4abf6f58c3c2bc6004b0ad14775d43d48..0000000000000000000000000000000000000000 --- a/pdeinvbench/data/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from pdeinvbench.data.dataset import PDE_MultiParam diff --git a/pdeinvbench/data/dataset.py b/pdeinvbench/data/dataset.py 
deleted file mode 100644 index 2da4414dd0ba5c983d740b28df453f23322fff66..0000000000000000000000000000000000000000 --- a/pdeinvbench/data/dataset.py +++ /dev/null @@ -1,360 +0,0 @@ -import glob -import logging -import math - -import h5py -import numpy as np -import torch -from scipy import signal -from torch.utils.data import Dataset - -from pdeinvbench.data.transforms import collapse_time_and_channels_torch_transform -from pdeinvbench.data.utils import extract_params_from_path -from pdeinvbench.utils.types import PDE, PDE_NUM_SPATIAL, PDE_TRAJ_LEN - - -class PDE_MultiParam(Dataset): - """Data Loader that loads the multiple parameter version of PDE Datasets.""" - - def __init__( - self, - data_root: str, - pde: PDE, - n_past: int, - dilation: int, - cutoff_first_n_frames: int, - train: bool, - frac_param_combinations: float = 1, - frac_ics_per_param: float = 1, - random_sample_param: bool = True, - downsample_factor: int = 0, - every_nth_window: int = 1, - window_start_percent: float = 0.0, - window_end_percent: float = 1.0, - ): - """ - Args: - data_root: path containing the h5 files for the current data split - pde: name of the PDE system - one of the enum values. - n_past: number of conditioning frames - dilation: frequency at which to subsample the ground truth trajectories in the time dimension - cutoff_first_n_frames: number of initial frames to cutoff in each trajectory (may want to do this e.g. if initial PDE residuals are very high) - train: if training dataloader, windows are randomly sampled from each trajecory, if non-training dataloader we loop through all non-overlapping windows - frac_param_combinations: fraction of parameter combinations to use. 1 takes all parameters. "0.x" takes x percent of total parameters - frac_ics_per_param: fraction of initial conditions per parameter combination to keep. - random_sample_param: (bool) If frac_param_combinations < 1, true means we randomly sample params and false means we grab the first n_frac params. Defaults to true. - downsample_factor: downsample a solution field spatially by the 'downsample_factor'. 
eg if downsample_factor=4, sol field spatial size=[128,128] --downsample--> final spatial size = [32,32] - every_nth_window: take every nth window from the list of non-over-lapping windows - window_start_percent: percent of the way through the trajectory to start the window after cutoff_first_n_frames - window_end_percent: percent of the way through the trajectory to end the window - """ - - self.data_root = data_root - self.pde = pde - self.n_past = n_past - self.dilation = dilation - self.cutoff_first_n_frames = cutoff_first_n_frames - self.frac_param_combinations = frac_param_combinations - self.frac_ics_per_param = frac_ics_per_param - self.random_sample_param = random_sample_param - self.train = train - self.every_nth_window = every_nth_window - assert ( - window_start_percent < window_end_percent - ), "window_start_percent must be less than window_end_percent" - self.window_start_index = int( - (PDE_TRAJ_LEN[self.pde] - self.cutoff_first_n_frames) * window_start_percent - + self.cutoff_first_n_frames - ) - self.window_end_index = int( - (PDE_TRAJ_LEN[self.pde] - self.cutoff_first_n_frames) * window_end_percent - + self.cutoff_first_n_frames - ) - self.total_trajectory_length = self.window_end_index - self.window_start_index - - if self.train: - self.num_windows = self.total_trajectory_length - self.n_past - 1 - else: - self.num_windows = (self.total_trajectory_length) // ( - (self.n_past) * self.every_nth_window - ) - - if self.num_windows == 0 and self.every_nth_window > 1: - self.every_nth_window = 1 - self.num_windows = (self.total_trajectory_length) // ((self.n_past)) - - # Quick check basically force a non-AR dataloader for darcy flow - if self.pde == PDE.DarcyFlow2D: - self.num_windows = 1 - - self.downsample_factor = downsample_factor - - if PDE_NUM_SPATIAL[pde] == 2: - self.transforms = [collapse_time_and_channels_torch_transform] - else: - self.transforms = None - - # get all h5 paths in the root folder and read them - # each h5 path represents a set of trajectories with a different PDE parameter - self.h5_paths = glob.glob(f"{self.data_root}/*.h5") - if len(self.h5_paths) == 0: - self.h5_paths = glob.glob(f"{self.data_root}/*.hdf5") - if self.pde == PDE.DarcyFlow2D: - self.h5_files = [file for file in self.h5_paths] - else: - self.h5_files = [h5py.File(file, "r") for file in self.h5_paths] - - # extract the individual trajectories from each h5 file - if self.pde == PDE.ReactionDiffusion2D or self.pde == PDE.TurbulentFlow2D: - self.seqs = [list(h5_file.keys()) for h5_file in self.h5_files] - elif self.pde == PDE.NavierStokes2D: - # The individual trajectories are stored in key: 'solutions' - self.seqs = [h5_file["solutions"] for h5_file in self.h5_files] - elif self.pde == PDE.KortewegDeVries1D: - self.seqs = [h5_file["tensor"] for h5_file in self.h5_files] - elif self.pde == PDE.DarcyFlow2D: - # There is an issue where too many files are open, os throws errno 24 - self.seqs = [file for file in self.h5_paths] - else: - self.seqs = [h5py.File(file, "r") for file in self.h5_paths] - if self.frac_param_combinations < 1: - total_params = math.ceil(len(self.seqs) * self.frac_ics_per_param) - - logging.info( - f"trimming dataset from length {len(self.seqs)} to {total_params}" - ) - if self.random_sample_param: - # Just a quick sanity check to ensure that all of the variables are the same length - # If this fails, something has gone VERY wrong - assert len(self.seqs) == len(self.h5_paths) and len( - self.h5_paths - ) == len( - self.h5_files - ), f"The dataloader variables are 
mismatched. seqs = {len(self.seqs)}, h5_paths = {len(self.h5_paths)}, h5_files = {len(self.h5_files)}" - - # We've had issues in the past with reproducibility so this forces a seed - # Also will keep the datasets the same regardless of the training and weight init seeds - num_sequences: int = len(self.seqs) - requested_dataset_size: int = int( - num_sequences * self.frac_param_combinations - ) - indices = np.arange(num_sequences) - sample_seed: int = 42 - rng_generator = np.random.default_rng(seed=sample_seed) - sampled_indices = rng_generator.choice( - indices, size=requested_dataset_size, replace=False - ) - logging.info( - f"Using random sampling to trim the dataset down from length {len(self.seqs)} to {requested_dataset_size}" - ) - assert ( - len(set(sampled_indices.tolist())) == sampled_indices.shape[0] - ), f"Duplicate items in random sampling of PDE parameters!" - assert ( - sampled_indices.shape[0] == requested_dataset_size - ), f"Mismatch between the requested dataset sample size and the new sampled dataset. frac requested = {self.frac_param_combinations}, requested size = {requested_dataset_size}, new size = {sampled_indices.shape[0]}" - self.seqs = [self.seqs[i] for i in sampled_indices] - self.h5_paths = [self.h5_paths[i] for i in sampled_indices] - self.h5_files = [self.h5_files[i] for i in sampled_indices] - else: - self.seqs = self.seqs[:total_params] - self.h5_paths = self.h5_paths[:total_params] - self.h5_files = self.h5_files[:total_params] - - self.num_params = len(self.seqs) - if self.pde == PDE.KortewegDeVries1D: - # Since it follows the same format at 1D reaction diffusion - self.num_ics_per_param = self.seqs[0].shape[0] - elif self.pde == PDE.DarcyFlow2D: - self.num_ics_per_param = 1 # Each param only has one IC - elif self.pde != PDE.NavierStokes2D: - self.num_ics_per_param = len( - min([self.seqs[i] for i in range(len(self.seqs))]) - ) # to manage un-even number of ICs per param - else: - self.num_ics_per_param = min( - [self.seqs[i].shape[0] for i in range(len(self.seqs))] - ) - - # Trim nmber of ICs per parameter - - self.num_ics_per_param = math.ceil( - self.num_ics_per_param * self.frac_ics_per_param - ) - # We also need to save the dx, dt, dy information in order to compute the PDE residual - if pde == PDE.ReactionDiffusion2D or pde == PDE.TurbulentFlow2D: - self.x = self.h5_files[0]["0001"]["grid"]["x"][:] - self.y = self.h5_files[0]["0001"]["grid"]["y"][:] - self.t = torch.Tensor(self.h5_files[0]["0001"]["grid"]["t"][:]) - elif pde == PDE.NavierStokes2D: - self.x = self.h5_files[0]["x-coordinate"][:] - self.y = self.h5_files[0]["y-coordinate"][:] - self.t = torch.Tensor(self.h5_files[0]["t-coordinate"][:]) - elif pde == PDE.DarcyFlow2D: - # Not ideal but it's fine to just hard code the current coordinates darcy flow - domain_len = 1 # Uniform grid with 1 - same regardless of resolution - d = h5py.File(self.seqs[0], "r") - size, _, _ = d["sol"].shape - d.close() - x = np.linspace(0, domain_len, size, endpoint=False) - self.x = torch.from_numpy(x) - self.y = torch.from_numpy(x) - self.t = ( - torch.ones(10, dtype=float) * -1 - ) # Darcy flow is non time dependent so we use -1 - else: - # All of the 1D systems - self.y = None # There is no y component - self.x = self.h5_files[0]["x-coordinate"][:] - self.t = torch.Tensor(self.h5_files[0]["t-coordinate"][:]) - - if self.downsample_factor != 0: - self.y = ( - None - if self.y is None - else signal.decimate(self.y, q=self.downsample_factor, axis=0).copy() - ) - self.x = signal.decimate(self.x, 
q=self.downsample_factor, axis=0).copy() - self.x = torch.Tensor(self.x) - self.y = torch.Tensor(self.y) if self.y is not None else None - - logging.info( - f"Initialized dataset with {self.num_params} parameter combinations" - ) - - def __len__(self): - """ - Number of parameters * number of ICs = number of full trajectories. - """ - if self.train: - return self.num_params * self.num_ics_per_param - else: - return self.num_params * self.num_ics_per_param * self.num_windows - - def __getitem__(self, index: int): - """ - Loops over all parameters and ICs, and randomly samples time windows. - Returns: - x: conditioning frames, shape of [n_past, spatial/channel dims] - y: target frame(s), shape of [n_future, spatial/channel dims] - param_dict: dictionary containing the true PDE parameter for the trajectory. - """ - # Compute the parameter and ic index for train loader - if self.train: - param_index = index // self.num_ics_per_param - ic_index = index % self.num_ics_per_param - else: - # Compute the parameter, ic index, and window index for validation/test loaders - # index is assumed to be in row major format of [num_params, num_ics_per_param, num_windows] dataset matrix organization - param_index = index // (self.num_ics_per_param * self.num_windows) - ic_index = (index // self.num_windows) % self.num_ics_per_param - window_index = index % self.num_windows - # get the corresponding trajectory and parameters - h5_file = self.h5_files[param_index] - h5_path = self.h5_paths[param_index] - param_dict = extract_params_from_path(h5_path, self.pde) - - if self.pde == PDE.ReactionDiffusion2D or self.pde == PDE.TurbulentFlow2D: - # get data - seq = self.seqs[param_index][ic_index] - traj = torch.Tensor( - np.array(h5_file[f"{seq}/data"], dtype="f") - ) # dim = [seq_len, spatial_dim_1, spatial_dim_2, channels] - elif self.pde == PDE.NavierStokes2D: - seq = self.seqs[param_index] - traj = torch.Tensor(seq[ic_index]) - # dim = [seq_len (t), spatial_dim_1, spatial_dim_2, channels] - - elif self.pde == PDE.DarcyFlow2D: - # Unique since there is no time dim - # There is also only one ic per param - seq = h5py.File(self.seqs[param_index], "r") - - coeff = torch.from_numpy(np.asarray(seq["coeff"])) - coeff = torch.squeeze(coeff) - coeff = torch.unsqueeze(coeff, dim=0) # Channel first repr - # We treat the coeff as a binary mask - min_val = coeff.min() - max_val = coeff.max() - # generate the binary mask - coeff = coeff - min_val - binary_mask = coeff > 0 - - def wrap_scalar(x): - return torch.Tensor([x.item()]) - - param_dict["coeff"] = binary_mask.float() - param_dict["max_val"] = wrap_scalar(max_val) - param_dict["min_val"] = wrap_scalar(min_val) - traj = torch.from_numpy(np.asarray(seq["sol"])) - seq.close() - else: - seq = self.seqs[param_index] - traj = torch.Tensor(np.array(h5_file["tensor"][ic_index])) - traj = traj[:: self.dilation] # subsample based on dilation - - # sample a random window of length [n_past] from this trajectory - if traj.shape[0] - self.n_past == 0: - start = 0 - # if n_past > 1, problem is well posed - if self.n_past == 1: - raise ValueError("Problem is ill-posed when n_past == 1. 
") - else: - if self.train: - start = np.random.randint( - self.window_start_index, - self.window_end_index - self.n_past, - ) - else: - # multiply with self.n_past to avoid overlapping in validation/test samples - start = self.window_start_index + ( - window_index * (self.n_past) * self.every_nth_window - ) - - if self.pde != PDE.DarcyFlow2D: - traj = traj[start : start + self.n_past] - time_frames = self.t[start : start + self.n_past] - else: - time_frames = -1 * torch.ones(self.n_past, dtype=float) - # 2D systems - if len(traj.shape) == 4: - # [T, Channels, Spatial, Spatial] - traj = traj.permute((0, 3, 1, 2)) - - if self.downsample_factor != 0: - traj = signal.decimate(traj, q=self.downsample_factor, axis=-1) - traj = ( - torch.Tensor( - signal.decimate(traj, q=self.downsample_factor, axis=-2).copy() - ) - if len(traj.shape) == 4 - else torch.Tensor(traj.copy()) - ) - - # split into conditioning and target frames - if self.pde == PDE.DarcyFlow2D: - # Transforms to reshape the traj to the expected shape - # nx x ny x 1 -> T, C, X, Y - # T == C == 1 - traj = torch.squeeze(traj) - traj = torch.unsqueeze(traj, dim=0) - traj = torch.unsqueeze(traj, dim=0) - x, y = ( - traj, - traj, - ) - x = x.float() - y = y.float() - else: - x, y = torch.split(traj, [self.n_past, 0], dim=0) - - if self.transforms is not None: - # Perform any data transforms if specified - for T in self.transforms: - x, y, param_dict = T((x, y, param_dict)) - - # return spatial/temporal grid, frames and parameters - spatial_grid = (self.x, self.y) if self.y is not None else (self.x,) - - ic_index = torch.tensor([ic_index], dtype=float) - return spatial_grid, self.t, x, y, time_frames, ic_index, param_dict diff --git a/pdeinvbench/data/transforms.py b/pdeinvbench/data/transforms.py deleted file mode 100644 index 0e3c67c8cc154e1580899fffcda036d8514bd80e..0000000000000000000000000000000000000000 --- a/pdeinvbench/data/transforms.py +++ /dev/null @@ -1,80 +0,0 @@ -import pdb -from typing import Dict, Tuple - -import jaxtyping -import torch -import typeguard -from jaxtyping import Float, jaxtyped - -""" -Set of utility functions for data transformations. -""" - - -@jaxtyped(typechecker=typeguard.typechecked) -def collapse_time_and_channels( - x: Float[torch.Tensor, "time channel xspace yspace"], -) -> Float[torch.Tensor, "time*channel xspace yspace"]: - """ - Collapses the time and channel dimensions of a tensor into a single dimension. - NOTE: This is only applicable to 2D systems and this is NOT batched! - We do this to be compatible with FNO. FNO can't handle multiple function outputs - at once since we're already using the channel dimension to represent time. - :param x: Input tensor of shape (time, channel, xspace, yspace). - :return: Output tensor of shape (time*channel, xspace, yspace). 
- """ - x_flattened = torch.flatten(x, start_dim=0, end_dim=1) - return x_flattened - - -@jaxtyped(typechecker=typeguard.typechecked) -def collapse_time_and_channels_torch_transform( - batch: Tuple[ - Float[torch.Tensor, "time_n_past in_channels xspace yspace"], - Float[torch.Tensor, "time_n_fut out_channels xspace yspace"], - Dict[ - str, Float[torch.Tensor, "param"] | Float[torch.Tensor, "xspace yspace 1"] - ], - ], -) -> Tuple[ - Float[torch.Tensor, "time_n_past*in_channels xspace yspace"], - Float[torch.Tensor, "time_n_fut*out_channels xspace yspace"], - Dict[str, Float[torch.Tensor, "param"] | Float[torch.Tensor, "xspace yspace 1"]], -]: - """ - Wrapper for ```collapse_time_and_channels``` to be used with PyTorch's dataloader transforms. - Accepts a batch and for the first two elements of the batch, collapses the time and channel dimensions. - :param batch: Tuple of (input, target, pde_params). - :return: Tuple of (input, target, pde_params) - """ - input, target, pde_params = batch - input = collapse_time_and_channels(input) - target = collapse_time_and_channels(target) - return input, target, pde_params - - -@jaxtyped(typechecker=typeguard.typechecked) -def expand_time_and_channels( - x: Float[torch.Tensor, "timexchannel xspace yspace"], - num_channels: int = -1, - num_timesteps: int = -1, -) -> Float[torch.Tensor, "time channel xspace yspace"]: - """ - Expands the time and channel dimensions of a tensor into separate dimensions. - Either number of channels or number of timesteps must be specified. - NOTE: This is only applicable to 2D systems. - :param x: Input tensor of shape (time*channel, xspace, yspace). - :param num_channels: Number of channels to expand to. OPTIONAL if num_timesteps is specified. - :param num_timesteps: Number of timesteps to expand to. OPTIONAL if num_channels is specified. - :return: Output tensor of shape (time, channel, xspace, yspace). - """ - assert ( - num_channels != -1 or num_timesteps != -1 - ), "Either num_channels or num_timesteps must be specified!" - if num_channels != -1: - # Case we infer the number of timesteps - x_unflattened = torch.unflatten(x, 0, (-1, num_channels)) - else: - # Case we infer the number of channels - x_unflattened = torch.unflatten(x, 0, (num_timesteps, -1)) - return x_unflattened diff --git a/pdeinvbench/data/utils.py b/pdeinvbench/data/utils.py deleted file mode 100644 index d70619c637dd3946cd13d4adcbb1930578fb9cef..0000000000000000000000000000000000000000 --- a/pdeinvbench/data/utils.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -from typing import Dict - -import torch -import typeguard -from jaxtyping import jaxtyped - -from pdeinvbench.utils.types import PDE - -""" -Hardcoded parameter normalization stats for each dataset. -These are used to normalize the parameters before training. -""" -PARAM_NORMALIZATION_STATS = { - PDE.ReactionDiffusion2D: { - "k": (0.06391126306498819, 0.029533048151465856), - "Du": (0.3094992685910578, 0.13865605073673604), - "Dv": (0.259514500345804, 0.11541850276902947), - }, - PDE.NavierStokes2D: {"re": (1723.425, 1723.425)}, - PDE.TurbulentFlow2D: {"nu": (0.001372469573118451, 0.002146258280849241)}, - PDE.KortewegDeVries1D: {"delta": (2.899999997019768, 1.2246211546444339)}, -} - - -@jaxtyped(typechecker=typeguard.typechecked) -def unnormalize_params( - param_dict: Dict[str, torch.Tensor], pde: PDE -) -> Dict[str, torch.Tensor]: - """ - Unnormalize the PDE parameters. 
- """ - for param in param_dict.keys(): - if "var" not in param: - mean, std = PARAM_NORMALIZATION_STATS[pde][param] - param_dict[param] = param_dict[param] * std + mean - return param_dict - - -@jaxtyped(typechecker=typeguard.typechecked) -def extract_params_from_path(path: str, pde: PDE) -> dict: - """ - Extracts the PDE parameters from the h5 path and returns as a dictionary. - """ - param_dict = {} - if pde == PDE.ReactionDiffusion2D: - name = os.path.basename(path) - elements = name.split("=")[1:] - Du = torch.Tensor([float(elements[0].split("_")[0])]) - Dv = torch.Tensor([float(elements[1].split("_")[0])]) - k = torch.Tensor( - [float(elements[2].split(".")[0] + "." + elements[2].split(".")[1])] - ) - param_dict = {"k": k, "Du": Du, "Dv": Dv} - elif pde == PDE.NavierStokes2D: - name = os.path.basename(path) - re_string = name.split(".")[0].strip() - re = torch.Tensor([float(re_string)]) - param_dict = {"re": re} - elif pde == PDE.TurbulentFlow2D: - name = os.path.basename(path) - viscosity_string = name.split("=")[1][:-3] - viscosity = float(viscosity_string) - param_dict = {"nu": torch.Tensor([viscosity])} - elif pde == PDE.KortewegDeVries1D: - name = os.path.basename(path) - delta = name.split("=")[-1].split("_")[0] - param_dict = {"delta": torch.Tensor([float(delta)])} - elif pde == PDE.DarcyFlow2D: - # The parameter is stored as part of the h5 file so we return the parsed file index - name = os.path.basename(path) - index = name.split(".")[0].split("_")[-1] - index = int(index) - index = torch.Tensor([index]) - param_dict = {"index": index} - else: - raise ValueError(f"Unknown PDE type: {pde}. Cannot extract parameters.") - - if len(param_dict) == 0: - raise ValueError( - f"No parameters found for PDE: {pde}. Cannot extract parameters." - ) - return param_dict diff --git a/pdeinvbench/lightning_modules/__init__.py b/pdeinvbench/lightning_modules/__init__.py deleted file mode 100644 index 33bcbc63a1800dd906bcc7bfbf3d03fb774a1515..0000000000000000000000000000000000000000 --- a/pdeinvbench/lightning_modules/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .inversemodule import InverseModule -from .inverse_ttt_module import InverseTestTimeTailoringModule -from .logging_callbacks import ( - PDEParamErrorPlottingCallback, -) diff --git a/pdeinvbench/lightning_modules/inverse_ttt_module.py b/pdeinvbench/lightning_modules/inverse_ttt_module.py deleted file mode 100644 index 38a755e75939290e3abfe767a46467fe2ff7085e..0000000000000000000000000000000000000000 --- a/pdeinvbench/lightning_modules/inverse_ttt_module.py +++ /dev/null @@ -1,345 +0,0 @@ -import os -from typing import Callable, Dict, List, Tuple, Union -import time -import lightning as L -import torch -import typeguard -from functorch.dim import tree_map -from jaxtyping import Float, jaxtyped -from lightning.pytorch.utilities import grad_norm -from pdeinvbench.utils.pytorch_utils import compute_grad_norm -from lightning.pytorch.loggers import WandbLogger -from pdeinvbench.data.utils import unnormalize_params -from pdeinvbench.utils.logging_utils import get_best_model_weights -from copy import deepcopy - -from pdeinvbench.losses import ( - get_param_metric, - get_pde_residual_function, - pde_residual_reduction, -) -from pdeinvbench.losses.metrics import ( - classification_metrics_darcy_flow, - param_relative_loss, -) -from pdeinvbench.models.inverse_model import InverseModel -from pdeinvbench.utils import pytorch_utils as ptu -from pdeinvbench.utils.types import ( - PDE, - DataMetrics, - ParamMetrics, - TypeAutoRegressiveInitFrames, 
- TypeAutoRegressivePredFrames, - TypeBatch, - TypeBatch1D, - TypeBatch2D, - TypeLossDict, - TypeParam, - TypePredict1D, - TypePredict2D, -) -from pdeinvbench.lightning_modules.inversemodule import InverseModule - - -class InverseTestTimeTailoringModule(InverseModule): - """ - InverseModule with test time tailoring - :param num_tailoring_steps: Number of tailoring steps - :param tailoring_optimizer: Optimizer for tailoring steps - :param tailor_per_batch: Whether to tailor per batch or per element - :param tailor_anchor_loss_weight: Weight for the anchor loss - :param tailor_residual_loss_weight: Weight for the residual loss - """ - - def __init__( - self, - *args, - num_tailoring_steps: int = 0, - tailoring_optimizer: torch.optim.Optimizer = None, - tailor_per_batch: bool = False, - tailor_anchor_loss_weight: float = 0, - tailor_residual_loss_weight: float = 1, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.num_tailoring_steps = num_tailoring_steps - self.tailoring_optimizer = tailoring_optimizer - self.tailor_per_batch = tailor_per_batch - self.tailor_anchor_loss_weight = tailor_anchor_loss_weight - self.tailor_residual_loss_weight = tailor_residual_loss_weight - - @torch.enable_grad() - def tailor( - self, - batch: Union[TypeBatch1D, TypeBatch2D], - stage: str, - dataloader_idx: int = 0, - true_params: any = None, - ) -> Union[ - TypePredict1D, - TypePredict2D, - ]: - """ - As is, pytorch lightning doesn't support test time tailoring out of the box. As a result, we use a separate optimizer for the tailoring step. - The tailoring step is performed on the model using :self.tailoring_optimizer: and the loss is computed using :self.pde_residual:, calls super().predict_with_loss to get the loss. - :param batch: Batch of data. - :param stage: Which of "train", "validation", "test","tailoring" is the current stage. - :param dataloader_idx: Index of the dataloader. - Returns the appended prediction, appended target and losses, same as :super().predict_with_loss:. 
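
Because `tailor` instantiates its optimizer via `self.tailoring_optimizer(model.parameters())`, the constructor argument is expected to be an optimizer factory rather than an already-built optimizer; a minimal sketch (the optimizer choice and learning rate are illustrative assumptions):

```python
from functools import partial
import torch

# A factory: the module later calls tailoring_optimizer(model.parameters()).
tailoring_optimizer = partial(torch.optim.Adam, lr=1e-3)
# e.g. InverseTestTimeTailoringModule(..., num_tailoring_steps=5,
#                                     tailoring_optimizer=tailoring_optimizer, ...)
```
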
- """ - timing_metrics = {"tailoring_step_time": [], "tailoring_total_time": None} - total_tailoring_start_time = time.time() - # Each tailoring step is isolated so we make a copy of the model - base_model = self.model - model = deepcopy(base_model).to(next(base_model.parameters()).device) - # We remap self.model to the tailored model to enable calling super().predict_with_loss - self.model = model - self.model.train() - optimizer = self.tailoring_optimizer(model.parameters()) - aux_metrics = {"grad_norm": []} - - for tailoring_step in range(self.num_tailoring_steps): - tailoring_step_start_time = time.time() - with torch.enable_grad(): - optimizer.zero_grad() - predicted_pde_params, _, loss = self.predict_with_loss( - batch, "tailoring" - ) - - if "timing_metrics" in loss: - del loss["timing_metrics"] - - for key in predicted_pde_params: - loss[f"tailored_vs_init_predicted_relative_param_loss_{key}"] = ( - loss[f"relative_param_loss_{key}"] - ) - if self.pde != PDE.DarcyFlow2D: - tailored_vs_true_params_relative_error = param_relative_loss( - predicted_pde_params, - true_params, - reduction="mean", - ) - else: - tailored_vs_true_params_relative_error = param_relative_loss( - {"coeff": predicted_pde_params["coeff"]}, - {"coeff": true_params["coeff"]}, - reduction="mean", - ) - - if self.tailor_anchor_loss_weight == 0: - # Only backprop based on the residual loss - total_loss = ( - self.tailor_residual_loss_weight * loss["residual_loss"] - ) - else: - # sum anchor param loss and residual loss and backprop instead - total_loss = ( - self.tailor_residual_loss_weight * loss["residual_loss"] - ) + (self.tailor_anchor_loss_weight * loss["param_loss"]) - total_loss.backward() - optimizer.step() - # Aux metrics for debugging - grad_norm = compute_grad_norm(self.model, None) - for param in tailored_vs_true_params_relative_error: - if ( - f"tailored_vs_true_params_relative_error_{param}" - not in aux_metrics - ): - aux_metrics[ - f"tailored_vs_true_params_relative_error_{param}" - ] = [] - if self.tailor_per_batch: - metric_value = tailored_vs_true_params_relative_error[param] - if isinstance(metric_value, torch.Tensor): - metric_value = metric_value.detach().cpu() - else: - metric_value = metric_value - aux_metrics[ - f"tailored_vs_true_params_relative_error_{param}" - ].append(metric_value) - else: - aux_metrics[ - f"tailored_vs_true_params_relative_error_{param}" - ].append(tailored_vs_true_params_relative_error[param].item()) - aux_metrics["grad_norm"].append(grad_norm) - for metric, value in loss.items(): - if metric not in aux_metrics: - aux_metrics[metric] = [] - if self.tailor_per_batch: - metric_value = value - if isinstance(metric_value, torch.Tensor): - metric_value = metric_value.detach().cpu() - else: - metric_value = metric_value - aux_metrics[metric].append(metric_value) - else: - aux_metrics[metric].append(value.item()) - if "total_loss" not in aux_metrics: - aux_metrics["total_loss"] = [] - if "optimizing_residual_loss" not in aux_metrics: - aux_metrics["optimizing_residual_loss"] = [] - if "optimizing_anchor_loss" not in aux_metrics: - aux_metrics["optimizing_anchor_loss"] = [] - aux_metrics["total_loss"].append(total_loss.item()) - aux_metrics["optimizing_residual_loss"].append( - loss["residual_loss"].item() - ) - aux_metrics["optimizing_anchor_loss"].append(loss["param_loss"].item()) - torch.cuda.empty_cache() - tailoring_step_end_time = time.time() - if "tailoring_step_time" not in aux_metrics: - aux_metrics["tailoring_step_time"] = [] - 
aux_metrics["tailoring_step_time"].append( - torch.tensor(tailoring_step_end_time - tailoring_step_start_time) - ) - with torch.no_grad(): - batch[-1] = true_params - final_appended_prediction, final_appended_target, final_losses = ( - self.predict_with_loss(batch, stage) - ) - - # We delete the model and optimizer to prevent memory leaks - optimizer.zero_grad(set_to_none=True) - del model, optimizer - torch.cuda.empty_cache() - # Reset the model to the base model - self.model = base_model - total_tailoring_end_time = time.time() - total_tailoring_time = total_tailoring_end_time - total_tailoring_start_time - final_losses["total_tailoring_time"] = torch.tensor(total_tailoring_time) - return ( - final_appended_prediction, - final_appended_target, - { - "final_losses": final_losses, - "aux_metrics": aux_metrics, - }, - ) - - @jaxtyped(typechecker=typeguard.typechecked) - def test_step( - self, - batch: Union[TypeBatch1D, TypeBatch2D], - batch_idx: int, - dataloader_idx: int = 0, - ) -> TypeLossDict: - stage = "test" - self.model.train() - test_losses = self.validation_step(batch, batch_idx, dataloader_idx, stage) - # return test_losses if not tailoring - if self.num_tailoring_steps == 0 or self.tailoring_optimizer is None: - return test_losses - else: - self.log_metrics(stage, test_losses, prefix_dir="pre-tailored") - # replace the true PDE params with the predicted PDE params for tailoring anchor loss - true_pde_params = batch[-1].copy() - for param, predicted_param in test_losses["predictions"].items(): - if self.pde != PDE.DarcyFlow2D: - batch[-1][param] = predicted_param.detach() - else: - batch[-1][param] = torch.tensor(predicted_param).to( - next(self.model.parameters()).device - ) - true_pde_params = tree_map( - lambda x: torch.tensor(x).to( - next(self.model.parameters()).device - ), - true_pde_params, - ) - - if self.tailor_per_batch: - predictions, targets, loss_dict = self.tailor( - batch, stage, dataloader_idx, true_pde_params - ) - - losses, aux_metrics = ( - loss_dict["final_losses"], - loss_dict["aux_metrics"], - ) - losses["predictions"] = predictions - losses["targets"] = targets - else: - # Add a second "batch dimension" to the batch. As a result, we manually loop over the batch dimension. - # Ideally, we would use vmap over the real batch dimension, but there are inplace ops in the forward pass of FNO that is not vectorizable. - # Related github issue with a minimal example: https://github.com/pytorch/pytorch/issues/103329 - # Specifically, inplace ops such as: https://github.com/neuraloperator/neuraloperator/blob/main/neuralop/layers/spectral_convolution.py#L459 are not vectorizable. - - # PDE param is a dict so we have to handle it separately. Each parameter is a tensor with shape (B, 1). We unsqueeze it such that the tensor is now (B, 1, 1) - # When we loop over and index the real batch dimension, the PDE parameter will be a tensor of shape (1, 1), reprsenting a batch of size 1. This ensure PDE param also has a second batch dim. 
- batch = tree_map(lambda x: x.unsqueeze(1), batch) - batch_size = batch[0][0].shape[0] - losses = [] - aux_metrics = {"grad_norm": []} - appended_predictions = [] - appended_targets = [] - for idx in range(batch_size): - single_input = tree_map(lambda x: x[idx], batch) - single_true_params = tree_map( - lambda x: x[idx].unsqueeze(1), true_pde_params - ) - - predictions, targets, curr_loss_dict = self.tailor( - single_input, stage, dataloader_idx, single_true_params - ) - loss, single_aux_metrics = ( - curr_loss_dict["final_losses"], - curr_loss_dict["aux_metrics"], - ) - - appended_predictions.append(predictions) - appended_targets.append(targets) - losses.append(loss) - # Append the aux metrics - for k in single_aux_metrics.keys(): - if k not in aux_metrics: - aux_metrics[k] = [] - aux_metrics[k].append(single_aux_metrics[k]) - - # Stack the results back into a tensor - appended_predictions = { - key: torch.stack( - [prediction[key] for prediction in appended_predictions], dim=0 - ) - for key in appended_predictions[0].keys() - } - appended_targets = { - key: torch.stack([target[key] for target in appended_targets], dim=0) - for key in appended_targets[0].keys() - } - - # Stack the losses back into a tensor - losses = { - key: torch.stack([loss[key] for loss in losses], dim=0) - for key in losses[0].keys() if key != "timing_metrics" - } - # Only certain keys need to be squeezed - losses = tree_map(lambda x: x if x.dim() < 2 else x.squeeze(1), losses) - - # Loss contains the following keys from super().predict_with_loss - # 'data_loss' (B), 'residual_loss' (B), 'loss' (B), 'data_loss_per_batch_element' (B), 'residual_per_batch_element' (B), 'data_loss_per_frame' (B, T) - # As a result of looping, we have to reduce the batch dimension for some keys. 
- # Average the losses - for key in losses.keys(): - if "per_batch_element" not in key: - losses[key] = losses[key].mean(dim=0) - - # Average the aux metrics and prep for logging - for k in aux_metrics.keys(): - aux_metrics[k] = torch.tensor(aux_metrics[k]).mean(dim=0) - losses["predictions"] = appended_predictions - losses["targets"] = appended_targets - self.log_metrics(stage, losses, prefix_dir="post-tailored") - losses["tailoring_metrics"] = aux_metrics - losses["pre_tailored_metrics"] = test_losses - - return losses - - def log_metrics(self, stage, losses, prefix_dir=""): - super().log_metrics(stage, losses, prefix_dir) - if "total_tailoring_time" in losses: - self.log( - os.path.join(stage, prefix_dir, "total_tailoring_time"), - losses["total_tailoring_time"], - on_step=False, - on_epoch=True, - ) diff --git a/pdeinvbench/lightning_modules/inversemodule.py b/pdeinvbench/lightning_modules/inversemodule.py deleted file mode 100644 index 48d03b3393169403644e68b2243eb5776afa372f..0000000000000000000000000000000000000000 --- a/pdeinvbench/lightning_modules/inversemodule.py +++ /dev/null @@ -1,444 +0,0 @@ -import os -from typing import Callable, Dict, List, Tuple, Union - -import lightning as L -import torch -import typeguard -from functorch.dim import tree_map -from jaxtyping import Float, jaxtyped -from lightning.pytorch.utilities import grad_norm -from pdeinvbench.utils.pytorch_utils import compute_grad_norm -from lightning.pytorch.loggers import WandbLogger -from pdeinvbench.data.utils import unnormalize_params -from pdeinvbench.utils.logging_utils import get_best_model_weights -from copy import deepcopy -import time - -from pdeinvbench.losses import ( - get_param_metric, - get_pde_residual_function, - pde_residual_reduction, -) -from pdeinvbench.losses.metrics import ( - classification_metrics_darcy_flow, - param_relative_loss, -) -from pdeinvbench.models.inverse_model import InverseModel -from pdeinvbench.utils import pytorch_utils as ptu -from pdeinvbench.utils.types import ( - PDE, - DataMetrics, - ParamMetrics, - TypeBatch, - TypeBatch1D, - TypeBatch2D, - TypeLossDict, - TypeParam, - TypePredict1D, - TypePredict2D, -) - - -class InverseModule(L.LightningModule): - """ - Inverse Problem Module. Takes a set of conditioning frames from a PDE trajectory and predicts the value of the PDE parameter(s). - :param model: Model. - :param optimizer: Optimizer. - :param lr_scheduler: Learning rate scheduler. - :param pde: enum of the PDE to use for the residual calculation. - :param param_loss_metric: Metric to use for the parameter loss. - :param inverse_residual_loss_weight: Weight for the PDE residual loss obtained from the predicted parameters. - :param n_past: Number of past frames to condition on. - :param use_partials: whether to append the partial derivatives to the input. - :param params_to_predict: list of strings of parameters to predict. - :param residual_filter: Whether to use residual filtering. - :param batch_size: Batch size. 
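
For orientation, the two weight arguments listed above are combined into the training objective inside `predict_with_loss` further down; a toy numerical sketch (all values are made up):

```python
import torch

param_loss = torch.tensor(0.10)        # hypothetical parameter loss
residual_loss = torch.tensor(2.5)      # hypothetical PDE residual loss
inverse_param_loss_weight = 1.0        # hypothetical weights
inverse_residual_loss_weight = 0.01
loss = inverse_param_loss_weight * param_loss + inverse_residual_loss_weight * residual_loss
```
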
- """ - - def __init__( - self, - model: InverseModel, - optimizer: torch.optim.Optimizer, - lr_scheduler: torch.optim.lr_scheduler, - pde: PDE, - param_loss_metric: ParamMetrics, - inverse_residual_loss_weight: float, - inverse_param_loss_weight: float, - n_past: int, - use_partials: bool, - params_to_predict: List[str], - residual_filter: bool, - batch_size: int, - ): - super().__init__() - self.save_hyperparameters(ignore=["model"]) - self.model = model - self.old_inverse_model = None - self.new_inverse_model = None - self.pde_residual = get_pde_residual_function(pde) - self.pde = pde - self.param_loss_metric = get_param_metric(param_loss_metric) - self.inverse_residual_loss_weight = inverse_residual_loss_weight - self.inverse_param_loss_weight = inverse_param_loss_weight - - self.validation_step_outputs = [] - self.validation_step_targets = [] - """ - Loss dicts for validation. Each element is a tuple of (losses, pde_params). - PDE_params comes directly from the dataloader. - Loss: Dict[str, torch.Tensor] with keys (shape) - 'data_loss' (B), 'residual_loss' (B), 'loss' (), 'data_loss_per_batch_element' (B), 'residual_per_batch_element' (B). - """ - self.validation_step_loss_dicts = [] - self.n_past = n_past - self.use_partials = use_partials - self.params_to_predict = params_to_predict - self.optimizer = optimizer - self.lr_scheduler = lr_scheduler - self.residual_filter = residual_filter - self.batch_size = batch_size - - @jaxtyped(typechecker=typeguard.typechecked) - def predict_with_loss( - self, - batch: Union[TypeBatch1D, TypeBatch2D], - stage: str, - gumbel: bool = False, - ) -> Tuple[TypeParam, TypeParam, Dict[str, Union[torch.tensor, TypeBatch]]]: - """ - Common method which computes a prediction of the PDE parameter from the conditioning frames and returns the loss. - :param batch: Batch of data. - :param stage: Which of "train", "validation", "test","tailoring" is the current stage. - """ - _, _, initial_frames, _, _, _, _ = batch - start_time = time.time() - # To fix batching issue. Sometimes the input batch_size is less than the defined batch_size. - # This causes logging issues downstream during logging. This snippet repeats the final batch element - # to match the specified input batch_size. 
- num_repeat_elements = 0 - if initial_frames.shape[0] < self.batch_size: - num_repeat_elements = self.batch_size - initial_frames.shape[0] - batch = tree_map( - lambda x: torch.cat( - [x] + [x[-1].unsqueeze(0) for _ in range(num_repeat_elements)] - ), - batch, - ) - spatial_grid, t, initial_frames, _, _, _, pde_params = batch - - forward_pass_start_time = time.time() - predicted_pde_params, residual, true_residual = self.model( - initial_frames, pde_params, spatial_grid, t, gumbel=gumbel - ) - forward_pass_end_time = time.time() - forward_pass_time = forward_pass_end_time - forward_pass_start_time - if self.residual_filter: - ids = [ - true_residual.reshape(true_residual.shape[0], -1).max(dim=1)[0] < 100 - ] - residual = residual[ids] - true_residual = true_residual[ids] - - for key in pde_params: - predicted_pde_params[key] = predicted_pde_params[key][ids] - pde_params[key] = pde_params[key][ids] - - residual_loss = pde_residual_reduction( - residual - ) # Convert residual tensor to float loss - true_residual_loss = pde_residual_reduction(true_residual) - param_losses = self.param_loss_metric( - predicted_pde_params, - pde_params, - reduction="mean", - ) - - relative_param_losses = param_relative_loss( - {param: prediction for param, prediction in predicted_pde_params.items()}, - pde_params, - reduction="mean", - ) - - # To avoid scenario where the averaged loss is computed with zero errors corresponding to params - # that are not predicted for darcy flow. - backprop_losses: list[torch.Tensor] - if self.pde == PDE.DarcyFlow2D and stage != "tailoring": - backprop_losses = [v for v in param_losses.values() if v.item() != 0] - else: - backprop_losses = list(param_losses.values()) - - param_loss = torch.stack(backprop_losses).mean() - - individual_param_losses = { - f"param_loss_{k}": v for k, v in param_losses.items() - } - individual_relative_param_losses = { - f"relative_param_loss_{k}": v for k, v in relative_param_losses.items() - } - - weighted_param_loss = self.inverse_param_loss_weight * param_loss - - weighted_residual_loss = self.inverse_residual_loss_weight * residual_loss - - loss = weighted_param_loss + weighted_residual_loss - losses = { - "param_loss": param_loss, - "residual_loss": residual_loss, - "true_residual_loss": true_residual_loss, - "loss": loss, - **individual_param_losses, - **individual_relative_param_losses, - } - - if self.pde == PDE.DarcyFlow2D: - # Additional metrics for darcy flow to be consistent with PINO - darcy_losses: dict[str, float] = classification_metrics_darcy_flow( - predicted_coeff=predicted_pde_params["coeff"], - true_coeff=pde_params["coeff"], - ) - losses.update(darcy_losses) - - # In the case of validation, we want to handle some additional metrics - if "validation" in stage or "test" in stage or "tailoring" in stage: - # we want to stratify the losses based on PDE parameter - # Stratification happens in the plotting callback - this performs bookkeeping. 
- param_loss_per_batch_element, residual_per_batch_element = ( - self.stratify_losses( - predicted_pde_params, - pde_params, - residual, - ) - ) - losses["param_loss_per_batch_element"] = param_loss_per_batch_element - losses["residual_per_batch_element"] = residual_per_batch_element - - # reset batch to original size by removing the repeated last element in orginal batch - if num_repeat_elements > 0: - predicted_pde_params = tree_map( - lambda x: x[: -1 * num_repeat_elements], predicted_pde_params - ) - pde_params = tree_map(lambda x: x[: -1 * num_repeat_elements], pde_params) - losses = tree_map( - lambda x: x[: -1 * num_repeat_elements] if x.numel() > 1 else x, losses - ) - end_time = time.time() - losses["timing_metrics"] = { - "predict_with_loss_time": end_time - start_time, - "forward_pass_time": forward_pass_time, - } - return predicted_pde_params, pde_params, losses - - @jaxtyped(typechecker=typeguard.typechecked) - def training_step( - self, - batch: Union[TypeBatch1D, TypeBatch2D], - batch_idx: int, - ) -> TypeLossDict: - stage = "train" - _, _, losses = self.predict_with_loss(batch, stage="train") - self.log_metrics(stage, losses) - return losses - - @jaxtyped(typechecker=typeguard.typechecked) - def validation_step( - self, - batch: Union[TypeBatch1D, TypeBatch2D], - batch_idx: int, - dataloader_idx: int = 0, - stage: str = None, - ) -> Union[None, TypeLossDict]: - stage = "validation" if stage == None else stage - prediction, target, losses = self.predict_with_loss(batch, stage=stage) - - #### Bookkeeping for plotting. See logging_callbacks.py for plotting. ##### - - # NOTE: If we dont convert to numpy, there will be an OOM exception - if self.pde == PDE.DarcyFlow2D: - for k in prediction.keys(): - prediction[k] = prediction[k].cpu().numpy() - target[k] = target[k].cpu().numpy() - self.validation_step_outputs.append(prediction) - self.validation_step_targets.append(target) - self.validation_step_loss_dicts.append( - ( - ptu.torch_dict_to_numpy(losses), - ptu.torch_dict_to_numpy(batch[-1]), - ) - ) - else: - self.validation_step_outputs.append(prediction) - self.validation_step_targets.append(target) - self.validation_step_loss_dicts.append((losses, batch[-1])) - losses["predictions"] = prediction - losses["targets"] = target - self.log_metrics(stage, losses) - return losses - - @jaxtyped(typechecker=typeguard.typechecked) - def test_step( - self, - batch: Union[TypeBatch1D, TypeBatch2D], - batch_idx: int, - dataloader_idx: int = 0, - ) -> TypeLossDict: - stage = "test" - self.model.train() - test_losses = self.validation_step(batch, batch_idx, dataloader_idx, stage) - return test_losses - - def log_metrics(self, stage, losses, prefix_dir=""): - if "validation" in stage or "test" in stage: - on_step = False - on_epoch = True - else: - on_step = True - on_epoch = False - - main_loss_type = "param_loss" - - self.log( - os.path.join(stage, prefix_dir, main_loss_type), - losses["param_loss"], - prog_bar=True, - on_step=on_step, - on_epoch=on_epoch, - ) - - for param in self.params_to_predict: - self.log( - os.path.join(stage, prefix_dir, f"{main_loss_type}_{param}"), - losses[f"param_loss_{param}"], - prog_bar=True, - on_step=on_step, - on_epoch=on_epoch, - ) - - self.log( - os.path.join(stage, prefix_dir, f"relative_param_loss_{param}"), - losses[f"relative_param_loss_{param}"], - prog_bar=True, - on_step=on_step, - on_epoch=on_epoch, - ) - - self.log( - os.path.join(stage, prefix_dir, "residual_loss"), - losses["residual_loss"], - prog_bar=True, - on_step=on_step, - 
on_epoch=on_epoch, - ) - - self.log( - os.path.join(stage, prefix_dir, "true_residual_loss"), - losses["true_residual_loss"], - prog_bar=True, - on_step=on_step, - on_epoch=on_epoch, - ) - - if isinstance(self.model, InverseModel): - - # Check for darcy inverse model - if self.pde == PDE.DarcyFlow2D: - self.log( - os.path.join( - stage, prefix_dir, "darcy_flow_classification_accuracy" - ), - losses["darcy_flow_classification_accuracy"], - prog_bar=True, - on_step=on_step, - on_epoch=on_epoch, - ) - self.log( - os.path.join(stage, prefix_dir, "darcy_flow_classification_iou"), - losses["darcy_flow_classification_iou"], - prog_bar=True, - on_step=on_step, - on_epoch=on_epoch, - ) - - self.log( - os.path.join(stage, prefix_dir, "loss"), - losses["loss"], - on_step=on_step, - on_epoch=on_epoch, - ) - - if "timing_metrics" in losses: - self.log( - os.path.join(stage, prefix_dir, "predict_with_loss_time"), - losses["timing_metrics"]["predict_with_loss_time"], - on_step=on_step, - on_epoch=on_epoch, - ) - - self.log( - os.path.join(stage, prefix_dir, "forward_pass_time"), - losses["timing_metrics"]["forward_pass_time"], - on_step=on_step, - on_epoch=on_epoch, - ) - - def configure_optimizers(self): - optimizer = self.optimizer - lr_scheduler = self.lr_scheduler - return [optimizer], [lr_scheduler] - - def on_before_optimizer_step(self, optimizer): - # logging the sum of the l2 norms of both the forward and inverse model - - self.old_inverse_model = torch.cat( - [p.flatten() for p in self.model.parameters()] - ) - inverse_norms = grad_norm(self.model, 2) - inverse_total_norm = sum(inverse_norms.values()) - self.log( - f"outer_loss_grad_norms/inverse_model", - inverse_total_norm, - on_step=True, - ) - - def optimizer_step(self, *args, **kwargs): - super().optimizer_step(*args, **kwargs) - - self.new_inverse_model = torch.cat( - [p.flatten() for p in self.model.parameters()] - ) - param_diff_norm = torch.norm(self.new_inverse_model - self.old_inverse_model) - original_param_norm = torch.norm(self.old_inverse_model) - rel_diff = param_diff_norm / original_param_norm - self.log( - "outer_loss_grad_norms/callback_inverse_param_rel_diff", - rel_diff, - on_step=True, - ) - - def stratify_losses(self, predicted_pde_params, pde_params, residual): - # Compute the per trajectory param loss & residual - param_loss_per_batch_element = self.param_loss_metric( - predicted_pde_params, pde_params, reduction="none" - ) - - # Edge case where we instead sum the error for darcy flow - if self.pde == PDE.DarcyFlow2D: - coeff = param_loss_per_batch_element["coeff"] - # In the case we use a relative error, we don't need to do this - if len(coeff.shape) == 4: - coeff = torch.flatten(coeff, start_dim=2).sum(axis=2) - - param_loss_per_batch_element["coeff"] = coeff - - param_loss_per_batch_element = ( - torch.stack(list(param_loss_per_batch_element.values())) - .squeeze(-1) - .mean(dim=0) - ) - residual_per_batch_element = pde_residual_reduction( - residual, dim=tuple(range(1, residual.dim())) - ) - - return param_loss_per_batch_element, residual_per_batch_element - - def on_test_epoch_start(self): - super().on_test_epoch_start() diff --git a/pdeinvbench/lightning_modules/logging_callbacks.py b/pdeinvbench/lightning_modules/logging_callbacks.py deleted file mode 100644 index cf03440994bdcc75ae84aac847454236c2c20d05..0000000000000000000000000000000000000000 --- a/pdeinvbench/lightning_modules/logging_callbacks.py +++ /dev/null @@ -1,578 +0,0 @@ -from types import NoneType -from typing import Any, List, Mapping - -import 
lightning.pytorch as pl -import matplotlib.pyplot as plt -import numpy as np -import plotly.express as plotly_express -import plotly.graph_objects as go -import torch -import wandb -from functorch.dim import tree_map -from lightning.pytorch.callbacks import Callback - -import pdeinvbench.utils.pytorch_utils as ptu -from pdeinvbench.utils.logging_utils import ( - collect_loss_dicts, -) -from pdeinvbench.utils.types import PDE, TypeBatch1D, TypeBatch2D - - -class PDEParamErrorPlottingCallback(Callback): - """ - Logs a set of errors stratified based on PDE parameter value. - """ - - def __init__(self): - self.validation_step_loss_dicts = [] - """ - Loss dicts for validation and autoregressive validation. Each element is a tuple of (losses, pde_params). - PDE_params comes directly from the dataloader. - Loss: Dict[str, torch.Tensor] with keys (shape) - 'data_loss' (B), 'residual_loss' (B), 'loss' (), 'data_loss_per_batch_element' (B), 'residual_per_batch_element' (B). - """ - self.pde = None # type: ignore - self.params_to_predict = [] - - def on_validation_epoch_start( - self, trainer: pl.Trainer, pl_module: pl.LightningModule - ): - # Clear memory of loss dicts - self.validation_step_loss_dicts = [] - self.pde = pl_module.pde - if self.pde == PDE.DarcyFlow2D: - self.params_to_predict = ["index"] - else: - self.params_to_predict = pl_module.params_to_predict - - def on_validation_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - outputs: Mapping[str, torch.Tensor], - batch: TypeBatch1D | TypeBatch2D, - batch_idx: int, - dataloader_idx: int = 0, - ): - # Convert to numpy arrays - collect_loss_dicts( - outputs, - batch, - "residual_per_batch_element", - self.validation_step_loss_dicts, - ) - - def on_test_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - outputs: Mapping[str, torch.Tensor], - batch: TypeBatch1D | TypeBatch2D, - batch_idx: int, - dataloader_idx: int = 0, - ): - self.on_validation_batch_end( - trainer, pl_module, outputs, batch, batch_idx) - - def generate_pde_parameter_histogram(self, loss_tuples): - """ - Generates a histogram of PDE parameter values vs loss - """ - if len(loss_tuples) == 0: - return None - if self.pde == PDE.DarcyFlow2D: - parameter_keys = ["index"] - else: - parameter_keys = loss_tuples[0][1].keys() - plots = {} - for parameter in parameter_keys: - # Num batches x Batch size - parameter_values = [ - ptu.torch_to_numpy(loss_tuple[1][parameter].ravel()) - for loss_tuple in loss_tuples - ] - parameter_values = np.concatenate(parameter_values, axis=0) - - residuals = [ - ptu.torch_to_numpy(loss_tuple[0]["residual_per_batch_element"]) - for loss_tuple in loss_tuples - ] - residuals = np.concatenate(residuals, axis=0) - - key_name = "param_loss_per_batch_element" - - data_or_param_loss = [loss_tuple[0][key_name] - for loss_tuple in loss_tuples] - - if len(data_or_param_loss[0].shape) != 0: - data_or_param_loss = np.concatenate(data_or_param_loss, axis=0) - - residual_fig = plotly_express.density_heatmap( - x=parameter_values, - y=residuals, - nbinsx=20, - nbinsy=20, - title=f"Residual vs. {parameter}", - ) - residual_fig.update_layout( - xaxis_title=f"{parameter} Values", - yaxis_title="Residual", - title_x=0.5, - margin_t=40, - ) - title = "Parameter" - data_or_param_loss_fig = plotly_express.density_heatmap( - x=parameter_values, - y=data_or_param_loss, - nbinsx=20, - nbinsy=20, - title=f"{title} Loss vs. 
{parameter}", - ) - data_or_param_loss_fig.update_layout( - xaxis_title=f"{title} Loss (MSE)", - yaxis_title=f"{parameter} Values", - title_x=0.5, - margin_t=40, - ) - plots[parameter] = [residual_fig, data_or_param_loss_fig] - return plots - - def on_test_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - self.on_validation_epoch_start(trainer, pl_module) - - def log_plots(self, prefix: str): - plots = self.generate_pde_parameter_histogram( - self.validation_step_loss_dicts) - if plots is not None: - for parameter, plots in plots.items(): - residual_fig, data_loss_fig = plots - wandb.log( - { - f"{prefix}/residual_vs_{parameter}": residual_fig, - f"{prefix}/data_loss_vs_{parameter}": data_loss_fig, - } - ) - plt.close() - - def log_parameter_predictions_table(self, loss_tuples, prefix: str = "test"): - if len(loss_tuples) == 0: - return None - if self.pde == PDE.DarcyFlow2D: - parameter_keys = ["index"] - else: - parameter_keys = loss_tuples[0][1].keys() - plots = {} - columns = ["ic_index", "true_parameters", - "predicted_parameters", "param_loss"] - for parameter in parameter_keys: - # Num batches x Batch size - true_parameters = [ - ptu.torch_to_numpy(loss_tuple[1][parameter].ravel()) - for loss_tuple in loss_tuples - ] - true_parameters = np.concatenate(true_parameters, axis=0) - - residuals = [ - ptu.torch_to_numpy(loss_tuple[0]["residual_per_batch_element"]) - for loss_tuple in loss_tuples - ] - residuals = np.concatenate(residuals, axis=0) - - key_name = "param_loss_per_batch_element" - - data_or_param_loss = [ - ptu.torch_to_numpy(loss_tuple[0][key_name]) - for loss_tuple in loss_tuples - ] - - if len(data_or_param_loss[0].shape) != 0: - data_or_param_loss = np.concatenate(data_or_param_loss, axis=0) - - ic_index = [ - ptu.torch_to_numpy(loss_tuple[2]).ravel() for loss_tuple in loss_tuples - ] - timestamps = [ - ptu.torch_to_numpy(loss_tuple[3]).ravel() for loss_tuple in loss_tuples - ] - - ic_index = np.concatenate(ic_index, axis=0) - timestamps = np.concatenate(timestamps, axis=0) - - predicted_parameters = [ - ptu.torch_to_numpy( - loss_tuple[0]["predictions"][parameter]).ravel() - for loss_tuple in loss_tuples - ] - - predicted_parameters = np.concatenate(predicted_parameters, axis=0) - - data = [ - [ - ic_index[i], - timestamps[i], - true_parameters[i], - predicted_parameters[i], - data_or_param_loss[i], - ] - for i in range(len(ic_index)) - ] - table = wandb.Table( - data=data, - columns=[ - "ic_index", - "timestamps", - "true_parameters", - "predicted_parameters", - "param_loss", - ], - ) - wandb.log( - {f"{prefix}/parameter_predictions_table_{parameter}": table}) - - def on_validation_epoch_end( - self, trainer: pl.Trainer, pl_module: pl.LightningModule - ): - # Plot error stratified by PDE parameter value - self.log_plots("validation") - # Clear caches - self.validation_step_loss_dicts = [] - - def on_test_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - self.on_validation_epoch_start(trainer, pl_module) - - def on_test_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - self.log_plots("test") - self.log_parameter_predictions_table( - self.validation_step_loss_dicts, "test") - # Clear caches - self.validation_step_loss_dicts = [] - - -class PDEParamErrorTestTimeTailoringCallback(PDEParamErrorPlottingCallback): - """ - Logs errors before and after tailoring, stratified by PDE parameter value. - """ - - def __init__(self): - super().__init__() - """ - Loss dicts for test time tailoring. 
Each element is a tuple of (losses, pde_params). - PDE_params comes directly from the dataloader. - Loss: Dict[str, torch.Tensor] with keys (shape) - 'data_loss' (B), 'residual_loss' (B), 'loss' (), 'data_loss_per_batch_element' (B), 'residual_per_batch_element' (B). - """ - self.pre_tailored_loss_dicts = [] - self.post_tailored_loss_dicts = [] - self.params_to_predict = [] - self.pde = None # type: ignore - - def on_test_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - self.pde = pl_module.pde - if self.pde == PDE.DarcyFlow2D: - # 'Coeff' is a 2D parameter field, index corresponds to the filename of the parameter in the 2D field - self.params_to_predict = ["index"] - else: - self.params_to_predict = pl_module.params_to_predict - - def on_test_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - outputs: Mapping[str, torch.Tensor], - batch: TypeBatch1D | TypeBatch2D, - batch_idx: int, - dataloader_idx: int = 0, - ): - if ( - not hasattr(pl_module, "num_tailoring_steps") - or pl_module.tailoring_optimizer is None - or pl_module.num_tailoring_steps == 0 - ): - return - if "pre_tailored_metrics" in outputs: - - collect_loss_dicts( - outputs["pre_tailored_metrics"], - batch, - "param_loss_per_batch_element", - self.pre_tailored_loss_dicts, - ) - collect_loss_dicts( - {key: v for key, v in outputs.items() if key != "pre_tailored_metrics"}, - batch, - "param_loss_per_batch_element", - self.post_tailored_loss_dicts, - ) - - def log_tables(self): - # take the param_loss_per_batch_element for the pre and post tailored metrics - - pre_tailored_data_loss = [ - loss_tuple[0]["param_loss_per_batch_element"] - for loss_tuple in self.pre_tailored_loss_dicts - ] - post_tailored_data_loss = [ - loss_tuple[0]["param_loss_per_batch_element"] - for loss_tuple in self.post_tailored_loss_dicts - ] - - parameter_values = [ - ptu.torch_to_numpy( - loss_tuple[1][self.params_to_predict[0]].ravel()) - for loss_tuple in self.pre_tailored_loss_dicts - ] - - pre_tailored_parameter_values = [ - ptu.torch_to_numpy( - loss_tuple[0]["predictions"][self.params_to_predict[0]].ravel() - ) - for loss_tuple in self.pre_tailored_loss_dicts - ] - post_tailored_parameter_values = [ - ptu.torch_to_numpy( - loss_tuple[0]["predictions"][self.params_to_predict[0]].ravel() - ) - for loss_tuple in self.post_tailored_loss_dicts - ] - - parameter_values = np.concatenate(parameter_values, axis=0) - pre_tailored_parameter_values = np.concatenate( - pre_tailored_parameter_values, axis=0 - ) - post_tailored_parameter_values = np.concatenate( - post_tailored_parameter_values, axis=0 - ) - - pre_tailored_data_loss = np.concatenate(pre_tailored_data_loss, axis=0) - post_tailored_data_loss = np.concatenate( - post_tailored_data_loss, axis=0) - - # log table containing paramter value, pre tailored data loss, post tailored data loss - data = [ - [ - parameter_values[i], - pre_tailored_data_loss[i], - post_tailored_data_loss[i], - pre_tailored_parameter_values[i], - post_tailored_parameter_values[i], - ] - for i in range(len(parameter_values)) - ] - table = wandb.Table( - data=data, - columns=[ - "parameter_value", - "pre_tailored_data_loss", - "post_tailored_data_loss", - "pre_tailored_parameter_value", - "post_tailored_parameter_value", - ], - ) - wandb.log({"tailoring_data_loss_table": table}) - - def on_test_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - if ( - not hasattr(pl_module, "num_tailoring_steps") - or pl_module.tailoring_optimizer is None - or 
pl_module.num_tailoring_steps == 0 - ): - return - self.log_tables() - self.pre_tailored_loss_dicts = [] - self.post_tailored_loss_dicts = [] - - -class TailoringTimingMetricsCallback(Callback): - """ - Logs the timing metrics for the tailoring step. - """ - - def __init__(self): - self.tailoring_timing_metrics = {} - - def on_test_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - outputs: Mapping[str, torch.Tensor], - batch: TypeBatch1D | TypeBatch2D, - batch_idx: int, - dataloader_idx: int = 0, - ): - if ( - not hasattr(pl_module, "tailoring_optimizer") - or pl_module.tailoring_optimizer is None - or pl_module.num_tailoring_steps == 0 - ): - return - - if "tailoring_timing_metrics" in outputs: - self.tailoring_timing_metrics[dataloader_idx] = outputs[ - "tailoring_timing_metrics" - ] - - def on_test_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - for dataloader_idx in self.tailoring_timing_metrics: - wandb.log( - { - f"tailoring_timing_metrics_dataloader_{dataloader_idx}": self.tailoring_timing_metrics[ - dataloader_idx - ] - } - ) - - -class InverseErrorByTailoringStepCallback(Callback): - """ - Helper callback that plots the error by tailoring step. On the Y-axis is the metric and the X-axis is the tailoring step. - Uses plotly to generate the plot and plots to W&B. - This is specifically for PINO and tailoring. - """ - - def __init__(self): - # Required class variables - reset on val epoch start - self.errors_by_tailor_step = {} - - def on_test_epoch_start( - self, trainer: pl.Trainer, pl_module: pl.LightningModule - ) -> None: - if ( - not hasattr(pl_module, "tailoring_optimizer") - or pl_module.tailoring_optimizer is None - or pl_module.num_tailoring_steps == 0 - ): - return - self.errors_by_tailor_step = {} - - def generate_plots(self, pl_module, loader_idx=0): - """ - Generates the plots for the data and residual error by tailoring step. 
- """ - num_tailoring_steps = pl_module.num_tailoring_steps - metric_plots = {} - - for error_metric in self.errors_by_tailor_step[loader_idx]: - metric_data_by_tailor_step = np.asarray( - self.errors_by_tailor_step[loader_idx][error_metric] - ) - - # Calculate mean across tailoring steps - mean_metric_data_by_tailor_step = np.mean( - metric_data_by_tailor_step, axis=0 - ) - - # Calculate y-axis bounds with some padding (e.g., 5% of the range) - y_min = np.min(metric_data_by_tailor_step) - y_max = np.max(metric_data_by_tailor_step) - y_range = y_max - y_min - y_axis_min = y_min - 0.05 * y_range # Add 5% padding below min - y_axis_max = y_max + 0.05 * y_range # Add 5% padding above max - - # Create data for the table - data = [ - [x, y] - for (x, y) in zip( - list(range(num_tailoring_steps) - ), mean_metric_data_by_tailor_step - ) - ] - table = wandb.Table( - data=data, columns=["tailor_steps", f"mean_{error_metric}"] - ) - - # Create a Plotly figure for custom y-axis bounds - fig = go.Figure() - fig.add_trace( - go.Scatter( - x=list(range(num_tailoring_steps)), - y=mean_metric_data_by_tailor_step, - mode="lines", - name=f"Mean {error_metric}", - ) - ) - fig.update_layout( - title=f"Tailoring Steps vs Mean {error_metric}", - xaxis_title="Tailoring Steps", - yaxis_title=f"Mean {error_metric}", - # Set y-axis bounds - yaxis=dict(range=[y_axis_min, y_axis_max]), - ) - - # Log the Plotly figure to WandB - metric_plots[error_metric] = wandb.Plotly(fig) - return metric_plots - - def on_test_batch_end( - self, - trainer: pl.Trainer, - pl_module: pl.LightningModule, - outputs: Mapping[str, torch.Tensor], - batch: TypeBatch1D | TypeBatch2D, - batch_idx: int, - dataloader_idx: int = 0, - ): - """ - After each batch, we accumulate the metric for each tailoring step. - """ - if ( - not hasattr(pl_module, "tailoring_optimizer") - or pl_module.tailoring_optimizer is None - or pl_module.num_tailoring_steps == 0 - ): - return - - tailoring_metrics = outputs["tailoring_metrics"] - if dataloader_idx not in self.errors_by_tailor_step: - self.errors_by_tailor_step[dataloader_idx] = {} - current_dataloader = trainer.test_dataloaders[dataloader_idx] - - for metric, metric_data in tailoring_metrics.items(): - if metric not in self.errors_by_tailor_step[dataloader_idx]: - self.errors_by_tailor_step[dataloader_idx][metric] = [] - if "per_batch_element" in metric and pl_module.tailor_per_batch: - current_batch_size = metric_data[0].shape[0] - elements_to_add = pl_module.batch_size - current_batch_size - if current_batch_size != pl_module.batch_size: - for tailoring_step in range(pl_module.num_tailoring_steps): - step_ouput = metric_data[tailoring_step] - - # Get the last element of the tensor - last_element = step_ouput[-1] - - # Create a tensor with repeated last elements - repeated_elements = ( - last_element.repeat(elements_to_add, 1) - if len(step_ouput.shape) > 1 - else last_element.repeat(elements_to_add) - ) - - # Concatenate the original tensor with the repeated elements - metric_data[tailoring_step] = torch.cat( - [step_ouput, repeated_elements], dim=0 - ) - - self.errors_by_tailor_step[dataloader_idx][metric].append( - metric_data) - - def on_test_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - if ( - not hasattr(pl_module, "tailoring_optimizer") - or pl_module.tailoring_optimizer is None - or pl_module.num_tailoring_steps == 0 - ): - return - - """ - After each epoch, we plot the metric by tailoring step. 
- """ - - for dataloader_idx in self.errors_by_tailor_step: - tailoring_figures = self.generate_plots(pl_module, dataloader_idx) - to_log = {} - for tailoring_metric, err_fig in tailoring_figures.items(): - to_log[ - f"tailoring_step_plots_dataloader_{dataloader_idx}/{tailoring_metric}" - ] = err_fig - - wandb.log(to_log) - plt.close() diff --git a/pdeinvbench/losses/__init__.py b/pdeinvbench/losses/__init__.py deleted file mode 100644 index 97742f588a5359662901f8ba7e68f2644674e962..0000000000000000000000000000000000000000 --- a/pdeinvbench/losses/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from pdeinvbench.losses.pde_residuals import * -from pdeinvbench.losses.metrics import * diff --git a/pdeinvbench/losses/darcy_residuals.py b/pdeinvbench/losses/darcy_residuals.py deleted file mode 100644 index 5930d62a916b22afe9b0a9dc37f27a30cbaee744..0000000000000000000000000000000000000000 --- a/pdeinvbench/losses/darcy_residuals.py +++ /dev/null @@ -1,237 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[1]: - - -import numpy as np -import scipy.io - -f = "../data/piececonst_r241_N1024_smooth1.mat" - -mat = scipy.io.loadmat(f) - - -# In[6]: - - -kcoeff = mat["Kcoeff"] -kcoeff_x = mat["Kcoeff_x"] -kcoeff_y = mat["Kcoeff_y"] -sol = mat["sol"] -coeff = mat["coeff"] - -print(kcoeff.shape, "kcoeff") -print(kcoeff_x.shape, "kcoeff x") -print(kcoeff_y.shape, "kcoeff y") -print(sol.shape, "sol") -print(coeff.shape, "coeff") - - -# In[35]: - - -import matplotlib.pyplot as plt - -idx = 0 -plt.imshow(kcoeff[idx]) -plt.title("kcoeff") -plt.colorbar() -plt.show() - -plt.imshow(kcoeff_y[idx]) -plt.title("kcoeff_y") -plt.colorbar() -plt.show() - -plt.imshow(kcoeff_x[idx]) -plt.title("kcoeff_x") -plt.colorbar() -plt.show() - -plt.imshow(sol[idx]) -plt.title("sol") -plt.colorbar() -plt.show() - -plt.imshow(coeff[idx]) -plt.title("coeff") -plt.colorbar() -plt.show() - - -# In[44]: - - -def darcy_residual(u, a): - # u: sol, a: piecewise constant diffusion coefficient - # This is an unbatched implementation - # (size, size) - forcing_func = np.ones_like(u) - D = 1 # 1 uniform grid - size = u.shape[0] - dx = D / size - dy = dx - - ux = np.gradient(u, dx, axis=0) - uy = np.gradient(u, dy, axis=1) - - aux = a * ux - auy = a * uy - - auxx = np.gradient(aux, dx, axis=0) - auyy = np.gradient(auy, dy, axis=1) - lhs = -(auxx + auyy) - return lhs - forcing_func - - -residual = darcy_residual(sol[idx], kcoeff[idx]) - - -# In[45]: - - -residual.shape - - -# In[46]: - - -plt.imshow(residual) -plt.colorbar() - - -# In[47]: - - -residual.sum(), residual.mean(), np.linalg.norm(residual) - - -# In[89]: - - -# Batched version of darcy residuals -def batched_darcy_residual(u, a): - # u: sol, a: piecewise constant diffusion coefficient - # (batch size, size, size) - forcing_func = np.ones_like(u) - D = 1 # 1 uniform grid - bsize = u.shape[0] - size = u.shape[1] - dx = D / size - dy = dx - - ux = np.gradient(u, dx, axis=1) - uy = np.gradient(u, dy, axis=2) - - aux = a * ux - auy = a * uy - - auxx = np.gradient(aux, dx, axis=1) - auyy = np.gradient(auy, dy, axis=2) - lhs = -(auxx + auyy) - return lhs - forcing_func - - -def corrupt_a(a, p: float): - max_val = a.max().item() - min_val = a.min().item() - ax = np.gradient(a, axis=0) - ay = np.gradient(a, axis=1) - field = ax + ay - - # Find indices of non-zero entries - non_zero_indices = np.argwhere(field != 0) - - # Define offsets for neighbors - offsets = np.array( - [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]] - ) - - # Calculate all potential neighbor 
positions for non-zero indices - neighbor_indices = non_zero_indices[:, None, :] + offsets[None, :, :] - - # Flatten the neighbor_indices array into two columns (row, col) - neighbor_indices = neighbor_indices.reshape(-1, 2) - - # Filter neighbors to stay within bounds - valid_mask = ( - (neighbor_indices[:, 0] >= 0) - & (neighbor_indices[:, 0] < field.shape[0]) - & (neighbor_indices[:, 1] >= 0) - & (neighbor_indices[:, 1] < field.shape[1]) - ) - valid_neighbors = neighbor_indices[valid_mask] - - # Create a copy of the field to update - updated_field = a.copy() - - # Set all valid neighbor positions to 1 - rand_vals = np.random.rand(valid_neighbors.shape[0]) - valid_neighbors = valid_neighbors[rand_vals < p] - - # Half of the valid_neighbors are set to max and half are set to min - valid_neighbors = np.random.permutation(valid_neighbors) - size = valid_neighbors.shape[0] // 2 - updated_field[valid_neighbors[size:, 0], valid_neighbors[size:, 1]] = max_val - updated_field[valid_neighbors[:size, 0], valid_neighbors[:size, 1]] = min_val - - return updated_field - - -# In[79]: - - -from tqdm import tqdm - -corruptions = np.linspace(0, 1, 20, endpoint=True) -print("Corruption vals", corruptions) -residual_corruption_norm = {c: [] for c in corruptions} -residual_corruption_mean = {c: [] for c in corruptions} -for i in tqdm(range(sol.shape[0])): - u = sol[i] - a = coeff[i] - for c in corruptions: - new_a = corrupt_a(a, p=c) - residual = darcy_residual(u, new_a) - residual_corruption_norm[c].append(np.linalg.norm(residual)) - residual_corruption_mean[c].append(np.mean(residual)) - - -# In[81]: - - -for k in residual_corruption_norm.keys(): - residual_corruption_norm[k] = np.asarray(residual_corruption_norm[k]) - residual_corruption_mean[k] = np.asarray(residual_corruption_mean[k]) - - -# In[86]: - - -plt.plot( - residual_corruption_norm.keys(), - [residual_corruption_norm[k].mean() for k in residual_corruption_norm.keys()], -) -plt.title("Norm based comparison") -plt.xlabel("Percentage of corruption") -plt.ylabel("Mean of residual norm values") -plt.show() -plt.plot( - residual_corruption_mean.keys(), - [residual_corruption_mean[k].mean() for k in residual_corruption_mean.keys()], -) -plt.title("Mean based comparison") -plt.xlabel("Percentage of corruption") -plt.ylabel("Mean of Mean residual values") -plt.show() - - -# In[93]: - - -plt.imshow(corrupt_a(coeff[idx], p=0.0001)) -plt.show() - - -# In[ ]: diff --git a/pdeinvbench/losses/finite_differences.py b/pdeinvbench/losses/finite_differences.py deleted file mode 100644 index c8bf2b8dbf6658316d45fe2464bad3daac575a30..0000000000000000000000000000000000000000 --- a/pdeinvbench/losses/finite_differences.py +++ /dev/null @@ -1,84 +0,0 @@ -from typing import Dict, List, Tuple, Union - -import torch -import typeguard -from jaxtyping import Float, jaxtyped - -from pdeinvbench.utils.types import ( - Type1DRPartialsTuple, - Type2DRDPartialsTuple, - TypeBatchSolField1D, - TypePartials2D, - TypeXGrid, - TypeYGrid, -) - - -@jaxtyped(typechecker=typeguard.typechecked) -def partials_torch_1d_systems( - solution_field: TypeBatchSolField1D, - x: TypeXGrid, - dt: torch.Tensor, -) -> Type1DRPartialsTuple: - """ - Compute the spatial and temporal partial differentials for 1d systems. 
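# A minimal usage sketch of the stencil described here, assuming hypothetical shapes:
# torch.gradient with spacing=(x,) handles a (possibly non-uniform) spatial grid,
# while the uniform time axis only needs a scalar step dt.
import torch
u = torch.randn(4, 10, 64)                     # (batch, time, nx)
x = torch.linspace(0.0, 1.0, 64)               # spatial grid
u_x = torch.gradient(u, spacing=(x,), dim=-1)[0]
u_xx = torch.gradient(u_x, spacing=(x,), dim=-1)[0]
u_t = torch.gradient(u, spacing=0.01, dim=-2)[0]   # dt = 0.01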
- solution field: solution field - x: spatial grid - dt: time differential - Returns: - data_x: X spatial gradient (du/dx) - data_x_usqr: spatial gradient of u^2/2 (du^2/dx) - data_xx: X spatial second gradient (d^2u/dx^2) - data_t: temporal gradient (du/dt) - (All return arguments are same shape as data) - """ - x_axis = -1 - t_axis = -2 - - # take first batch element of spatial and time grids (all the same, artifact of dataloader) - if len(x.shape) == 2: - x = x[0] - - data_x = torch.gradient(solution_field, spacing=(x,), dim=x_axis)[0] - data_x_usqr = torch.gradient( - solution_field * solution_field / 2, spacing=(x,), dim=x_axis - )[0] - data_xx = torch.gradient(data_x, spacing=(x,), dim=x_axis)[0] - data_t = torch.gradient(solution_field, spacing=dt, dim=t_axis)[0] - return data_x, data_x_usqr, data_xx, data_t - - -@jaxtyped(typechecker=typeguard.typechecked) -def partials_torch_2d_systems( - solution_field: TypePartials2D, - x: TypeXGrid, - y: TypeYGrid, - dt: torch.Tensor, -) -> Type2DRDPartialsTuple: - """ - Compute the spatial and temporal partial differentials for 2D systems. - solution_field: solution field - x, y: spatial grids - dt: time differential - Returns: - data_x: X spatial gradient (du/dx) - data_y: Y spatial gradient (du/dy) - data_xx: X spatial second gradient (d^2u/dx^2) - data_yy: Y spatial second gradient (d^2u/dy^2) - data_t: temporal gradient (du/dt) - """ - - # take first batch element of spatial grids (all the same, artifact of dataloader) - if len(x.shape) == 2: - x = x[0] - if len(y.shape) == 2: - y = y[0] - y_axis = -1 - x_axis = -2 - t_axis = -3 - data_x = torch.gradient(solution_field, spacing=(x,), dim=x_axis)[0] - data_xx = torch.gradient(data_x, spacing=(x,), dim=x_axis)[0] - data_y = torch.gradient(solution_field, spacing=(y,), dim=y_axis)[0] - data_yy = torch.gradient(data_y, spacing=(y,), dim=y_axis)[0] - data_t = torch.gradient(solution_field, spacing=dt, dim=t_axis)[0] - return data_x, data_y, data_xx, data_yy, data_t diff --git a/pdeinvbench/losses/fluids.py b/pdeinvbench/losses/fluids.py deleted file mode 100644 index 06082d87e49fda1986890cfd16ecafccfc758c72..0000000000000000000000000000000000000000 --- a/pdeinvbench/losses/fluids.py +++ /dev/null @@ -1,391 +0,0 @@ -import os -from functools import partial - -import h5py as h -import numpy as np -import torch -from jaxtyping import Complex, Float -from torch import Tensor, vmap - -""" -Methods to compute the pde residual of turbulent flow and navier stokes. -""" - - -def _maybe_unsqueeze_3d( - u: Float[Tensor, "nt nx ny"] | Float[Tensor, "nt nx ny 1"], -) -> Float[Tensor, "nt nx ny 1"]: - """ - Given a tensor, makes sure that the last dimension is 1 (channel dim). - Helps to ensure number of channels consistency. - NOTE: This should only be used within this file. Assumes that u is an unbatched fluid field. - Also always assumes well-formed input. - """ - return u if len(u.shape) == 4 and u.shape[-1] == 1 else torch.unsqueeze(u, dim=-1) - - -def _maybe_unsqueeze_2d( - u: Float[Tensor, "nx ny"] | Float[Tensor, "nx ny 1"], -) -> Float[Tensor, "nx ny 1"]: - """ - Same as 3d version but assumes a tensor of shape (nx, ny, 1) - """ - return u if len(u.shape) == 3 and u.shape[-1] == 1 else torch.unsqueeze(u, dim=-1) - - -@partial(vmap, in_dims=(0, None, None, None)) -def compute_stream_function( - vorticity: Float[Tensor, "nx ny 1"], - dx: float, - dy: float, - fourier: bool = False, -) -> Float[Tensor, "nx ny 1"] | Complex[Tensor, "nx ny 1"]: - """ - Compute the stream function psi. 
If :arg fourier: returns the fft coefficients. - Otherwise, returns the real components. - :args: - - vorticity: (nx, ny, 1) vorticity in real space - - dx: float - - dy: float - - fourier: bool - whether to return fft coeffs or real space - """ - w = vorticity - device = w.device - w = torch.squeeze(w) - what = torch.fft.fft2(w) - nx, ny = w.shape - kx = torch.fft.fftfreq(nx, d=dx) * 2 * torch.pi - ky = torch.fft.fftfreq(ny, d=dy) * 2 * torch.pi - kx, ky = torch.meshgrid(kx, ky, indexing="ij") - kx, ky = kx.to(device), ky.to(device) - wavenumbers_squared = kx**2 + ky**2 - # stream function = psi - psi_hat = torch.zeros_like( - what, device=what.device - ) # NOTE: zeros_like implicit broadcasts to cfloat (this might change in the future) - psi_hat[wavenumbers_squared > 0] = what[wavenumbers_squared > 0] / ( - -wavenumbers_squared[wavenumbers_squared > 0] - ) - - if fourier: - return _maybe_unsqueeze_2d(psi_hat) - else: - return _maybe_unsqueeze_2d(torch.fft.ifft2(psi_hat).real) - - -def compute_first_order_gradient( - u: Float[Tensor, "nt nx ny 1"], spacing: float, dim: int, fourier: bool = False -): - """ - Returns the first derivative with respect to :arg dim:. Spacing must be provided (dx). - """ - - if fourier: - kx = torch.fft.fftfreq(u.shape[1], d=spacing) * 2 * torch.pi - ky = torch.fft.fftfreq(u.shape[2], d=spacing) * 2 * torch.pi - kx, ky = torch.meshgrid(kx, ky, indexing="ij") - kx, ky = kx.to(u.device), ky.to(u.device) - kx = kx.unsqueeze(0) - ky = ky.unsqueeze(0) - - # print("kx.shape, ky.shape, u.shape", kx.shape, ky.shape, u.shape) - if dim == 1: - return torch.fft.ifft2(1j * kx * torch.fft.fft2(u)).real - elif dim == 2: - return torch.fft.ifft2(1j * ky * torch.fft.fft2(u)).real - else: - raise ValueError(f"Invalid dimension: {dim}") - else: - return torch.gradient(u, spacing=spacing, dim=dim)[0] - - -def vorticity_to_velocity( - vorticity: Float[Tensor, "nt nx ny 1"], dx: float, dy: float -) -> tuple[Float[Tensor, "nt nx ny 1"], Float[Tensor, "nt nx ny 1"]]: - """ - Given vorticity, dx, dy, returns a pair (vx, vy) corresponding to velocity - in the x and y directions. 
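# Hedged summary of the Fourier-space identities used in this helper and in
# compute_stream_function above (periodic boundaries assumed):
#   psi_hat = -w_hat / |k|^2          (zero mode left at 0)
#   vx_hat  =  1j * ky * psi_hat      (vx = d(psi)/dy)
#   vy_hat  = -1j * kx * psi_hat      (vy = -d(psi)/dx)
# so velocity is recovered from vorticity entirely in Fourier space before the ifft.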
- """ - w = vorticity - device = w.device - w = torch.squeeze(w) - nt, nx, ny = w.shape - psi_hat = compute_stream_function(vorticity, dx, dy, True) - psi_hat = torch.squeeze(psi_hat) - - # Compute velcities using psi - kx = torch.fft.fftfreq(nx, d=dx) * 2 * torch.pi - ky = torch.fft.fftfreq(ny, d=dy) * 2 * torch.pi - kx, ky = torch.meshgrid(kx, ky, indexing="ij") - kx, ky = kx.to(device), ky.to(device) - vx_hat = 1j * ky * psi_hat - vy_hat = -1j * kx * psi_hat - vx = torch.fft.ifft2(vx_hat).real - vy = torch.fft.ifft2(vy_hat).real - return _maybe_unsqueeze_3d(vx), _maybe_unsqueeze_3d(vy) - - -def compute_advection( - vorticity: Float[Tensor, "nt nx ny 1"], - dx: float, - dy: float, - return_velocity: bool = False, -) -> ( - Float[Tensor, "nt nx ny 1"] - | tuple[ - Float[Tensor, "nt nx ny 1"], - Float[Tensor, "nt nx ny 1"], - Float[Tensor, "nt nx ny 1"], - ] -): - """ - Computes the advection term of Navier stokes: v * nabla w - :args - - vorticity: (nt, nx, ny, 1) - - dx: float - - dy: float - - return_velocity (bool): Whether to return adv, vx, vy - """ - w = vorticity - w = torch.squeeze(w) - vx, vy = vorticity_to_velocity(vorticity, dx, dy) - w_dx = compute_first_order_gradient(w, dx, 1, fourier=True) - w_dx = _maybe_unsqueeze_3d(w_dx) - w_dy = compute_first_order_gradient(w, dy, 2, fourier=True) - w_dy = _maybe_unsqueeze_3d(w_dy) - adv = vx * w_dx + vy * w_dy - assert len(adv.shape) == 4, "Incorrect advection shape" - if return_velocity: - return _maybe_unsqueeze_3d(adv), vx, vy - return _maybe_unsqueeze_3d(adv) - - -def second_order_gradient( - u: Float[Tensor, "..."], spacing: float, dim: int -) -> Float[Tensor, "..."]: - """ - Returns the second derivative with respect to :arg dim:. Spacing must be provided (dx). - """ - return torch.gradient( - torch.gradient(u, spacing=spacing, dim=dim)[0], spacing=spacing, dim=dim - )[0] - - -def laplacian( - vorticity: Float[Tensor, "nt nx ny 1"], dx: float, dy: float -) -> Float[Tensor, "nt nx ny 1"]: - """ - Computes the laplacian of vorticity assuming some dx and dy. - args: - - vorticity (nt, nx, ny, 1) - - dx: float - - dy: float - """ - w = vorticity - w = np.squeeze(w) - nt, nx, ny = w.shape - w_dxx = second_order_gradient(w, spacing=dx, dim=1) - w_dyy = second_order_gradient(w, spacing=dy, dim=2) - return _maybe_unsqueeze_3d(w_dxx + w_dyy) - - -def wrapper(func): - def _wrapper(*args): - new_args = [] - for a in args: - if isinstance(a, np.ndarray): - new_args.append(torch.from_numpy(a)) - else: - new_args.append(a) - out = func(*new_args) - return out.cpu().numpy() - - return _wrapper - - -def _maybe_unsqueeze_np(u): - return u if u.shape[-1] == 1 else np.expand_dims(u, axis=-1) - - -def compare_funcs(f1, f2): - def compare(f1args, f2args): - reference = _maybe_unsqueeze_np(f1(*f1args)) - newout = wrapper(f2)(*f2args) - logger.info( - f"Diff between {f1.__name__} and {f2.__name__} was {np.linalg.norm(reference - newout)}" - ) - - return compare - - -def turbulent_flow_residual( - vorticity: Float[Tensor, "nt nx ny 1"], - t: Float[Tensor, "nt"], - x: Float[Tensor, "nx"], - y: Float[Tensor, "ny"], - nu: float, - return_partials: bool = False, -): - """ - Computes the NS equation assuming a damping coeff of 0.1 and Kolmogorov forcing func. - NOTE: This function is unbatched. 
- Eqn: - -dwdt -v * nable w + nu * lap(w) - alpha*w + f - """ - dt: float = (t[1] - t[0]).item() - dx: float = (x[1] - x[0]).item() - dy: float = (y[1] - y[0]).item() - nt, nx, ny, _ = vorticity.shape - dwdt = torch.gradient(vorticity, spacing=dt, dim=0)[0] - if return_partials: - advection, vx, vy = compute_advection( - vorticity, dx=dx, dy=dy, return_velocity=True - ) - else: - advection = compute_advection(vorticity, dx=dx, dy=dy) - - # desired equation: dwdt = -v * \nabla w + v \nabla^2 w - alpha*w + f - alpha = 0.1 - damping_term = alpha * vorticity - forced_mode = 2 - forcing_func = forced_mode * torch.cos(forced_mode * y) - forcing_func = torch.reshape(forcing_func, (1, 1, ny, 1)) - forcing_func = torch.broadcast_to(forcing_func, vorticity.shape) - diffusion_term = nu * laplacian(vorticity, dx=dx, dy=dy) - - # residual = -dwdt + (advection) + diffusion_term - damping_term + forcing_func - residual = -dwdt + (advection) + diffusion_term - damping_term + forcing_func - - if return_partials: - dwdx = torch.gradient(vorticity, spacing=dx, dim=1)[0] - dwdy = torch.gradient(vorticity, spacing=dy, dim=2)[0] - partials = torch.stack([dwdt, dwdx, dwdy], dim=0) - # There's a trailing channel dim that we replace - partials = torch.squeeze(partials, dim=-1) - return residual, partials - return residual - - -def navier_stokes_residual( - vorticity: Float[Tensor, "nt nx ny 1"], - t: Float[Tensor, "nt"], - x: Float[Tensor, "nx"], - y: Float[Tensor, "ny"], - re: float, - return_partials: bool = False, -): - """ - Computes the unforced NS equation with no damping. - NOTE: This function is unbatched. - Eqn: - -dwdt -v * nable w + nu * lap(w) - """ - # Compute the viscosity from Re number - # Map backward: l (characteristic / length scale) - l = 0.8 - ic_scaling = 1 - nu: float = l * ic_scaling / re - dt: float = (t[1] - t[0]).item() - dx: float = (x[1] - x[0]).item() - dy: float = (y[1] - y[0]).item() - nt, nx, ny, _ = vorticity.shape - dwdt = torch.gradient(vorticity, spacing=dt, dim=0)[0] - if return_partials: - advection, vx, vy = compute_advection( - vorticity, dx=dx, dy=dy, return_velocity=True - ) - else: - advection = compute_advection(vorticity, dx=dx, dy=dy) - - # desired equation: dwdt = -v * \nabla w + v \nabla^2 w - alpha*w + f - diffusion_term = nu * laplacian(vorticity, dx=dx, dy=dy) - - residual = -dwdt + (advection) + diffusion_term - - if return_partials: - dwdx = torch.gradient(vorticity, spacing=dx, dim=1)[0] - dwdy = torch.gradient(vorticity, spacing=dy, dim=2)[0] - partials = torch.stack([dwdt, dwdx, dwdy], dim=0) - partials = torch.squeeze(partials, dim=-1) - return residual, partials - return residual - - -if __name__ == "__main__": - import argparse - - from fluids_numpy import advection as advection_np_base - from fluids_numpy import compute_stream_function as compute_stream_function_np - from fluids_numpy import laplacian as laplacian_np - from fluids_numpy import tf_residual_numpy - from loguru import logger - - logger.warning( - "You should only be running this script as a main function if you are testing the fluids residual computation" - ) - - parser = argparse.ArgumentParser( - description="Runs a series of tests to check a numpy implementation with the current torch NS+TF residual computations" - ) - parser.add_argument( - "--filename", - type=str, - default="../../../data/2D_turbulent-flow_nu=0.006153085601625313.h5", - ) - args = parser.parse_args() - filename = args.filename - dataset = h.File(filename) - traj_idx = "0000" - data = dataset[traj_idx]["data"][:] - 
logger.info(f"Data shape {data.shape}") - t = dataset[traj_idx]["grid/t"][:] - x = dataset[traj_idx]["grid/x"][:] - y = dataset[traj_idx]["grid/y"][:] - nu = os.path.basename(filename).split("=")[-1][:-3] - nu: float = float(nu) - dataset.close() - - dx = x[1] - x[0] - dy = y[1] - y[0] - logger.info( - f"Computed residual norm {np.linalg.norm(wrapper(turbulent_flow_residual)(data, t, x, y, nu))}" - ) - - compute_stream_function_np.__name__ = "compute_stream_function_np, fourier=False" - compare_funcs(compute_stream_function_np, compute_stream_function)( - (data, x, y, False), (data, dx, dy, False) - ) - compute_stream_function_np.__name__ = "compute_stream_function_np, fourier=True" - compare_funcs(compute_stream_function_np, compute_stream_function)( - (data, x, y, True), (data, dx, dy, True) - ) - - def advection_np(u, x, y): - return advection_np_base(u, x, y, stream_func=compute_stream_function_np)[0] - - advection_np.__name__ = "advenction_np" - compare_funcs(advection_np, compute_advection)((data, x, y), (data, dx, dy)) - - laplacian_np.__name__ = "laplacian_np" - compare_funcs(laplacian_np, laplacian)((data, x, y), (data, dx, dy)) - - # Compare the velocity conversions - def advection_np(u, x, y): - return advection_np_base(u, x, y, stream_func=compute_stream_function_np)[1] - - advection_np.__name__ = "advenction_np for vx" - compare_funcs( - advection_np, lambda *args: compute_advection(*args, return_velocity=True)[1] - )((data, x, y), (data, dx, dy)) - - def advection_np(u, x, y): - return advection_np_base(u, x, y, stream_func=compute_stream_function_np)[2] - - advection_np.__name__ = "advenction_np for vy" - compare_funcs( - advection_np, lambda *args: compute_advection(*args, return_velocity=True)[2] - )((data, x, y), (data, dx, dy)) - - compare_funcs(tf_residual_numpy, turbulent_flow_residual)( - (data, t, x, y, nu), (data, t, x, y, nu) - ) diff --git a/pdeinvbench/losses/metrics.py b/pdeinvbench/losses/metrics.py deleted file mode 100644 index aaa54e6635132d56354b9c0571047e9e650b93cc..0000000000000000000000000000000000000000 --- a/pdeinvbench/losses/metrics.py +++ /dev/null @@ -1,214 +0,0 @@ -from typing import Any, Callable, Dict, List, Tuple, Union - -import torch -import typeguard -from jaxtyping import Float, jaxtyped - -from pdeinvbench.utils.types import DataMetrics, ParamMetrics - -import scoringrules as sr - - -@jaxtyped(typechecker=typeguard.typechecked) -def get_data_metric(metric: DataMetrics) -> Callable: - if metric == DataMetrics.MSE: - return torch.nn.functional.mse_loss - elif metric == DataMetrics.Relative_Error: - return relative_error - - -@jaxtyped(typechecker=typeguard.typechecked) -def get_param_metric(metric: ParamMetrics) -> Callable: - if metric == ParamMetrics.MSE: - return param_mse_loss - elif metric == ParamMetrics.Relative_Error: - return param_relative_loss - - else: - raise ValueError("Parameter metric not recognized.") - - -@jaxtyped(typechecker=typeguard.typechecked) -def relative_error( - input: Float[torch.Tensor, "batch ..."], - target: Float[torch.Tensor, "batch ..."], - reduction: str = "mean", - setting: str = "per_trajectory", -) -> torch.Tensor: - """ - Compute the relative loss between two solution fields. - :param input: torch.Tensor - predicted solution (B x ...) - :param target: torch.Tensor - target solution (B x ...) - :param reduction: str - reduction method for the loss. 
None returns a tensor of batch size - :return: torch.Tensor - relative data loss - """ - difference = input - target - batch_size = difference.shape[0] - if setting == "per_trajectory": - difference = difference.view(batch_size, -1) - loss = torch.linalg.norm(difference, dim=1) / torch.linalg.norm( - target.view(batch_size, -1), dim=1 - ) - elif setting == "per_frame": - time_size = difference.shape[1] - difference = difference.view(batch_size, time_size, -1) - loss = torch.linalg.norm(difference, dim=2) / torch.linalg.norm( - target.view(batch_size, time_size, -1), dim=2 - ) - else: - raise ValueError( - "relative data error setting not recognized. Select between 'per_trajectory' and 'per_frame'" - ) - if reduction == "mean": - return loss.mean() - elif reduction == "sum": - return loss.sum() - else: - return loss - - -@jaxtyped(typechecker=typeguard.typechecked) -def param_mse_loss( - predicted_params: Dict[ - str, Float[torch.Tensor, "batch 1"] | Float[torch.Tensor, "batch 1 nx ny"] - ], - true_params: Dict[ - str, Float[torch.Tensor, "batch 1"] | Float[torch.Tensor, "batch 1 nx ny"] - ], - reduction: str, -) -> Dict[str, torch.Tensor]: - """ - Compute the MSE loss between predicted and true parameters. - :param predicted_params: torch.Tensor - dictionary of predicted parameters. - :param true_params: torch.Tensor - dictionary of true parameters. - :return: dictionary of parameter MSE losses - """ - # assert that dictionaries both have the same keys. - assert ( - predicted_params.keys() == true_params.keys() - ), "Keys of predicted and true parameters do not match." - - assert reduction in [ - "mean", - "sum", - "none", - ], "Reduction must be either 'mean' 'sum'. or 'none'" - - loss: dict[str, torch.Tensor] = dict() - - for param in predicted_params.keys(): - loss[param] = torch.nn.functional.mse_loss( - predicted_params[param], true_params[param], reduction=reduction - ) - return loss - - -@jaxtyped(typechecker=typeguard.typechecked) -def param_relative_loss( - predicted_params: Dict[ - str, Float[torch.Tensor, "batch 1"] | Float[torch.Tensor, "batch 1 nx ny"] - ], - true_params: Dict[ - str, Float[torch.Tensor, "batch 1"] | Float[torch.Tensor, "batch 1 nx ny"] - ], - reduction: str, -) -> Dict[str, torch.Tensor]: - """ - Compute the relative loss between predicted and true parameters. - :param predicted_params: torch.Tensor - dictionary of predicted parameters. - :param true_params: torch.Tensor - dictionary of true parameters. - :return: dictionary of parameter relative losses - """ - # assert that dictionaries both have the same keys. - assert ( - predicted_params.keys() == true_params.keys() - ), "Keys of predicted and true parameters do not match." - assert reduction in [ - "mean", - "sum", - "none", - ], "Reduction must be either 'mean', 'sum' or 'none'." 
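# The per-parameter relative error computed below is ||pred - true||_2 / ||true||_2,
# evaluated per batch element; 2D coefficient fields (Darcy flow) reuse
# relative_error(..., reduction="none"), while scalar parameters take the norm over dim=1.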
- - loss: dict[str, torch.Tensor] = dict() - - for param in predicted_params.keys(): - # In the case that the parameter is a vector field, we have to use the relative l2 loss from the forward problem - # Only relevant in the case of darcy flow - if isinstance(true_params[param], Float[torch.Tensor, "batch 1 nx ny"]): - predicted = predicted_params[param] - true = true_params[param] - error = relative_error(predicted, true, reduction="none") - loss[param] = error - else: - difference = predicted_params[param] - true_params[param] - - loss[param] = torch.linalg.norm(difference, dim=1) / torch.linalg.norm( - true_params[param], dim=1 - ) - - if reduction == "mean": - loss[param] = loss[param].mean() - elif reduction == "sum": - loss[param] = loss[param].sum() - - return loss - - -@jaxtyped(typechecker=typeguard.typechecked) -def pde_residual_reduction( - pde_residual: Float[torch.Tensor, "batch ..."], - reduction: str = "mean", - dim: Any = None, -) -> torch.Tensor: - """ - Given a tensor of PDE residuals (B x ...), compute the reduction of the residuals into a single metric. - :param pde_residual: torch.Tensor containing the PDE residual field. - :param reduction: How to reduce different batch elements. Must be one of "sum" or "mean". Default: "mean" - :param dim: dimension(s) along which to reduce. Default: None - """ - target = torch.zeros_like(pde_residual) - sq_diff = (pde_residual - target).square() - if reduction == "mean": - return sq_diff.mean(dim) - elif reduction == "sum": - return sq_diff.sum(dim) - else: - AssertionError("Reduction method not recognized.") - - -@jaxtyped(typechecker=typeguard.typechecked) -def classification_metrics_darcy_flow( - predicted_coeff: Float[torch.Tensor, "batch 1 nx ny"], - true_coeff: Float[torch.Tensor, "batch 1 nx ny"], - reduction: str = "mean", -) -> dict[str, Float[torch.Tensor, ""] | Float[torch.Tensor, "batch"]]: - """ - Stand in for classification metrics to compute on darcy flow. - Reduction represents the batch-wise reduction. - Returns a dict with two keys: "accuracy" and "IoU". 
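# Hedged usage sketch with hypothetical tensors: the predicted coefficient field is
# thresholded at 0.5 and compared against the binary ground-truth field.
import torch
pred = torch.rand(2, 1, 8, 8)                        # model output in [0, 1]
true = (torch.rand(2, 1, 8, 8) > 0.5).float()        # binary coefficient field
m = classification_metrics_darcy_flow(pred, true, reduction="mean")
print(m["darcy_flow_classification_accuracy"], m["darcy_flow_classification_iou"])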
- """ - metrics: dict[str, float] = {} - - # Predicted coeff is a field that goes from 0 -> 1, so we presume anything > 0.5 is binned to true - binarized_predicted = predicted_coeff > 0.5 - binarized_true = true_coeff.bool() - pointwise_correctness = binarized_predicted == binarized_true - flat_pointwise = pointwise_correctness.flatten(start_dim=1) - num_points = flat_pointwise.shape[1] - per_batch_elem_accuracy = flat_pointwise.sum(dim=1).float() / num_points - metrics["darcy_flow_classification_accuracy"] = per_batch_elem_accuracy - - intersection = binarized_predicted & binarized_true - intersection = intersection.flatten(start_dim=1) - union = binarized_predicted | binarized_true - union = union.flatten(start_dim=1) - IoU = intersection.sum(dim=1) / union.sum(dim=1) - metrics["darcy_flow_classification_iou"] = IoU - - for k in metrics.keys(): - if reduction == "mean": - metrics[k] = metrics[k].mean() - elif reduction == "sum": - metrics[k] = metrics[k].sum() - - return metrics diff --git a/pdeinvbench/losses/pde_residuals.py b/pdeinvbench/losses/pde_residuals.py deleted file mode 100644 index dc79083ce7634b0b29afc34272305ab9320e4321..0000000000000000000000000000000000000000 --- a/pdeinvbench/losses/pde_residuals.py +++ /dev/null @@ -1,384 +0,0 @@ -from functools import partial -from typing import Any, Callable, List, Optional, Tuple, Union - -import torch -import typeguard -from jaxtyping import Float, jaxtyped -from numpy import spacing -from torch import Tensor - -from pdeinvbench.losses.finite_differences import ( - partials_torch_1d_systems, - partials_torch_2d_systems, -) -from pdeinvbench.losses.fluids import navier_stokes_residual, turbulent_flow_residual -from pdeinvbench.utils.types import ( - PDE, - Type1DKDVPartialsReturnType, - Type1DRDPartialsReturnType, - Type2DRDPartialsReturnType, - TypeAdvectionPartialsReturnType, - TypeBatchSolField1D, - TypeBatchSolField2D, - TypeBurgersPartialsReturnType, - TypeNSPartials2D, - TypeParam, - TypePartials1D, - TypePartials2D, - TypeTimeGrid, - TypeUnBatchedNSPartials2D, - TypeUnBatchedNSResiduals2D, - TypeUnBatchSolField2D, - TypeXGrid, - TypeYGrid, -) - - -@jaxtyped(typechecker=typeguard.typechecked) -def get_pde_residual_function(pde_name: PDE) -> Callable: - """ - Get PDE residual function for the given pde - """ - - if pde_name == PDE.ReactionDiffusion2D: - return reaction_diff_2d_residual_compute - - if pde_name == PDE.NavierStokes2D: - return navier_stokes_2d_residual_compute - - if pde_name == PDE.TurbulentFlow2D: - return turbulent_flow_2d_residual_compute - - if pde_name == PDE.KortewegDeVries1D: - return kdv_1d_residual_compute - - if pde_name == PDE.DarcyFlow2D: - return darcy_flow_2d_residual_compute - - raise ValueError(f"Unknown PDE type: {pde_name}. No suitable residual function.") - - -@jaxtyped(typechecker=typeguard.typechecked) -def reaction_diff_2d_residual_compute( - solution_field: TypeBatchSolField2D, - pde_params: TypeParam, - spatial_grid: List[Union[TypeXGrid, TypeYGrid]], - t: TypeTimeGrid, - return_partials=False, -) -> Type2DRDPartialsReturnType: - """ - Compute the PDE residual for 2D Reaction Diffusion. 
- R_u = u - u^3 - k - v - R_v = u - v - Eqn 1: du/dt = D_u * d^2u/dx^2 + D_u * d^2u/dy^2 + R_u - Eqn 2: dv/dt = D_v * d^2v/dx^2 + D_v * d^2v/dy^2 + R_v - args: - solution_field: solution_field of 2D Reaction Diffusion - x,y : spatial grids - t: temporal grid - k, du, dv: 2d Reaction Diffusion parameters - return_partials: Flag to return partial derivatives in 2D Reaction Diffusion equation - """ - u = solution_field[:, :, 0] - v = solution_field[:, :, 1] - k, du, dv = pde_params["k"], pde_params["Du"], pde_params["Dv"] - x, y = spatial_grid - if len(t.shape) == 2: - dt = t[0, 1] - t[0, 0] - else: - dt = t[1] - t[0] - - u_x, u_y, u_xx, u_yy, u_t = partials_torch_2d_systems(u, x, y, dt) - v_x, v_y, v_xx, v_yy, v_t = partials_torch_2d_systems(v, x, y, dt) - - # batch x time x space x space - # No channel dimension since we extracted U, V out of channels - k = torch.reshape(k, (-1, 1, 1, 1)) - du = torch.reshape(du, (-1, 1, 1, 1)) - dv = torch.reshape(dv, (-1, 1, 1, 1)) - - # 2d reaction diffusion equations - ru = u - (u**3) - k - v - rv = u - v - eqn1 = du * u_xx + du * u_yy + ru - u_t - eqn2 = dv * v_xx + dv * v_yy + rv - v_t - - # Expand both equations to have a channel dimension we concatenate them along - pde_residual = torch.stack([eqn1, eqn2], dim=2) - if return_partials: - u_partials = torch.cat([u_x, u_y, u_xx, u_yy, u_t], dim=1) - v_partials = torch.cat([v_x, v_y, v_xx, v_yy, v_t], dim=1) - partials = torch.stack([u_partials, v_partials], dim=2) - return pde_residual, partials - else: - return pde_residual - - -@jaxtyped(typechecker=typeguard.typechecked) -def navier_stokes_velocity_from_vorticity( - w: Float[torch.Tensor, "xspace yspace"], -) -> Tuple[Float[torch.Tensor, "xspace yspace"], Float[torch.Tensor, "xspace yspace"]]: - """ - Computes the velocity field from the vorticity field. - :param w: Vorticity of shape (Nx, Ny) - :return: Tuple of (vx, vy) velocity fields of shape (Nx, Ny) - """ - - what = torch.fft.fft2(w) - nx, ny = w.shape[-2:] - - # Compute wave numbers - kx = torch.tile( - torch.fft.fftfreq(nx, device=what.device)[:, None] * nx * 2 * torch.pi, (1, ny) - ) - ky = torch.tile( - torch.fft.fftfreq(ny, device=what.device)[None, :] * ny * 2 * torch.pi, (nx, 1) - ) - - # Compute negative laplacian - lap = kx**2 + ky**2 - lap[0, 0] = 1 - - # Compute velocities - vx = torch.fft.irfft2(what * 1j * ky / lap, what.shape) - vy = torch.fft.irfft2(what * -1j * kx / lap, what.shape) - - return vx, vy - - -@jaxtyped(typechecker=typeguard.typechecked) -def navier_stokes_2d_residual_compute( - solution_field: TypeBatchSolField2D, - pde_params: TypeParam, - spatial_grid: List[Union[TypeXGrid, TypeYGrid]], - t: TypeTimeGrid, - return_partials: bool = False, -) -> ( - Float[torch.Tensor, "batch time 1 xspace yspace"] - | tuple[ - Float[torch.Tensor, "batch time 1 xspace yspace"], - Float[torch.Tensor, "batch time 3 xspace yspace"], - ] -): - """ - Compute the PDE residual for 2D unforced Navier Stokes in vorticity form. Equation: - dw/dt + (u * \\nabla w) - re * \\lap w = 0 - args: - solution_field: solution_field of 2D Navier Stokes - pde_params: Dictionary of parameters for 2D Navier Stokes. The only key should be "re" for the reynolds number. 
- x,y : spatial grids - t: temporal grid - return_partials: Flag to return partial derivatives in 2D Navier Stokes equation - Also see fluids.py::navier_stokes_residual - """ - - re = pde_params["re"] - # remove batch dim - x = spatial_grid[0][0] - y = spatial_grid[1][0] - t = t[0] - # Channel last representation - solution_field = torch.permute(solution_field, (0, 1, 3, 4, 2)) - - residual_func = torch.vmap( - navier_stokes_residual, in_dims=(0, None, None, None, 0, None) - ) - - if return_partials: - residual, partials = residual_func(solution_field, t, x, y, re, return_partials) - else: - residual = residual_func(solution_field, t, x, y, re, return_partials) - # B, T, X, Y, C -> B, T, C, X, Y - residual = torch.permute(residual, (0, 1, 4, 2, 3)) - - if return_partials: - return residual, partials - return residual - - -@jaxtyped(typechecker=typeguard.typechecked) -def turbulent_flow_2d_residual_compute( - solution_field: TypeBatchSolField2D, - pde_params: TypeParam, - spatial_grid: List[Union[TypeXGrid, TypeYGrid]], - t: TypeTimeGrid, - return_partials: bool = False, -) -> ( - Float[torch.Tensor, "batch time 1 xspace yspace"] - | tuple[ - Float[torch.Tensor, "batch time 1 xspace yspace"], - Float[torch.Tensor, "batch time 3 xspace yspace"], - ] -): - """ - Computes residual for forced 2D TF. See fluids.py::turbulent_flow_residual. - """ - nu = pde_params["nu"] - # remove batch dim - x = spatial_grid[0][0] - y = spatial_grid[1][0] - t = t[0] - # Channel last representation - solution_field = torch.permute(solution_field, (0, 1, 3, 4, 2)) - - residual_func = torch.vmap( - turbulent_flow_residual, in_dims=(0, None, None, None, 0, None) - ) - - if return_partials: - residual, partials = residual_func(solution_field, t, x, y, nu, return_partials) - else: - residual = residual_func(solution_field, t, x, y, nu, return_partials) - # B, T, X, Y, C -> B, T, C, X, Y - residual = torch.permute(residual, (0, 1, 4, 2, 3)) - - if return_partials: - return residual, partials - return residual - - -@jaxtyped(typechecker=typeguard.typechecked) -def kdv_1d_residual_compute( - solution_field: TypePartials1D, - pde_params: TypeParam, - spatial_grid: List[TypeXGrid], - t: TypeTimeGrid, - return_partials: bool = False, -) -> Type1DKDVPartialsReturnType: - """ - Compute the PDE residual for 1D Korteweg de Vries. 
- du/dt + 6u * du/dx + delta**2 * d^3u/dx^3 = 0 - args: - solution_field: solution_field of 1D KDV - x : spatial grids - t: temporal grid - delta: 1d KDV parameter - return_partials: Flag to return partial derivatives in 1D KDV - """ - # Spatial grid is a tuple of tensors - # Each tensor is of shape B x Nx - x = spatial_grid[0] - if len(t.shape) == 2: - dt = t[0, 1] - t[0, 0] - else: - dt = t[1] - t[0] - u = solution_field - delta = pde_params["delta"] - delta = delta.unsqueeze(-1) - - data_x, _, data_xx, data_t = partials_torch_1d_systems(u, x, dt) - - # We still need data_xxx - x_axis = -1 - # Since each spatial is B x Nx, we need to grab a single x - x = x[0] - data_xxx = torch.gradient(data_xx, spacing=(x,), dim=x_axis)[0] - - residual = data_t + u * data_x + delta**2 * data_xxx - if return_partials: - u_partials = torch.cat([data_x, data_xx, data_xxx, data_t], dim=1) - return residual, u_partials - - return residual - - -def _single_darcy_flow_residual( - solution_field: Float[Tensor, "1 1 nx ny"], # time, channel, nx, ny - binary_coeffs: Float[Tensor, "1 nx ny"], - max_val: float, - min_val: float, - dx: float, - dy: float, - return_partials: bool = False, -) -> ( - Float[Tensor, "1 1 nx ny"] - | tuple[Float[Tensor, "1 1 nx ny"], Float[Tensor, "1 4 nx ny"]] -): - # Note: As shorthand to fit the math, we use u = solution_field and a = coeffs - u = solution_field - # prune time dim - u = torch.squeeze(u, dim=0) - - # Denormalize coeffs - normalized_diff = max_val - min_val - a = (binary_coeffs * normalized_diff) + min_val - forcing_func = torch.ones_like(u) - _, nx, ny = u.shape - - ux = torch.gradient(u, spacing=dx, dim=1)[0] - uy = torch.gradient(u, spacing=dy, dim=2)[0] - - aux = a * ux - auy = a * uy - - auxx = torch.gradient(aux, spacing=dx, dim=1)[0] - auyy = torch.gradient(auy, spacing=dy, dim=2)[0] - lhs = -(auxx + auyy) - residual = lhs - forcing_func - # Add back in time dim - residual = torch.unsqueeze(residual, dim=0) - - if return_partials: - uxx = torch.gradient(ux, spacing=dx, dim=1)[0] - uyy = torch.gradient(uy, spacing=dy, dim=2)[0] - # 1, nx, ny -> num_partials, nx, ny - partials = torch.cat([ux, uy, uxx, uyy], dim=0) - # Add time dim: 1, nx, ny -> 1, 1, nx, ny - partials = torch.unsqueeze(partials, dim=0) - return residual, partials - return residual - - -@jaxtyped(typechecker=typeguard.typechecked) -def darcy_flow_2d_residual_compute( - solution_field: Float[Tensor, "batch 1 1 nx ny"], - pde_params: TypeParam, - spatial_grid: List[Union[TypeXGrid, TypeYGrid]], - t: TypeTimeGrid, - return_partials: bool = False, -) -> ( - Float[Tensor, "batch 1 nx ny"] - | tuple[Float[Tensor, "batch 1 1 nx ny"], Float[Tensor, "batch 1 4 nx ny"]] -): - """ - Compute the 2D Darcy Flow residual. Darcy flow is time independent so t is - not used. 
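# The equation assumed here (matching _single_darcy_flow_residual above) is the steady
# Darcy problem  -div( a(x) * grad u(x) ) = f(x)  with constant forcing f = 1, where the
# binary coefficient field is first rescaled back to [min_val, max_val].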
PDE: - - args: - solution_field: Solution field of 2D darcy flow (batch, channel, nx, ny) - x: spatial grids - t: temporal grid (unused) - params: coeff field [tensor same shape as solution_field] - return_partials: bool to return partial derivatives - """ - x, y = spatial_grid - x = x[0] - y = y[0] - dx = (x[1] - x[0]).item() - dy = (y[1] - y[0]).item() - max_vals = pde_params["max_val"] - min_vals = pde_params["min_val"] - binary_coeffs = pde_params["coeff"] - - del t - return torch.vmap( - _single_darcy_flow_residual, - in_dims=( - 0, - 0, - 0, - 0, - None, - None, - None, - ), - out_dims=0, - )( - solution_field, - binary_coeffs, - max_vals, - min_vals, - dx, - dy, - return_partials, - ) diff --git a/pdeinvbench/models/__init__.py b/pdeinvbench/models/__init__.py deleted file mode 100644 index 19cff3dd7b600bcadc65ea7388a2eb331de4c2c3..0000000000000000000000000000000000000000 --- a/pdeinvbench/models/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .mlp import MLP -from .conv_head import ConvHead -from .downsampler import ConvDownsampler -from .encoder import FNOEncoder -from .inverse_model import InverseModel, ParameterNet diff --git a/pdeinvbench/models/conv_head.py b/pdeinvbench/models/conv_head.py deleted file mode 100644 index 54632c140b327faeedff6dae5ba659aa5203090c..0000000000000000000000000000000000000000 --- a/pdeinvbench/models/conv_head.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch -from jaxtyping import Float -from torch import Tensor, nn - - -def to_binary_mask(x: Float[Tensor, "batch 1 nx ny"]) -> Float[Tensor, "batch 1 nx ny"]: - """ - converts x into a binary mask using torch exp. - """ - # return 1 / (1 + torch.exp(-x)) - return nn.functional.sigmoid(x) - - -class ConvHead(nn.Module): - """ - Simple convolution head which uses pointwise convolutions to generate a segmentation map. - The segmentation map is binary. All convolutions are done pointwise (kernel size = 1) - """ - - def __init__( - self, - hidden_dim: int, - in_dim: int, - n_layers: int, - dropout: float, - activation: str, - out_dim: int = 1, - ) -> None: - super(ConvHead, self).__init__() - activation_fn = None - if activation == "relu": - activation_fn = nn.ReLU() - elif activation == "gelu": - activation_fn = nn.GELU() - else: - raise NotImplementedError( - f"Activation function not implemented {activation_fn}" - ) - layers = [] - # Initial layer - layers.append(nn.Conv2d(in_dim, hidden_dim, 1)) - layers.append(activation_fn) - if dropout != 0: - layers.append(nn.Dropout(p=dropout)) - - for _ in range(n_layers): - layers.append(nn.Conv2d(hidden_dim, hidden_dim, 1)) - layers.append(activation_fn) - if dropout != 0: - layers.append(nn.Dropout(p=dropout)) - - # output layer - layers.append(nn.Conv2d(hidden_dim, 1, 1)) - self.layers = nn.Sequential(*layers) - - def forward(self, x: Float[Tensor, "batch channel nx ny"]): - return to_binary_mask(self.layers(x)) diff --git a/pdeinvbench/models/downsampler.py b/pdeinvbench/models/downsampler.py deleted file mode 100644 index 380343d86eb8056a6e4ebec62266c76189e2329d..0000000000000000000000000000000000000000 --- a/pdeinvbench/models/downsampler.py +++ /dev/null @@ -1,141 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn import Conv1d, Conv2d, Linear, ReLU - -from pdeinvbench.utils.types import PDE - - -class ConvDownsampler(nn.Module): - """ - Multi-layer convolutional downsampler for spatial dimension reduction. 
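# Rule of thumb for sizing (assuming stride-1 convolutions with matching padding):
# every ConvBlock ends in a stride-2 max pool, so n_layers blocks shrink each spatial
# dimension by roughly a factor of 2 ** n_layers (e.g. 64 -> 8 with n_layers = 3).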
- - Stacks multiple ConvBlock layers, each consisting of a convolutional layer, - ReLU activation, dropout, and max pooling. Supports both 1D and 2D spatial - dimensions. - - Parameters - ---------- - n_layers : int - Number of convolutional blocks to stack. - input_dimension : int - Spatial dimensionality of the input (1 for 1D, 2 for 2D). - Determines whether to use Conv1d or Conv2d operations. - in_channels : int - Number of input channels. Note: this stays constant across all layers - in the current implementation. - out_channels : int - Number of output channels for each convolutional layer. - kernel_size : int - Size of the convolving kernel. - stride : int - Stride of the convolution operation. - padding : int - Zero-padding added to both sides of the input. - dropout : float - Dropout probability applied after each ReLU activation. - """ - - def __init__( - self, - n_layers: int, - input_dimension: int, - in_channels: int, - out_channels: int, - kernel_size: int, - stride: int, - padding: int, - dropout: float, - ): - super(ConvDownsampler, self).__init__() - - self.n_layers = n_layers - self.input_dimension = input_dimension - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.stride = stride - self.padding = padding - self.dropout = dropout - self.blocks = nn.ModuleList() - for _ in range(n_layers): - self.blocks.append( - ConvBlock( - input_dimension, - in_channels, - out_channels, - kernel_size, - stride, - padding, - dropout, - ) - ) - - def forward(self, x): - for block in self.blocks: - x = block(x) - return x - - -class ConvBlock(nn.Module): - """ - Conv block with a convolutional layer, ReLU activation and maxpooling. - """ - - def __init__( - self, - input_dimension: int, - in_channels: int, - out_channels: int, - kernel_size: int, - stride: int, - padding: int, - dropout: float, - ): - super(ConvBlock, self).__init__() - - if input_dimension == 2: - self.conv = Conv2d( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=(padding, padding), - ) - self.maxpool = nn.MaxPool2d(2) - elif input_dimension == 1: - self.conv = Conv1d( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - ) - self.maxpool = nn.MaxPool1d(2) - else: - raise ValueError("Input dimension must be 1 or 2.") - - self.relu = ReLU() - self.dropout_layer = nn.Dropout(dropout) - - def forward(self, x): - x = self.conv(x) - x = self.relu(x) - x = self.dropout_layer(x) - x = self.maxpool(x) - return x - - -class IdentityMap(nn.Module): - """ - Identity downsampler to use for darcy flow. Since Fno maps to function spaces, - there is no spatial downsampling that needs to be done. 
- """ - - def __init__(self, **kwargs): - super(IdentityMap, self).__init__() - # Ignore all kwargs - IdentityMap doesn't need any configuration - - def forward(self, x): - return x diff --git a/pdeinvbench/models/encoder.py b/pdeinvbench/models/encoder.py deleted file mode 100644 index f38d9be99d0900a40b0d6f6cf89380d1d1e28cbf..0000000000000000000000000000000000000000 --- a/pdeinvbench/models/encoder.py +++ /dev/null @@ -1,353 +0,0 @@ -from math import ceil -from typing import List - -import torch -import torch.nn as nn -from jaxtyping import Float -from neuralop.models import FNO -from scOT.model import ScOT, ScOTConfig, ScOTOutput -from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet - -from pdeinvbench.utils.types import ( - PDE, - PDE_NUM_PARAMETERS, - PDE_NUM_SPATIAL, - PDE_PARTIALS, - PDE_SPATIAL_SIZE, - PDE_TRAJ_LEN, -) - - -def resolve_number_input_channels( - n_past: int, data_channels: int, use_partials: bool, pde: PDE -) -> int: - """ - Returns the number of input channels for a pde given args: - - n_past - - data_channels - - use_partials - """ - - num_partials = PDE_PARTIALS[pde] - - if use_partials: - # each timestep gets partials appended to it - data_channels += num_partials * data_channels - - in_channels = n_past * data_channels - - return in_channels - - -class FNOEncoder(FNO): - """ - Wrapper around FNO that figures out the input channels based - on the number of past frames and partial derivatives. - :param n_modes: Number of modes to use in the FNO. - :param n_layers: Number of layers in the FNO. - :param n_past: Number of past frames to use. - :param pde: PDE to use for the partial derivatives. - :param data_channels: Number of channels per timestep in the native input data. - :param hidden_channels: Number of channels in the hidden layers. - :param use_partials: Whether to use partial derivatives as input (only applicable to the inverse problem) - """ - - def __init__( - self, - n_modes: int, - n_layers: int, - n_past: int, - n_future: int, - pde: PDE, - data_channels: int, - hidden_channels: int, - use_partials: bool, - batch_size: int, - ): - - if use_partials: - # if using partials, we are in inverse model mode - # therefore, there will be a downsampler after the encoder, - # need to preserve the number of channels - out_channels = hidden_channels - - else: - out_channels = hidden_channels - - # figure out the number of input channels - - self.use_partials = use_partials - - in_channels = resolve_number_input_channels( - n_past=n_past, - data_channels=data_channels, - use_partials=use_partials, - pde=pde, - ) - - # expand modes based on dimensionality of PDE - n_modes = [n_modes] * PDE_NUM_SPATIAL[pde] - self.batch_size = batch_size - super(FNOEncoder, self).__init__( - n_modes=n_modes, - n_layers=n_layers, - in_channels=in_channels, - hidden_channels=hidden_channels, - out_channels=out_channels, - ) - - def forward(self, x, **kwargs): - return super().forward(x) - - -class Resnet(nn.Module): - """ - Wrapper around FNO replacing FNO convolution blocks with Resnet Blocks. 
- """ - - def __init__(self, *args, **kwargs): - # super().__init__(*args, **kwargs) - super(Resnet, self).__init__() - self.in_channels = kwargs["in_channels"] - self.hidden_channels = kwargs["hidden_channels"] - self.n_layers = kwargs["n_layers"] - self.batch_size = kwargs["batch_size"] - self.in_block = BasicBlock( - inplanes=self.in_channels, - planes=self.hidden_channels, - stride=1, - downsample=None, - groups=1, - base_width=64, - dilation=1, - norm_layer=nn.BatchNorm2d, - ) - self.in_block = nn.Sequential( - nn.Conv2d( - self.in_channels, - self.hidden_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d( - self.hidden_channels, - eps=1e-05, - momentum=0.1, - affine=True, - track_running_stats=True, - ), - nn.ReLU(inplace=True), - nn.Conv2d( - self.hidden_channels, - self.hidden_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d( - self.hidden_channels, - eps=1e-05, - momentum=0.1, - affine=True, - track_running_stats=True, - ), - ) - self.resnet_blocks = nn.ModuleList( - [ - BasicBlock( - inplanes=self.hidden_channels, - planes=self.hidden_channels, - stride=1, - downsample=None, - groups=1, - base_width=64, - dilation=1, - norm_layer=nn.BatchNorm2d, - ) - for _ in range(kwargs["n_layers"]) - ] - ) - - def forward(self, x, output_shape=None, **kwargs): - """CN-Resnet's forward pass - - Parameters - ---------- - x : tensor - input tensor - output_shape : {tuple, tuple list, None}, default is None - Gives the option of specifying the exact output shape for odd shaped inputs. - * If None, don't specify an output shape - * If tuple, specifies the output-shape of the **last** FNO Block - * If tuple list, specifies the exact output-shape of each FNO Block - """ - x = self.in_block(x) - for layer_idx in range(self.n_layers): - x = self.resnet_blocks[layer_idx](x) - - return x - - -class ResnetEncoder(Resnet): - """ - Wrapper around Resnet that figures out the input channels based - on the number of past frames and partial derivatives. - :param n_layers: Number of layers in the Resnet. - :param n_past: Number of past frames to use. - :param pde: PDE to use for the partial derivatives. - :param data_channels: Number of channels per timestep in the native input data. - :param hidden_channels: Number of channels in the hidden layers. 
- :param use_partials: Whether to use partial derivatives as input (only applicable to the inverse problem) - :param mode: One of "oneshot", "autoregressive", "grid_to_soln" - """ - - def __init__( - self, - n_layers: int, - n_past: int, - n_future: int, - pde: PDE, - data_channels: int, - hidden_channels: int, - use_partials: bool, - batch_size: int, - ): - - # figure out the number of output channels - if use_partials: - # if using partials, we are in inverse model mode - # therefore, there will be a downsampler after the encoder, - # need to preserve the number of channels - out_channels = hidden_channels - - else: - out_channels = hidden_channels # data_channels - - self.use_partials = use_partials - self.pde = pde - in_channels = resolve_number_input_channels( - n_past=n_past, - data_channels=data_channels, - use_partials=use_partials, - pde=pde, - ) - - super(ResnetEncoder, self).__init__( - n_layers=n_layers, - in_channels=in_channels, - hidden_channels=hidden_channels, - out_channels=out_channels, - batch_size=batch_size, - pde=pde, - ) - - def forward(self, x, **kwargs): - if self.pde == PDE.KortewegDeVries1D: - x = x.unsqueeze(2) - x = super().forward(x) - return x[:, :, 0, :] - return super().forward(x) - - -class ScOTEncoder(nn.Module): - config: ScOTConfig - backbone: ScOT - n_past: int - in_channels: int - use_partials: bool - patch_size: int - padding_mode: str = "constant" - - def __init__( - self, - # backbone args - embed_dim: int, # patch embedding - n_layers: int, - hidden_size: int, - patch_size: int, - num_heads: list[int], - skip_connections: list[int], - depths: list[int], - # Our args - use_partials: bool, - data_channels: bool, - n_past: int, - pde: PDE, - **kwargs, - ): - super(ScOTEncoder, self).__init__() - - self.n_past = n_past - self.use_partials = use_partials - self.patch_size = patch_size - self.in_channels = resolve_number_input_channels( - n_past=self.n_past, - use_partials=self.use_partials, - data_channels=data_channels, - pde=pde, - ) - - # All pdes are on square grids - self.spatial_size = PDE_SPATIAL_SIZE[pde][0] - self.pde = pde - self.config = ScOTConfig( - num_layers=n_layers, - num_channels=self.in_channels, - num_out_channels=hidden_size, - depths=depths, - num_heads=num_heads, - skip_connections=skip_connections, - patch_size=self.patch_size, - embed_dim=embed_dim, - image_size=self.spatial_size, - **kwargs, - ) - - self.backbone = ScOT(self.config) - - def _pad_input( - self, x: Float[torch.Tensor, "batch channels nx ny"] - ) -> tuple[ - Float[torch.Tensor, "batch channels nx ny"], tuple[int, int, int, int] | None - ]: - _, _, nx, ny = x.shape - assert nx == ny, f"Non-square solutions not supported nx={nx}, ny={ny}" - total_pad: int = (self.patch_size - (nx % self.patch_size)) % self.patch_size - left_pad, right_pad = total_pad // 2, ceil(total_pad / 2) - assert ( - left_pad + right_pad == total_pad - ), f"Incorrect swin padding {left_pad} + {right_pad} = {total_pad}" - if left_pad or right_pad: - pad_vals = (left_pad, right_pad, left_pad, right_pad) - return ( - torch.nn.functional.pad( - x, - pad_vals, - mode=self.padding_mode, - value=0, - ), - pad_vals, - ) - return x, None - - def forward( - self, - x: Float[torch.Tensor, "batch channels nx ny"], - t: Float[torch.Tensor, "batch nt"] | None = None, - ) -> Float[torch.Tensor, "batch outdim nx ny"]: - # Check if we need to pad the input - if self.pde == PDE.KortewegDeVries1D: - x = x.unsqueeze(2) - x = x.repeat(1, 1, x.shape[-1], 1) - x, pad_vals = self._pad_input(x) - output: ScOTOutput = 
self.backbone(pixel_values=x, time=t).output - if pad_vals: - # undo padding - l, r, _, _ = pad_vals - output = output[..., l:-r, l:-r] - if self.pde == PDE.KortewegDeVries1D: - output = output[:, :, :1, :] - return output diff --git a/pdeinvbench/models/inverse_model.py b/pdeinvbench/models/inverse_model.py deleted file mode 100644 index 38156a9a1a5a07efbdbe49640c43fd5fa722c0a7..0000000000000000000000000000000000000000 --- a/pdeinvbench/models/inverse_model.py +++ /dev/null @@ -1,252 +0,0 @@ -import math -from functools import partial -from os.path import join -from typing import Callable, List - -import h5py -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -from neuralop.models import FNO -from torch import vmap -from torch.distributions import Categorical - -from pdeinvbench.data.transforms import ( - collapse_time_and_channels, - expand_time_and_channels, -) -from pdeinvbench.data.utils import unnormalize_params -from pdeinvbench.losses import get_pde_residual_function -from pdeinvbench.models import MLP, ConvHead -from pdeinvbench.utils.types import ( - PDE, - PDE_NUM_CHANNELS, - PDE_NUM_SPATIAL, - PDE_PARAM_VALUES, - PDE_SPATIAL_SIZE, -) - - -class InverseModel(nn.Module): - """ - Model that predicts the parameters of a PDE given a set of conditioning frames. - :param paramnet: Model that predicts the parameters. - """ - - def __init__( - self, - paramnet: nn.Module, - ): - super(InverseModel, self).__init__() - - self.paramnet = paramnet - self.pde = paramnet.pde - self.param_values = paramnet.param_values - self.pde_residual = get_pde_residual_function(self.pde) - self.params_to_predict = self.paramnet.params_to_predict - self.use_partials = self.paramnet.encoder.use_partials - self.dropout = self.paramnet.dropout - # Set up data transform and detransform functions - self.data_transform = None - self.data_detransform = None - if PDE_NUM_SPATIAL[self.pde] > 1: - self.data_transform = vmap(collapse_time_and_channels) - self.data_detransform = vmap( - partial( - expand_time_and_channels, num_channels=PDE_NUM_CHANNELS[self.pde] - ) - ) - - def forward(self, solution_field, true_params, spatial_grid, t, gumbel=False): - # get the partial derivatives and true pde residual, and append to the input if needed - with torch.no_grad(): - # PDE residual function expects time and channel dimensions to be uncollapsed - sf = ( - solution_field - if self.data_transform is None - else self.data_detransform(solution_field) - ) - true_residual, partials = self.pde_residual( - sf, true_params, spatial_grid, t, return_partials=True - ) - if self.use_partials: - partials = ( - partials - if self.data_transform is None - else self.data_transform(partials) - ) - new_solution_field = torch.cat([solution_field, partials], dim=1) - else: - new_solution_field = solution_field - - # predict parameters - pred_params = self.paramnet(new_solution_field, gumbel=gumbel, t=t) - - # compute PDE residual with predicted params (replace unpredicted params with true params) - combined_params = {**true_params, **pred_params} - - pred_residual = self.pde_residual(sf, combined_params, spatial_grid, t) - return combined_params, pred_residual, true_residual - - -class ParameterNet(nn.Module): - """ - Model that predicts PDE parameters given conditioning frames. - :param params_to_predict: list of strings of parameters to predict. - :param pde: enum of the PDE to use for the residual calculation. - :param encoder: Encoder model. 
Can be FNO or e.g ResNet - :param downsampler: Convolutional downsampler model. - :param mlp_hidden_size: Hidden size of the MLP. - :param mlp_layers: Number of layers in the MLP. - :param mlp_activation: Activation function in the MLP. - :param logspace: Whether to predict the parameters in log space. - :param normalize: Whether to normalize the parameters. - :param condition_on_time: Whether or not to provide $t$ as input to the encoder - :param mlp_type: Alternatively, may be "conv" which denotes pointwise convolution of darcy flow - """ - - def __init__( - self, - params_to_predict: List[str], - pde: PDE, - encoder: nn.Module, - downsampler: nn.Module, - mlp_hidden_size: int, - mlp_layers: int, - mlp_activation: str, - mlp_dropout: float, - logspace: bool, - normalize: bool, - downsample_factor: int, - condition_on_time: bool = False, - mlp_type: str = "mlp", - ): - super(ParameterNet, self).__init__() - - self.encoder = encoder - self.downsampler = downsampler - self.params_to_predict = params_to_predict - self.logspace = logspace - self.normalize = normalize - self.pde = pde - self.input_size = PDE_SPATIAL_SIZE[pde] - self.condition_on_time = condition_on_time - self.param_values = None - self.dropout = mlp_dropout - if self.pde != PDE.DarcyFlow2D: - self.param_values = { - param: torch.tensor(PDE_PARAM_VALUES[pde][param]).to( - torch.device(torch.cuda.current_device()) - ) - for param in params_to_predict - } - - # Consistency checks - assert not ( - logspace and normalize - ), "Cannot use logspace and normalize together." - - # get the input shape into the MLP by running a dummy forward pass - with torch.no_grad(): - dummy = torch.randn(1, self.encoder.in_channels, *self.input_size) - dummy_time = ( - torch.ones(1, self.encoder.__dict__.get("n_past", 1)) - if self.condition_on_time - else None - ) - dummy = self.encoder(dummy, t=dummy_time) - dummy = self.downsampler(dummy) - encoder_out_channels = dummy.shape[1] - dummy = torch.flatten(dummy, start_dim=1) - mlp_input_size = dummy.shape[1] - - self.heads: nn.ModuleList - - # The following calls set the heads - if mlp_type == "mlp": - self.generate_mlp_heads( - mlp_input_size=mlp_input_size, - mlp_hidden_size=mlp_hidden_size, - mlp_dropout=mlp_dropout, - mlp_layers=mlp_layers, - mlp_activation=mlp_activation, - params_to_predict=self.params_to_predict, - ) - elif mlp_type == "conv": - self.generate_conv_heads( - in_dim=encoder_out_channels, - hidden_dim=mlp_hidden_size, - n_layers=mlp_layers, - dropout=mlp_dropout, - activation=mlp_activation, - params_to_predict=self.params_to_predict, - ) - - def generate_conv_heads( - self, - in_dim: int, - hidden_dim: int, - n_layers: int, - dropout: float, - activation: str, - params_to_predict, - ): - self.heads = nn.ModuleList( - [ - ConvHead( - in_dim=in_dim, - hidden_dim=hidden_dim, - n_layers=n_layers, - dropout=dropout, - activation=activation, - ) - for param in params_to_predict - ] - ) - - def generate_mlp_heads( - self, - mlp_input_size: int, - mlp_hidden_size: int, - mlp_dropout: float, - mlp_layers: int, - mlp_activation, - params_to_predict, - ): - self.heads = nn.ModuleList( - [ - MLP( - in_dim=int(mlp_input_size), - hidden_size=mlp_hidden_size, - dropout=mlp_dropout, - out_dim=1, - num_layers=mlp_layers, - activation=mlp_activation, - ) - for param in params_to_predict - ] - ) - - def forward(self, x, gumbel=False, t=None): - if self.condition_on_time: - x = self.encoder(x, t=t) - else: - x = self.encoder(x) - x = self.downsampler(x) - # We follow different paths depending on the 
PDE - if self.pde == PDE.DarcyFlow2D: - preds = [head(x) for head in self.heads] - else: - x = torch.flatten(x, start_dim=1) - # combine output from each head - preds = [head(x) for head in self.heads] - if self.logspace: - preds = [torch.exp(pred) for pred in preds] - - # convert to a dictionary - preds = {k: v for k, v in zip(self.params_to_predict, preds)} - if self.normalize: - preds = unnormalize_params(preds, self.pde) - return preds diff --git a/pdeinvbench/models/mlp.py b/pdeinvbench/models/mlp.py deleted file mode 100644 index d23f37ff036a6a2852580a04494866b216398d01..0000000000000000000000000000000000000000 --- a/pdeinvbench/models/mlp.py +++ /dev/null @@ -1,49 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F - - -class MLP(nn.Module): - """ - MLP with a variable number of hidden layers and activation functions. - """ - - def __init__( - self, - in_dim: int, - hidden_size: int, - dropout: float, - out_dim: int, - num_layers: int, - activation: str, - ): - super(MLP, self).__init__() - self.layers = nn.ModuleList() - - # Input layer - self.layers.append(nn.Linear(in_dim, hidden_size)) - if dropout != 0: - self.layers.append(nn.Dropout(dropout)) - - # Hidden layers - for _ in range(num_layers - 1): - self.layers.append(nn.Linear(hidden_size, hidden_size)) - if dropout != 0: - self.layers.append(nn.Dropout(dropout)) - - # Output layer - self.layers.append(nn.Linear(hidden_size, out_dim)) - - # Activation function - if activation == "relu": - self.activation = F.relu - elif activation == "gelu": - self.activation = F.gelu - else: - raise ValueError(f"Unsupported activation: {activation}") - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = layer(x) - if i < len(self.layers) - 1: # Apply activation to all but last layer - x = self.activation(x) - return x diff --git a/pdeinvbench/utils/__init__.py b/pdeinvbench/utils/__init__.py deleted file mode 100644 index a9a26be529bc6769af770a2fe2ce5f968561ee73..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .run_utils import validate_git_status -from .config_utils import get_function_from_string diff --git a/pdeinvbench/utils/config_utils.py b/pdeinvbench/utils/config_utils.py deleted file mode 100644 index 3c8b53548648a17b83b780d5d0a2dda42396f001..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/config_utils.py +++ /dev/null @@ -1,66 +0,0 @@ -import importlib -from typing import Callable - -import typeguard -from hydra.utils import instantiate -from jaxtyping import Float, jaxtyped -from omegaconf import DictConfig - -from pdeinvbench.utils.types import ( - HIGH_RESOLUTION_PDE_SPATIAL_SIZE, - PDE, - PDE_SPATIAL_SIZE, -) - - -@jaxtyped(typechecker=typeguard.typechecked) -def get_function_from_string(string: str) -> Callable: - """ - Converts a function specified as a string to an actual function object. Used in hydra configs. - """ - module_name, function_name = string.rsplit(".", 1) - # Import the module dynamically - module = importlib.import_module(module_name) - # Get the function object - function = getattr(module, function_name) - return function - - -def resolve_pde_resolution(cfg: DictConfig) -> None: - """ - Simple utility method which checks if we are using the high resolution data. - If we are, it updates the types::PDE_SPATIAL_SIZE dict. Currently only - works with the inverse setting. Assumes keys cfg:high_resolution[bool] and - cfg.data.downsample_factor[int] exist. 
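The `get_function_from_string` helper above is a thin wrapper around `importlib`; here is a minimal standalone version exercised on a stdlib target (the Hydra configs in this repository would point it at project functions instead):

```python
import importlib
from typing import Callable

def get_function_from_string(string: str) -> Callable:
    # "package.module.func" -> the function object, via a dynamic import.
    module_name, function_name = string.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, function_name)

sqrt = get_function_from_string("math.sqrt")
print(sqrt(16.0))  # 4.0, same object as `from math import sqrt`
```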
- """ - assert "high_resolution" in cfg, "No key 'high_resolution' found in hydra config" - assert ( - "data" in cfg and "downsample_factor" in cfg.data - ), "No key 'data' or 'data.downsample_factor' found in hydra config" - high_resolution: bool = cfg.high_resolution - downsample_factor: int = cfg.data.downsample_factor - pde: PDE = instantiate(cfg.data.pde) - - if high_resolution: - assert ( - pde in HIGH_RESOLUTION_PDE_SPATIAL_SIZE - ), f"Could not find {pde} in high resolution PDE size mapping." - - resolution: list[int] = ( - HIGH_RESOLUTION_PDE_SPATIAL_SIZE[pde] - if high_resolution - else PDE_SPATIAL_SIZE[pde] - ) - if ( - downsample_factor == 0 - ): # Ensures that dynamic setting works without downsampling - downsample_factor = 1 - new_resolution: list[float] = [res / downsample_factor for res in resolution] - # only allow downsampling to an integer factor - for res in new_resolution: - assert ( - int(res) == res - ), f"Downsample factor leads to non-integer resolution {res}" - - new_resolution: list[int] = [int(res) for res in new_resolution] - PDE_SPATIAL_SIZE[pde] = new_resolution diff --git a/pdeinvbench/utils/logging_utils.py b/pdeinvbench/utils/logging_utils.py deleted file mode 100644 index 3627a72cd831f801007a1770a7f9c45b540dd111..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/logging_utils.py +++ /dev/null @@ -1,260 +0,0 @@ -import numpy as np -import pdeinvbench.utils.pytorch_utils as ptu -from pdeinvbench.utils.pytorch_utils import is_numpy -from jaxtyping import jaxtyped -import typeguard -import wandb -from pdeinvbench.utils.types import ( - TypeScaleInputField2D, - TypeScaleInputField1D, - TypeScaledField1D, - TypeLoggingField1D, - TypeLoggingField2D, -) -from typing import Dict, Tuple -import torch - -from lightning.pytorch.loggers import WandbLogger - -import time -import warnings - - -class CustomWandbLogger(WandbLogger): - def after_save_checkpoint(self, checkpoint_callback): - - # Logic to delete intermediate checkpoints - # You can use the W&B API to delete specific checkpoints if needed - # Example: delete the checkpoints that are not the best - super().after_save_checkpoint(checkpoint_callback) - if self._log_model: - api = wandb.Api() - entity = self._wandb_init["entity"] - project = self._project - model_ckpt = self._checkpoint_name - try: - model_artifacts = api.artifacts( - "model", f"{entity}/{project}/{model_ckpt}" - ) - for model_ckpt in model_artifacts: - if ( - model_ckpt.state != "DELETED" - and "latest" not in model_ckpt.aliases - and "best" not in model_ckpt.aliases - ): - model_ckpt.delete() - except Exception as e: - # adhoc way to do deal with latency in wandb - warnings.warn(f"W&B error {e}") - - -@jaxtyped(typechecker=typeguard.typechecked) -def scale_2d_field_for_wandb( - predictions: TypeScaleInputField2D, - targets: TypeScaleInputField2D, -) -> TypeLoggingField2D: - """ - Scales the predicted trajectory and the ground truth trajectory between [0,255] for wandb video logging for 2d systems, per channel. - Computes the absolute difference between the predicted and ground truth trajectories and scales the difference - between [0,255] for wandb video logging. - For the predictions and target fields: - The 0 value corresponds to the lowest solution field value across both ground truth and predicted trajectories. - The 255 value corresponds to the largest solution field value across both ground truth and predicted trajectories. 
- The absolute difference field: - The 0 value corresponds to the lowest absolute difference value across both ground truth and predicted trajectories. - The 255 value corresponds to the largest absolute difference value across both ground truth and predicted trajectories. - The scaled fields repeated 3 times as RGB channel dimensions for logging. - :param predictions: numpy.Array - predicted solution (T x X_spatial x y_spatial) - :param target: numpy.Array - target solution (T x X_spatial x y_spatial) - :return: numpy.Array - scaled target field (T x 3 x X_spatial x y_spatial), scaled predicted fields (T x 3 x X_spatial x y_spatial), scaled absolute difference field (T x 3 x X_spatial x y_spatial) - """ - if is_numpy(predictions): - predictions = np.expand_dims(predictions, axis=1) - else: - predictions = ptu.torch_to_numpy(predictions.unsqueeze(1)) - if is_numpy(targets): - targets = np.expand_dims(targets, axis=1) - else: - targets = ptu.torch_to_numpy(targets.unsqueeze(1)) - - pred_min, pred_max = np.min(predictions), np.max(predictions) - target_min, target_max = np.min(targets), np.max(targets) - scale_min, scale_max = min(pred_min, target_min), max(pred_max, target_max) - scaled_target = 255 * ( - ( - ( - np.repeat( - targets, - repeats=3, - axis=1, - ) - ) - - scale_min - ) - / (scale_max - scale_min) - ) - scaled_pred = 255 * ( - ( - ( - np.repeat( - predictions, - repeats=3, - axis=1, - ) - ) - - scale_min - ) - / (scale_max - scale_min) - ) - - diff_min, diff_max = np.min(np.abs(predictions - targets)), np.max( - np.abs(predictions - targets) - ) - scaled_diff = 255 * ( - ( - np.repeat( - np.abs(predictions - targets), - repeats=3, - axis=1, - ) - - diff_min - ) - / (diff_max - diff_min) - ) - return ( - scaled_target.astype(np.uint8), - scaled_pred.astype(np.uint8), - scaled_diff.astype(np.uint8), - ) - - -@jaxtyped(typechecker=typeguard.typechecked) -def scale_1d_field(field: TypeScaleInputField1D) -> TypeScaledField1D: - """ - Scales 1D trajectory between 0 and 255 for wandb image logging. - The 0 value corresponds to the lowest solution field value across both ground truth and predicted trajectories. - The 255 value corresponds to the largest solution field value across both ground truth and predicted trajectories. - :param field: numpy.Array - predicted solution (T x X_spatial) - :return: numpy.Array - scaled field - - """ - scale_min = np.min(field) - scale_max = np.max(field) - scaled_field = 255 * ((field - scale_min) / scale_max - scale_min) - return scaled_field.astype(np.uint8) - - -@jaxtyped(typechecker=typeguard.typechecked) -def scale_1d_field_for_wandb( - predictions: TypeScaleInputField1D, - target: TypeScaleInputField1D, -) -> TypeLoggingField1D: - """ - Scales the predicted trajectory and the ground truth trajectory between [0,255] for wandb video logging for 1d systems. - Computes the absolute difference between the predicted and ground truth trajectories and scales the difference - between [0,255] for wandb video logging. - For the predictions and target fields: - The 0 value corresponds to the lowest solution field value across both ground truth and predicted trajectories. - The 255 value corresponds to the largest solution field value across both ground truth and predicted trajectories. - The absolute difference field: - The 0 value corresponds to the lowest absolute difference value across both ground truth and predicted trajectories. - The 255 value corresponds to the largest absolute difference value across both ground truth and predicted trajectories. 
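Both scaling helpers are plain min-max normalisation to `[0, 255]` followed by a cast to `uint8`, with the minimum and maximum shared between prediction and target so the two sit on the same colour scale. A minimal reference version of that transform (toy arrays, not repository data):

```python
import numpy as np

def to_uint8(field: np.ndarray, scale_min: float, scale_max: float) -> np.ndarray:
    # Map [scale_min, scale_max] -> [0, 255] and cast for wandb image/video logging.
    scaled = 255 * (field - scale_min) / (scale_max - scale_min)
    return scaled.astype(np.uint8)

pred = np.random.rand(10, 64)     # (T, X) toy prediction
target = np.random.rand(10, 64)   # (T, X) toy target
lo = min(pred.min(), target.min())
hi = max(pred.max(), target.max())
print(to_uint8(pred, lo, hi).dtype, to_uint8(target, lo, hi).max())  # uint8, at most 255
```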
- :param predictions: numpy.Array - predicted solution (T x X_spatial) - :param target: numpy.Array - target solution (T x X_spatial) - :return: numpy.Array - scaled target field, scaled predicted fields, scaled absolute difference field - """ - scale_min = min(np.min(target), np.min(predictions)) - scale_max = max(np.max(target), np.max(predictions)) - scaled_target = 255 * ((target - scale_min) / scale_max - scale_min) - scaled_predictions = 255 * ((predictions - scale_min) / scale_max - scale_min) - - difference = np.absolute(target - predictions) - difference_min, difference_max = np.min(difference), np.max(difference) - scaled_diff = 255 * ( - (difference - difference_min) / (difference_max - difference_min) - ) - return ( - scaled_target.astype(np.uint8), - scaled_predictions.astype(np.uint8), - scaled_diff.astype(np.uint8), - ) - - -def get_best_model_weights( - entity: str, - project: str, - metric: str = "validation/param_loss", - filters: Dict = None, -) -> Dict[str, torch.Tensor]: - """Get the weights from the best performing model.""" - api = wandb.Api() - - # Build filters query - filter_str = " ".join([f"{k}={v}" for k, v in (filters or {}).items()]) - - # Get all runs - runs = api.runs(f"{entity}/{project}", filters=filter_str) - - best_value = float("inf") - best_run = None - - for run in runs: - if run.state != "finished": - continue - - try: - current_value = run.summary.get(metric) - if current_value is None: - continue - - if current_value < best_value: - best_value = current_value - best_run = run - - except Exception as e: - print(f"Error processing run {run.id}: {e}") - continue - - if best_run is None: - raise ValueError("No valid runs found!") - - # Download the checkpoint - checkpoint_file = None - for file in best_run.files(): - if file.name.endswith(".ckpt"): - checkpoint_file = file - break - - if checkpoint_file is None: - raise ValueError(f"No checkpoint found in best run {best_run.id}") - - # Download and load checkpoint - checkpoint_file.download(replace=True) - checkpoint = torch.load(checkpoint_file.name) - - return checkpoint["state_dict"] - - -def collect_loss_dicts(outputs, batch, metric_name, metrics_array): - outputs = ptu.torch_dict_to_numpy(outputs) - pde_params_np = ptu.torch_dict_to_numpy(batch[-1]) - ic_index = batch[-2] - if type(ic_index) == torch.Tensor: - # to cpu and then numpy - ic_index = ic_index.cpu().numpy() - timestamps = batch[-3] - if type(timestamps) == torch.Tensor: - timestamps = timestamps.cpu().numpy() - required_batch_size = outputs[metric_name].shape[0] - param_key = list(pde_params_np.keys())[0] - if required_batch_size > pde_params_np[param_key].shape[0]: - num_repeat_elements = required_batch_size - pde_params_np[param_key].shape[0] - batch = tree_map( - lambda x: torch.cat( - [x] + [x[-1].unsqueeze(0) for _ in range(num_repeat_elements)] - ), - batch, - ) - pde_params_np = ptu.torch_dict_to_numpy(batch[-1]) - - metrics_array.append((outputs, pde_params_np, ic_index, timestamps)) diff --git a/pdeinvbench/utils/pytorch_utils.py b/pdeinvbench/utils/pytorch_utils.py deleted file mode 100644 index 576ef36dc636884709fe2192bb94c9e91d199011..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/pytorch_utils.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import numpy as np -from jaxtyping import jaxtyped -import typeguard -import typing -from functorch.dim import tree_map -import torch - - -@jaxtyped(typechecker=typeguard.typechecked) -def torch_to_numpy(tensor: typing.Any) -> np.ndarray | float: - """ - Convert a 
torch tensor to a numpy array. - """ - if isinstance(tensor, torch.Tensor): - return tensor.detach().cpu().numpy() - else: - return tensor - - -@jaxtyped(typechecker=typeguard.typechecked) -def torch_dict_to_numpy(d: dict) -> dict: - return tree_map(torch_to_numpy, d) - - -@jaxtyped(typechecker=typeguard.typechecked) -def compute_grad_norm(model: torch.nn.Module, grads: None) -> float: - - total_norm = 0 - if grads is not None: - for p in grads: - param_norm = p.norm(2) - total_norm += param_norm.item() ** 2 - total_norm = total_norm ** (1.0 / 2) - return total_norm - - for p in model.parameters(): - param_norm = p.grad.data.norm(2) - total_norm += param_norm.item() ** 2 - total_norm = total_norm ** (1.0 / 2) - return total_norm - - -def is_numpy(x: typing.Any) -> bool: - return isinstance(x, np.ndarray) diff --git a/pdeinvbench/utils/run_utils.py b/pdeinvbench/utils/run_utils.py deleted file mode 100644 index 28761001b95388940d61e86d36a07cccf20cd0f5..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/run_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -import logging -import os - -from git import Repo - -""" -Collection of utility functions for running experiments. -""" - - -def is_wandb_online(): - """ - Check if wandb is online based on wandb setting file. - """ - # Get W&B directory - default to current directory - wandb_dir = os.environ.get("WANDB_DIR", ".") - settings_file = os.path.join("wandb", "settings") - disabled = False - mode = "online" - - with open(os.path.join(wandb_dir, settings_file), "r") as file: - for line in file: - line = line.strip() - if line.startswith("disabled"): - disabled = "true" in line.split("=")[1].strip().lower() - if line.startswith("mode"): - mode = line.split("=")[1].strip().lower() - return mode == "online" and not disabled - - -def validate_git_status(): - """ - Check if the git repository is clean to run experiments. - """ - # Check the env variable if we are in a dev environment in which case, - # we ignore any git dirty status - is_dev_env = os.environ.get("META_DEV", "false").lower() == "true" - if is_dev_env: - return - - wandb_online = is_wandb_online() - repo = Repo(".", search_parent_directories=True) - repo_is_dirty = repo.is_dirty() - if wandb_online: - assert ( - not repo_is_dirty - ), "Git repository is dirty! Please commit your changes before running wandb online experiments." - elif repo_is_dirty: - logging.warning( - "Git repository is dirty! You may test out runs locally but commit your changes before running any wandb online experiments." - ) diff --git a/pdeinvbench/utils/test_utils.py b/pdeinvbench/utils/test_utils.py deleted file mode 100644 index d21079100a6c874e0af4dbfd30f819652716b58f..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/test_utils.py +++ /dev/null @@ -1,118 +0,0 @@ -import matplotlib.animation as animation -import matplotlib.pyplot as plt -import torch -from functorch.dim import tree_flatten, tree_map -import logging - -""" -Helpers for various PyTests. -""" - - -def prune_boundary(array, dim): - """ - Prune the boundary of an array. - """ - if dim == 0: - return array[1:-2] - elif dim == 1: - return array[:, 1:-2] - elif dim == 2: - return array[:, :, 1:-2] - elif dim == 3: - return array[:, :, :, 1:-2] - else: - raise ValueError("Invalid dimension.") - - -def array_difference_less_than(a, b, val): - """ - Check if all elements in A - B are less than val. 
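The gradient-norm utility above accumulates the squared L2 norm of every parameter's gradient and takes a square root at the end; a small self-contained check against a value that can be computed by hand:

```python
import torch
import torch.nn as nn

model = nn.Linear(2, 1, bias=False)
loss = model(torch.ones(1, 2)).sum()
loss.backward()

# Same accumulation as compute_grad_norm(model, grads=None) above.
total_norm = 0.0
for p in model.parameters():
    total_norm += p.grad.data.norm(2).item() ** 2
total_norm = total_norm ** 0.5
print(total_norm)  # sqrt(1^2 + 1^2) ~ 1.4142, since d(loss)/dW = x = [1, 1]
```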
- """ - return torch.all((a - b) < val) - - -def generate_synthetic_data_1d(batch_size=4, Nx=100, Nt=1024): - """ - Generate synthetic data for 1D reaction diffusion. - """ - x = torch.linspace(0, 1, Nx) - t = torch.linspace(0, 1, Nt) - tt, xx = torch.meshgrid(t, x) - u = torch.sin(xx) * torch.cos(tt) - du_dx = torch.cos(tt) * torch.cos(xx) - du_dt = -torch.sin(tt) * torch.sin(xx) - ddu_dxx = -torch.cos(tt) * torch.sin(xx) - du_sqr_dx = 2 * (torch.cos(tt) ** 2) * torch.sin(xx) * torch.cos(xx) - - ## Account for batch sizes - u = u.repeat(batch_size, 1, 1) - du_dx = du_dx.repeat(batch_size, 1, 1) - du_dt = du_dt.repeat(batch_size, 1, 1) - ddu_dxx = ddu_dxx.repeat(batch_size, 1, 1) - du_sqr_dx = du_sqr_dx.repeat(batch_size, 1, 1) - return x, t, u, du_dx, du_dt, ddu_dxx, du_sqr_dx - - -def generate_synthetic_data_2d(batch_size=4, Nx=100, Ny=100, Nt=1024): - """ - Generate synthetic data to test 2D finite differences. (3D including time). - """ - x = torch.linspace(0, 1, Nx) - y = torch.linspace(0, 1, Ny) - t = torch.linspace(0, 1, Nt) - tt, xx, yy = torch.meshgrid(t, x, y) - u = torch.cos(tt) * torch.sin(xx) * y * y - du_dx = y * y * torch.cos(tt) * torch.cos(xx) - du_dy = 2 * y * torch.cos(tt) * torch.sin(xx) - ddu_dxx = -(y * y) * torch.cos(tt) * torch.sin(xx) - ddu_dyy = 2 * torch.cos(tt) * torch.sin(xx) - du_dt = -y * y * torch.sin(tt) * torch.sin(xx) - - # Account for batch sizes - u = u.repeat(batch_size, 1, 1, 1) - du_dx = du_dx.repeat(batch_size, 1, 1, 1) - du_dy = du_dy.repeat(batch_size, 1, 1, 1) - ddu_dxx = ddu_dxx.repeat(batch_size, 1, 1, 1) - ddu_dyy = ddu_dyy.repeat(batch_size, 1, 1, 1) - du_dt = du_dt.repeat(batch_size, 1, 1, 1) - return x, y, t, u, du_dx, du_dy, ddu_dxx, ddu_dyy, du_dt - - -def create_gif_and_save(data, filename, title, cmap="magma", interval=50): - """ - Create a gif from a list of images and save it. - :param data: list of frames - :param filename: location to save gif - :param title: title of the gif - :param cmap: colormap - :param interval: interval between frames - """ - vmin = data.min() - vmax = data.max() - fig, ax = plt.subplots() - im = ax.imshow(data[0], animated=True, cmap=cmap, vmin=vmin, vmax=vmax) - ax.set_title(title) - fig.colorbar(im) - - def _update(i): - im.set_array(data[i]) - return (im,) - - animation_fig = animation.FuncAnimation( - fig, - _update, - frames=len(data), - interval=interval, - blit=True, - repeat_delay=10, - ) - # Specify writer for GIF - requires pillow: pip install pillow - try: - animation_fig.save(filename, writer="pillow") - except Exception as e: - # Fallback to imagemagick if pillow fails - print(f"Pillow writer failed, trying imagemagick: {e}") - animation_fig.save(filename, writer="imagemagick") - finally: - plt.close(fig) # Clean up to avoid memory leaks diff --git a/pdeinvbench/utils/types.py b/pdeinvbench/utils/types.py deleted file mode 100644 index 4db293f3b91b90ccc2035cd7fd9ffc05bd996b2b..0000000000000000000000000000000000000000 --- a/pdeinvbench/utils/types.py +++ /dev/null @@ -1,433 +0,0 @@ -# Type Utilities -import enum -from typing import Dict, List, Tuple, Union - -import numpy as np -import torch -from jaxtyping import Float, UInt8 - - -class PDE(enum.Enum): - """ - Describes which PDE system currently being used. - The PDE system is used to determine the correct data loading and processing steps. 
- """ - - ReactionDiffusion2D = "Reaction Diffusion 2D" - NavierStokes2D = "Navier Stokes 2D" - TurbulentFlow2D = "Turbulent Flow 2D" - KortewegDeVries1D = "Korteweg-de Vries 1D" - DarcyFlow2D = "Darcy Flow 2D" - - -"""Define a global dictionaries of PDE attributs.""" - -# Number of partial derivatives for each PDE system. -PDE_PARTIALS = { - PDE.ReactionDiffusion2D: 5, - PDE.NavierStokes2D: 3, - PDE.TurbulentFlow2D: 3, - PDE.KortewegDeVries1D: 4, - PDE.DarcyFlow2D: 4, -} - -# Number of spatial dimensions for each PDE system. -PDE_NUM_SPATIAL = { - PDE.ReactionDiffusion2D: 2, - PDE.NavierStokes2D: 2, - PDE.TurbulentFlow2D: 2, - PDE.KortewegDeVries1D: 1, - PDE.DarcyFlow2D: 2, -} - -# Spatial size of the grid for each PDE system. -PDE_SPATIAL_SIZE = { - PDE.ReactionDiffusion2D: [128, 128], - PDE.NavierStokes2D: [64, 64], - PDE.TurbulentFlow2D: [64, 64], - PDE.KortewegDeVries1D: [256], - PDE.DarcyFlow2D: [241, 241], -} - - -# Spatial size of the grid for each PDE system. -HIGH_RESOLUTION_PDE_SPATIAL_SIZE = { - PDE.ReactionDiffusion2D: [512, 512], - PDE.TurbulentFlow2D: [2048, 2048], - PDE.DarcyFlow2D: [421, 421], - PDE.NavierStokes2D: [256, 256], -} - - -# Number of parameters for each PDE system. -PDE_NUM_PARAMETERS = { - PDE.ReactionDiffusion2D: 3, - PDE.NavierStokes2D: 1, - PDE.TurbulentFlow2D: 1, - PDE.KortewegDeVries1D: 1, - PDE.DarcyFlow2D: 128, # NOTE: Incorrect, but we only use this in the forward problem? -} - -# Range of parameter values for each PDE system. -PDE_PARAM_VALUES = { - PDE.ReactionDiffusion2D: { - "k": [ - 0.00544908, - 0.01064798, - 0.01446092, - 0.01591103, - 0.02190137, - 0.02248171, - 0.03376446, - 0.04418002, - 0.05103662, - 0.05279494, - 0.05734164, - 0.06385121, - 0.06426775, - 0.06746974, - 0.07166788, - 0.07212561, - 0.07438393, - 0.08332919, - 0.08620312, - 0.08693649, - 0.0880078, - 0.08820963, - 0.0905649, - 0.09362309, - 0.09649866, - 0.09658253, - 0.09808294, - 0.09985239, - ], - "Du": [ - 0.02219061, - 0.07546761, - 0.0816335, - 0.117242, - 0.1297511, - 0.1470162, - 0.1975422, - 0.2052899, - 0.2223661, - 0.2351847, - 0.238229, - 0.3073048, - 0.3356696, - 0.3410229, - 0.3570933, - 0.3594401, - 0.3844191, - 0.4004743, - 0.4182471, - 0.4187508, - 0.4282146, - 0.4363962, - 0.4394185, - 0.4521105, - 0.4605572, - 0.4644799, - 0.4954957, - 0.4978229, - ], - "Dv": [ - 0.01647486, - 0.03266683, - 0.03295169, - 0.0336989, - 0.04517053, - 0.1197443, - 0.1431938, - 0.1512121, - 0.1513326, - 0.1761043, - 0.1856076, - 0.1935473, - 0.2369018, - 0.2541142, - 0.2725704, - 0.2871926, - 0.2925416, - 0.292952, - 0.2959587, - 0.3023561, - 0.3132344, - 0.3136975, - 0.3793569, - 0.4004971, - 0.4271173, - 0.4328981, - 0.4949132, - ], - }, - PDE.NavierStokes2D: { - "re": [ - 83.0, - 105.55940015, - 134.25044531, - 170.7397166, - 217.14677189, - 276.1672649, - 351.22952801, - 446.69371438, - 568.10506678, - 722.51602499, - 918.89588194, - 1168.65178436, - 1486.29134153, - 1890.26533093, - 2404.03945141, - 3057.45737879, - 3888.47430005, - 4945.36162206, - 6289.51092016, - 7999.0, - ] - }, - PDE.KortewegDeVries1D: {"delta": np.linspace(0.8, 5.0, 100, endpoint=True)}, - PDE.TurbulentFlow2D: { - "nu": [ - 1.00000000e-05, - 1.42792351e-05, - 2.03896555e-05, - 2.91148685e-05, - 4.15738052e-05, - 5.93642139e-05, - 8.47675566e-05, - 1.21041587e-04, - 1.72838128e-04, - 2.46799626e-04, - 3.52410989e-04, - 5.03215936e-04, - 7.18553866e-04, - 1.02603996e-03, - 1.46510658e-03, - 2.09206013e-03, - 2.98730184e-03, - 4.26563853e-03, - 6.09100555e-03, - 8.69749003e-03, - ] - }, - PDE.DarcyFlow2D: 
{ - # NOTE: This should not be used since coeff is a scalar field - "coeff": [] - }, -} - -# Number of data channels for each PDE system. -PDE_NUM_CHANNELS = { - PDE.ReactionDiffusion2D: 2, - PDE.NavierStokes2D: 1, - PDE.TurbulentFlow2D: 1, - PDE.KortewegDeVries1D: 1, - PDE.DarcyFlow2D: 1, -} - -# Number of timesteps in the trajectory for each PDE system. -PDE_TRAJ_LEN = { - PDE.ReactionDiffusion2D: 101, - PDE.NavierStokes2D: 64, - PDE.TurbulentFlow2D: 60, - PDE.KortewegDeVries1D: 140, - # This value is only used to pass some assertions so any non-zero value works - PDE.DarcyFlow2D: 101, -} - - -class DataMetrics(enum.Enum): - """ - Describes various data loss metrics, removing the need for metrics based on strings. - """ - - MSE = "Mean Squared Error" - Relative_Error = "Relative Error" - - -class ParamMetrics(enum.Enum): - """ - Describes various parameter loss metrics, removing the need for metrics based on strings. - """ - - MSE = "Mean Squared Error" - Relative_Error = "Relative Error" - - -######################## -# common types -TypeBatchSolField1D = Float[torch.Tensor, "batch time xspace"] -TypeBatchSolField2D = Float[torch.Tensor, "batch time channel xspace yspace"] -TypeUnBatchSolField2D = Float[torch.Tensor, "time channel xspace yspace"] - -TypeXGrid = Float[torch.Tensor, "batch xspace"] -TypeYGrid = Float[torch.Tensor, "batch yspace"] -# Navier Stokes has different grid input shape -TypeNSGrid = Float[torch.Tensor, "xspace yspace"] -TypeTimeGrid = Float[torch.Tensor, "batch timesteps"] -TypeParam = Dict[ - str, Float[torch.Tensor, "batch 1"] | Float[torch.Tensor, "batch xspace yspace 1"] -] -TypeBatch = Float[torch.Tensor, "batch"] -######################## -# types for logging_utils - -# input dimensions for scaling functions -TypeScaleInputField1D = Float[np.ndarray, "time xspace"] -TypeScaleInputField2D = Float[np.ndarray, "time xspace yspace"] - -# output dimensions for return value of scaling functions -TypeScaledField1D = UInt8[np.ndarray, "time xspace"] -TypeScaledField2D = UInt8[np.ndarray, "time 3 xspace yspace"] - -# output type for return value of scaling functions -TypeLoggingField1D = Tuple[ - TypeScaledField1D, - TypeScaledField1D, - TypeScaledField1D, -] -TypeLoggingField2D = Tuple[ - TypeScaledField2D, - TypeScaledField2D, - TypeScaledField2D, -] -######################## -# types for pde_module -TypeCollapsedInputSolField1D = Float[torch.Tensor, "batch channels_conditioning xspace"] -TypeCollapsedTargetSolField1D = Float[torch.Tensor, "batch channels_target xspace"] -TypeCollapsedInputSolField2D = Float[ - torch.Tensor, "batch channels_conditioning xspace yspace" -] -TypeCollapsedTargetSolField2D = Float[ - torch.Tensor, "batch channels_target xspace yspace" -] - -TypeTimeFrames = Float[torch.Tensor, "batch n_past"] -TypeICIndex = Float[torch.Tensor, "batch 1"] -# input batch types for pde module - - -TypeBatch1D = List[ - Union[ - List[TypeXGrid], - TypeTimeGrid, - TypeCollapsedInputSolField1D, - TypeCollapsedTargetSolField1D, - TypeTimeFrames, - TypeICIndex, - TypeParam, - ] -] - -TypeBatch2D = List[ - Union[ - List[Union[TypeXGrid, TypeYGrid]], - TypeTimeGrid, - TypeCollapsedInputSolField2D, - TypeCollapsedTargetSolField2D, - TypeTimeFrames, - TypeICIndex, - TypeParam, - ] -] - -# output prediction types for pde module - -TypePredict1D = Tuple[ - TypeBatchSolField1D, - TypeBatchSolField1D, - # Dict of dict so that we may store auxillary metrics during tailoring - Dict[str, Union[torch.Tensor, Float[torch.Tensor, "batch"], Dict]], -] -TypePredict2D = Tuple[ 
- TypeBatchSolField2D, - TypeBatchSolField2D, - # Dict of dict so that we may store auxillary metrics during tailoring - Dict[str, Union[torch.Tensor, Float[torch.Tensor, "batch"], Dict]], -] - -TypeLossDict = Dict[str, Union[Dict, torch.Tensor, Float[torch.Tensor, "batch"]]] - -# input and output types for autoregressive rollout in pde module -TypeAutoRegressiveInitFrames = Union[ - Float[torch.Tensor, "batch n_past xspace"], - Float[torch.Tensor, "batch n_pastxchannels xspace yspace"], -] - -TypeAutoRegressivePredFrames = Union[ - Float[torch.Tensor, "batch n_fut xspace"], - Float[torch.Tensor, "batch n_futxchannels xspace yspace"], -] -######################## -# types for pde_residual - -# Shape of partial differentials computed for 1d and 2d systems -TypePartials1D = TypeBatchSolField1D -TypePartials2D = Float[torch.Tensor, "batch time xspace yspace"] -TypeNSPartials2D = Float[torch.Tensor, "batch 3 time xspace yspace"] - -# return type for advection residual + partials -TypeAdvectionPartialsReturnType = Union[ - TypePartials1D, - Tuple[ - TypePartials1D, - Float[torch.Tensor, "batch 2*time xspace"], - ], -] -# return type for burgers residual + partials -TypeBurgersPartialsReturnType = Union[ - TypePartials1D, - Tuple[ - TypePartials1D, - Float[torch.Tensor, "batch 3*time xspace"], - ], -] -# return type for 1drd residual + partials -Type1DRDPartialsReturnType = Union[ - TypePartials1D, - Tuple[ - TypePartials1D, - Float[torch.Tensor, "batch 2*time xspace"], - ], -] - -# Return type for 1D KDV residual + partials -Type1DKDVPartialsReturnType = Union[ - TypePartials1D, - Tuple[ - TypePartials1D, - Float[torch.Tensor, "batch 4*time xspace"], - ], -] - -# return type for 2drd residual + partials -Type2DRDPartialsReturnType = Union[ - TypeBatchSolField2D, - Tuple[ - TypeBatchSolField2D, - Float[torch.Tensor, "batch time*5 channels xspace yspace"], - ], -] - -# return types for 2dns residual + partials -TypeUnBatchedNSPartials2D = Float[torch.Tensor, "3 time xspace yspace"] -TypeUnBatchedNSResiduals2D = Float[torch.Tensor, "time xspace yspace"] - - -################################## -# types for finite_differences - -# Return type after computing all needed partials -Type1DRPartialsTuple = Tuple[ - TypeBatchSolField1D, - TypeBatchSolField1D, - TypeBatchSolField1D, - TypeBatchSolField1D, -] -Type2DRDPartialsTuple = Tuple[ - TypePartials2D, - TypePartials2D, - TypePartials2D, - TypePartials2D, - TypePartials2D, -] diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 0ce18def86d18691acf9cfe44c5dd6eae504e6a3..0000000000000000000000000000000000000000 --- a/pyproject.toml +++ /dev/null @@ -1,12 +0,0 @@ -[project] -name = "PDEInvBench" -version = "0.0.1" -authors = [ -] -description = "PDE Inverse Benchmarking" -readme = "README.md" -requires-python = ">=3.10" -classifiers = ["Programming Language :: Python :: 3"] - -[tool.setuptools] -packages = ["pdeinvbench"] diff --git a/tests/fluids_numpy_reference.py b/tests/fluids_numpy_reference.py deleted file mode 100644 index bab8e2949f9b3204adc5989cf85a40c26a4c4051..0000000000000000000000000000000000000000 --- a/tests/fluids_numpy_reference.py +++ /dev/null @@ -1,168 +0,0 @@ -from warnings import warn - -import numpy as np - -warn( - "DEPRECATED: The file pdeinvbench/losses/fluids_numpy.py is being imported. This is most certainly a bug!" -) - - -def _maybe_unsqueeze_3d( - u, -): - """ - Given a tensor, makes sure that the last dimension is 1 (channel dim). - Helps to ensure number of channels consistency. 
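The aliases in `types.py` lean on jaxtyping plus typeguard for runtime shape checking of tensors. A minimal sketch of how such an alias is used (the reduction function here is hypothetical; only the annotation style mirrors the repository):

```python
import torch
import typeguard
from jaxtyping import Float, jaxtyped

# Same alias style as types.py: batch x time x channel x X x Y solution fields.
TypeBatchSolField2D = Float[torch.Tensor, "batch time channel xspace yspace"]

@jaxtyped(typechecker=typeguard.typechecked)
def mean_over_time(
    u: TypeBatchSolField2D,
) -> Float[torch.Tensor, "batch channel xspace yspace"]:
    # Reduce the time axis; jaxtyping verifies both shapes at call time.
    return u.mean(dim=1)

u = torch.randn(2, 5, 1, 8, 8)
print(mean_over_time(u).shape)  # torch.Size([2, 1, 8, 8])
```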
- NOTE: This should only be used within this file. Assumes that u is an unbatched fluid field. - Also always assumes well-formed input. - """ - return u - - -def _maybe_unsqueeze_2d(u): - """ - Same as 3d version but assumes a tensor of shape (nx, ny, 1) - """ - return u - - -def _compute_stream_function(vorticity, x, y, fourier=False): - """ - from the vorticity, compute the stream function and returns in physical space - vorticity shape: (nt, nx, ny, 1) Channels is squeezed off - """ - w = vorticity - w = np.squeeze(w) - what = np.fft.fft2(w) - nx, ny = w.shape[-2], w.shape[-1] - dx = x[1] - x[0] - dy = y[1] - y[0] - kx = np.fft.fftfreq(nx, d=dx) * 2 * np.pi - ky = np.fft.fftfreq(ny, d=dy) * 2 * np.pi - kx, ky = np.meshgrid(kx, ky) - wavenumbers_squared = kx**2 + ky**2 - # stream function = psi - psi_hat = np.zeros_like(what, dtype=np.complex128) - psi_hat[wavenumbers_squared > 0] = what[wavenumbers_squared > 0] / ( - -wavenumbers_squared[wavenumbers_squared > 0] - ) - - if fourier: - return _maybe_unsqueeze_2d(psi_hat) - else: - return np.fft.ifft2(psi_hat).real - - -def compute_stream_function(vorticity, x, y, fourier=False): - nt = vorticity.shape[0] - out = [] - for i in range(nt): - out.append(_compute_stream_function(vorticity[i], x, y, fourier=fourier)) - return np.asarray(out) - - -def _compute_stream_function_jaxcfd(vorticity, x, y, fourier=False): - w = vorticity - w = np.squeeze(w) - what = np.fft.fft2(w) - nx, ny = w.shape[-2], w.shape[-1] - dx = x[1] - x[0] - dy = y[1] - y[0] - kx = np.fft.fftfreq(nx, d=dx) * 2 * np.pi - ky = np.fft.fftfreq(ny, d=dy) * 2 * np.pi - kx, ky = np.meshgrid(kx, ky) - wavenumbers_squared = kx**2 + ky**2 - - # Compute psi - lap = wavenumbers_squared - lap[0, 0] = 1 - psi_hat = -1 / lap * what - if fourier: - return psi_hat - return np.fft.ifft2(psi_hat).real - - -def compute_stream_function_jaxcfd(vorticity, x, y, fourier=False): - nt = vorticity.shape[0] - out = [] - for i in range(nt): - out.append(_compute_stream_function_jaxcfd(vorticity[i], x, y, fourier=fourier)) - return np.asarray(out) - - -def advection(vorticity, x, y, stream_func): - psi_hat = stream_func(vorticity, x, y, fourier=True) - w = vorticity - w = np.squeeze(w) - # Compute u, v - nx, ny = w.shape[-2], w.shape[-1] - dx = x[1] - x[0] - dy = y[1] - y[0] - kx = np.fft.fftfreq(nx, d=dx) * 2 * np.pi - ky = np.fft.fftfreq(ny, d=dy) * 2 * np.pi - kx, ky = np.meshgrid(kx, ky) - vx_hat = 1j * ky * psi_hat - vy_hat = -1j * kx * psi_hat - vx = np.fft.ifft2(vx_hat).real - vy = np.fft.ifft2(vy_hat).real - w_dx = np.gradient(w, dx, axis=1) - w_dy = np.gradient(w, dy, axis=2) - - adv = vx * w_dx + vy * w_dy - return _maybe_unsqueeze_3d(adv), _maybe_unsqueeze_3d(vx), _maybe_unsqueeze_3d(vy) - - -def second_order_gradient(field, d_coord, axis): - return np.gradient(np.gradient(field, d_coord, axis=axis), d_coord, axis=axis) - - -def laplacian(vorticity, x, y): - w = vorticity - w = np.squeeze(w) - nx, ny = w.shape[-2], w.shape[-1] - dx = x[1] - x[0] - dy = y[1] - y[0] - w_dxx = second_order_gradient(w, dx, axis=1) - w_dyy = second_order_gradient(w, dy, axis=2) - return w_dxx + w_dyy - - -def tf_residual_numpy(w, t, x, y, nu): - alpha = 0.1 - forced_mode = 2 - dy = y[1] - y[0] - dx = x[1] - x[0] - f = forced_mode * np.cos(forced_mode * y) # Forcing function - # Broadcast to solution field: nt, nx, ny, 1 - f = np.reshape(f, (1, 1, -1, 1)) - f = np.broadcast_to(f, w.shape) - # Damping term: alpha * w - damping = alpha * w - # Compute dwdt - dt: float = t[1] - t[0] - dwdt = np.gradient(w, dt, axis=0) 
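The stream-function routines above are spectral Poisson solves: the vorticity is transformed with an FFT, divided by `-k^2` with the zero mode removed, and transformed back. A self-contained sketch of the same operation on a periodic grid where the answer is known analytically (this only illustrates the FFT mechanics, not the repository's sign or normalisation conventions):

```python
import numpy as np

# Solve laplacian(psi) = f spectrally on a periodic [0, 2*pi) x [0, 2*pi) grid.
n = 64
x = np.linspace(0, 2 * np.pi, n, endpoint=False)
xx, yy = np.meshgrid(x, x, indexing="ij")

f = np.sin(xx) + np.cos(2 * yy)                 # known right-hand side
psi_exact = -np.sin(xx) - np.cos(2 * yy) / 4    # laplacian(psi_exact) = f

k = np.fft.fftfreq(n, d=x[1] - x[0]) * 2 * np.pi
kx, ky = np.meshgrid(k, k, indexing="ij")
k2 = kx**2 + ky**2

f_hat = np.fft.fft2(f)
psi_hat = np.zeros_like(f_hat)
mask = k2 > 0
psi_hat[mask] = f_hat[mask] / (-k2[mask])       # F[laplacian] multiplies by -k^2
psi = np.fft.ifft2(psi_hat).real

print(np.max(np.abs(psi - psi_exact)))          # ~1e-14: exact up to round-off
```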
- - #### Stream function distraction #### - # In order to compute v, we need to compute the stream function psi - # This is necessary to compute the advection term - psi = compute_stream_function(w, x, y) - psi = np.expand_dims(psi, axis=-1) - - #### Final stream function = psi #### - # compute advection - adv, vx, vy = advection(w, x, y, compute_stream_function) - prep_plot = lambda a: np.expand_dims(a, axis=-1) - advection_terms = {"adv": adv, "vx": vx, "vy": vy} - #### Continue with adv = v * \nabla w - lap = laplacian(w, x, y) - - ## Diffusion term - diffusion = nu * lap - - ### Now compute the new residual - # desired equation: dwdt = -v * \nabla w + v \nabla^2 w - alpha*w + f - adv = np.expand_dims(adv, axis=-1) - diffusion = np.expand_dims(diffusion, axis=-1) - - residual = -1 * dwdt + -1 * adv + diffusion - damping + f - return residual diff --git a/tests/reaction-diffusion-residuals.py b/tests/reaction-diffusion-residuals.py deleted file mode 100644 index 0711b45eff246fca80e7520cb3e7b2506185630a..0000000000000000000000000000000000000000 --- a/tests/reaction-diffusion-residuals.py +++ /dev/null @@ -1,93 +0,0 @@ -import h5py -from pdeinvbench.data.utils import extract_params_from_path -from pdeinvbench.utils.types import PDE -from pdeinvbench.utils.test_utils import create_gif_and_save -from pdeinvbench.losses import pde_residuals -from os import makedirs, listdir -from os.path import join -import torch -import numpy as np -import matplotlib.pyplot as plt - - -"""Simple script that tests the residuals for the PDEs.""" - -img_dir = "./test-images" - - -def reaction_diffusion_2d(): - # 2D Reaction Diffusion - - # Plot - f = "/data/shared/meta-pde/reaction-diffusion-2d/train" - files = [f for f in listdir(f) if f.endswith(".h5") or f.endswith(".hdf5")] - data_file = join(f, files[0]) - data = h5py.File(data_file, "r") - x = torch.Tensor(data["0001"]["grid"]["x"][:]).unsqueeze(0) - y = torch.Tensor(data["0001"]["grid"]["y"][:]).unsqueeze(0) - t = torch.Tensor(data["0001"]["grid"]["t"][:]).unsqueeze(0) - sol = torch.Tensor(data["0001"]["data"]) - sol = sol.unsqueeze(0) # Batch dim - sol = torch.permute(sol, (0, 1, 4, 2, 3)) # B x T x C x X x Y - pde_params = extract_params_from_path(files[0], PDE.ReactionDiffusion2D) - - # Compute the residuals - residual_func = pde_residuals.get_pde_residual_function(PDE.ReactionDiffusion2D) - residual = residual_func(sol, pde_params, [x, y], t, return_partials=False) - eqn1 = residual[:, :, 0] - eqn1_scalar = torch.nn.functional.mse_loss(eqn1, torch.zeros_like(eqn1)) - eqn2 = residual[:, :, 1] - eqn2_scalar = torch.nn.functional.mse_loss(eqn2, torch.zeros_like(eqn2)) - eqn1 = eqn1.squeeze() - eqn2 = eqn2.squeeze() - - sol = sol.squeeze() # Remove batch dim of sol - # Visualize as GIFs - create_gif_and_save( - eqn1, - join(img_dir, "2d-reaction-diffusion-eqn1.gif"), - cmap="magma", - title=f"2D Reaction Diffusion u Residual {eqn1_scalar}", - ) - create_gif_and_save( - eqn2, - join(img_dir, "2d-reaction-diffusion-eqn2.gif"), - cmap="magma", - title=f"2D Reaction Diffusion v Residual {eqn2_scalar}", - ) - create_gif_and_save( - torch.abs(eqn1) > 1e-2, - join(img_dir, "2d-reaction-diffusion-eqn1-1e-2.gif"), - cmap="PuRd", - title=f"2D Reaction Diffusion u Residual (Threshold 1e-2)", - ) - create_gif_and_save( - torch.abs(eqn2) > 1e-2, - join(img_dir, "2d-reaction-diffusion-eqn2-1e-2.gif"), - cmap="PuRd", - title=f"2D Reaction Diffusion v Residual (Threshold 1e-2)", - ) - create_gif_and_save( - torch.abs(eqn1) > 1e-4, - join(img_dir, 
"2d-reaction-diffusion-eqn1-1e-4.gif"), - cmap="PuRd", - title=f"2D Reaction Diffusion u Residual (Threshold 1e-4)", - ) - create_gif_and_save( - torch.abs(eqn2) > 1e-4, - join(img_dir, "2d-reaction-diffusion-eqn2-1e-4.gif"), - cmap="PuRd", - title=f"2D Reaction Diffusion v Residual (Threshold 1e-4)", - ) - - -def main(): - ### Folder to plot images ### - makedirs(img_dir, exist_ok=True) - - # Test residuals - reaction_diffusion_2d() - - -if __name__ == "__main__": - main() diff --git a/tests/test_fluids.py b/tests/test_fluids.py deleted file mode 100644 index 0569e727298f00f51efc0450a3e6e06e7a536e62..0000000000000000000000000000000000000000 --- a/tests/test_fluids.py +++ /dev/null @@ -1,265 +0,0 @@ -""" -Tests for fluid dynamics residual computations. -Compares torch implementations against numpy reference implementations. - -Can be run as: -1. Pytest test: pytest tests/test_fluids.py (skips if data not found) -2. Standalone script: python tests/test_fluids.py --filename -""" - -import argparse -import os -import sys -from pathlib import Path - -import h5py -import numpy as np -import pytest -import torch -from loguru import logger - -from pdeinvbench.losses.fluids import ( - compute_advection, - compute_stream_function, - laplacian, - turbulent_flow_residual, -) - - -def find_turbulent_flow_data(): - """Try to find turbulent flow data in common locations.""" - possible_paths = [ - Path("../data/2D_turbulent-flow_nu=0.006153085601625313.h5"), - Path("data/2D_turbulent-flow_nu=0.006153085601625313.h5"), - Path("/data/shared/meta-pde/turbulent-flow-2d/train").glob("*.h5"), - ] - - for path in possible_paths: - if isinstance(path, Path) and path.exists(): - return str(path) - # Handle glob results - try: - for file in path: - if file.exists(): - return str(file) - except (TypeError, AttributeError): - pass - - return None - - -@pytest.fixture -def turbulent_flow_datafile(): - """Fixture that provides path to test data.""" - data_path = find_turbulent_flow_data() - if data_path is None: - pytest.skip( - "Turbulent flow test data not found. " - "This test requires real PDE data and is skipped in CI/CD. " - "Run manually with: python tests/test_fluids.py --filename " - ) - return data_path - - -def wrapper(func): - """Convert torch tensor outputs to numpy for comparison.""" - - def _wrapper(*args): - # Convert numpy to torch - new_args = [ - torch.from_numpy(a).float() if isinstance(a, np.ndarray) else a - for a in args - ] - out = func(*new_args) - return out.cpu().numpy() - - return _wrapper - - -def _maybe_unsqueeze_np(u): - """Ensure last dimension exists for channel.""" - return u if u.shape[-1] == 1 else np.expand_dims(u, axis=-1) - - -def compare_funcs(f1, f2): - """Compare outputs of two functions and log the difference.""" - - def compare(f1args, f2args): - reference = _maybe_unsqueeze_np(f1(*f1args)) - newout = wrapper(f2)(*f2args) - diff = np.linalg.norm(reference - newout) - logger.info(f"Diff between {f1.__name__} and {f2.__name__}: {diff:.2e}") - - # Assert reasonable accuracy - assert diff < 1e-3, f"Difference too large: {diff:.2e}" - return diff - - return compare - - -def run_fluids_comparison(filename: str): - """ - Run the fluids residual computation comparison. 
- - Args: - filename: Path to turbulent flow HDF5 data file - """ - # Import numpy reference implementations - from fluids_numpy_reference import ( - advection as advection_np_base, - compute_stream_function as compute_stream_function_np, - laplacian as laplacian_np, - tf_residual_numpy, - ) - - logger.info(f"Loading data from: {filename}") - - # Load data - try: - dataset = h5py.File(filename, "r") - traj_idx = "0000" - data = dataset[traj_idx]["data"][:] - logger.info(f"Data shape: {data.shape}") - t = dataset[traj_idx]["grid/t"][:] - x = dataset[traj_idx]["grid/x"][:] - y = dataset[traj_idx]["grid/y"][:] - dataset.close() - except (KeyError, FileNotFoundError) as e: - logger.error(f"Failed to load data: {e}") - raise - - # Extract parameters - nu = ( - os.path.basename(filename) - .split("=")[-1] - .replace(".h5", "") - .replace(".hdf5", "") - ) - nu: float = float(nu) - logger.info(f"Viscosity parameter nu: {nu}") - - dx = x[1] - x[0] - dy = y[1] - y[0] - - # Compute residual norm as sanity check - residual_norm = np.linalg.norm(wrapper(turbulent_flow_residual)(data, t, x, y, nu)) - logger.info(f"Computed residual norm: {residual_norm:.2e}") - - # Test 1: Stream function computation (Fourier space) - logger.info("\n=== Testing stream function (Fourier=False) ===") - compute_stream_function_np.__name__ = "compute_stream_function_np, fourier=False" - compare_funcs(compute_stream_function_np, compute_stream_function)( - (data, x, y, False), (data, dx, dy, False) - ) - - # Test 2: Stream function computation (physical space) - logger.info("\n=== Testing stream function (Fourier=True) ===") - compute_stream_function_np.__name__ = "compute_stream_function_np, fourier=True" - compare_funcs(compute_stream_function_np, compute_stream_function)( - (data, x, y, True), (data, dx, dy, True) - ) - - # Test 3: Advection term - logger.info("\n=== Testing advection ===") - - def advection_np(u, x, y): - return advection_np_base(u, x, y, stream_func=compute_stream_function_np)[0] - - advection_np.__name__ = "advection_np" - compare_funcs(advection_np, compute_advection)((data, x, y), (data, dx, dy)) - - # Test 4: Laplacian - logger.info("\n=== Testing laplacian ===") - laplacian_np.__name__ = "laplacian_np" - compare_funcs(laplacian_np, laplacian)((data, x, y), (data, dx, dy)) - - # Test 5: Velocity component vx - logger.info("\n=== Testing velocity vx ===") - - def advection_np_vx(u, x, y): - return advection_np_base(u, x, y, stream_func=compute_stream_function_np)[1] - - advection_np_vx.__name__ = "advection_np for vx" - compare_funcs( - advection_np_vx, lambda *args: compute_advection(*args, return_velocity=True)[1] - )((data, x, y), (data, dx, dy)) - - # Test 6: Velocity component vy - logger.info("\n=== Testing velocity vy ===") - - def advection_np_vy(u, x, y): - return advection_np_base(u, x, y, stream_func=compute_stream_function_np)[2] - - advection_np_vy.__name__ = "advection_np for vy" - compare_funcs( - advection_np_vy, lambda *args: compute_advection(*args, return_velocity=True)[2] - )((data, x, y), (data, dx, dy)) - - # Test 7: Full turbulent flow residual - logger.info("\n=== Testing full turbulent flow residual ===") - compare_funcs(tf_residual_numpy, turbulent_flow_residual)( - (data, t, x, y, nu), (data, t, x, y, nu) - ) - - logger.success("\n✓ All fluids tests passed!") - - -@pytest.mark.slow -@pytest.mark.data_required -def test_fluids_residual(turbulent_flow_datafile): - """ - Pytest wrapper for fluids residual comparison. - - Automatically skipped if test data is not available. 
-    Run with: pytest tests/test_fluids.py -v -m data_required
-    """
-    run_fluids_comparison(turbulent_flow_datafile)
-
-
-if __name__ == "__main__":
-    # Allow running as standalone script with argparse
-    parser = argparse.ArgumentParser(
-        description="Validate torch turbulent flow residual computations against numpy reference",
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog="""
-Examples:
-    python tests/test_fluids.py --filename data/2D_turbulent-flow_nu=0.006153085601625313.h5
-    python tests/test_fluids.py --auto-find
-        """,
-    )
-    parser.add_argument(
-        "--filename", type=str, help="Path to turbulent flow HDF5 data file"
-    )
-    parser.add_argument(
-        "--auto-find",
-        action="store_true",
-        help="Automatically search for data file in common locations",
-    )
-
-    args = parser.parse_args()
-
-    if args.auto_find or args.filename is None:
-        logger.info("Searching for turbulent flow data...")
-        filename = find_turbulent_flow_data()
-        if filename is None:
-            logger.error(
-                "Could not find turbulent flow data. "
-                "Please specify path with --filename"
-            )
-            sys.exit(1)
-        logger.info(f"Found data at: {filename}")
-    else:
-        filename = args.filename
-
-    if not Path(filename).exists():
-        logger.error(f"File not found: {filename}")
-        sys.exit(1)
-
-    try:
-        run_fluids_comparison(filename)
-    except Exception as e:
-        logger.error(f"Test failed with error: {e}")
-        import traceback
-
-        traceback.print_exc()
-        sys.exit(1)
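The deleted test above validates each torch operator by converting numpy inputs to tensors, running both implementations, and asserting that the norm of the difference stays below 1e-3. A minimal, self-contained sketch of that comparison pattern is shown below; the toy `laplacian_np`/`laplacian_torch` pair and the grid setup are illustrative assumptions and do not reproduce the actual `pdeinvbench.losses.fluids` implementations.

```python
import numpy as np
import torch

def laplacian_np(u: np.ndarray, dx: float) -> np.ndarray:
    """Toy periodic 1D Laplacian via central differences (numpy reference)."""
    return (np.roll(u, -1) - 2.0 * u + np.roll(u, 1)) / dx**2

def laplacian_torch(u: torch.Tensor, dx: float) -> torch.Tensor:
    """The same stencil implemented in torch."""
    return (torch.roll(u, -1) - 2.0 * u + torch.roll(u, 1)) / dx**2

# Compare the two implementations the same way the deleted test does
u = np.sin(np.linspace(0.0, 2.0 * np.pi, 128, endpoint=False))
dx = 2.0 * np.pi / 128
diff = np.linalg.norm(
    laplacian_np(u, dx) - laplacian_torch(torch.from_numpy(u), dx).numpy()
)
assert diff < 1e-3, f"Difference too large: {diff:.2e}"
```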
diff --git a/train_inverse.py b/train_inverse.py
deleted file mode 100644
index c00dad415fcdeda2866cca9b31b1d739989ba32a..0000000000000000000000000000000000000000
--- a/train_inverse.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import os
-import logging
-from functools import partial
-import hydra
-import torch
-import wandb
-from hydra.core.hydra_config import HydraConfig
-from hydra.utils import instantiate
-from lightning import seed_everything
-from omegaconf import DictConfig, OmegaConf
-from pdeinvbench.utils import validate_git_status
-from pdeinvbench.utils.config_utils import resolve_pde_resolution
-import sys
-import pdeinvbench
-
-# Add any other submodules you need
-
-
-@hydra.main(
-    version_base=None,
-    config_path="configs/",
-    config_name="1dkdv",
-)
-def main(cfg: DictConfig) -> None:
-    # check git dirty status
-    validate_git_status()
-
-    hydra_cfg = HydraConfig.get()
-    overrides = hydra_cfg.overrides.task
-    overrides = [
-        override
-        for override in overrides
-        if "data_root" not in override
-        and "batch_size" not in override
-        and "logging.project" not in override
-        and "logging.save_dir" not in override
-        and "num_nodes" not in override
-        and "log_model" not in override
-        and "supervised_learning_min_epoch" not in override
-        and "supervised_learning_max_epoch" not in override
-    ]
-
-    # First thing is to determine the constants
-    resolve_pde_resolution(cfg)
-
-    # Combine the config name with overrides:
-    wandb_name = hydra_cfg.job["config_name"] + "_" + "_".join(overrides)
-
-    # Fix random seed
-    seed_everything(cfg.seed)
-
-    # Define logger
-    # Arguments for wandb.init
-    wandb_args = OmegaConf.to_container(
-        cfg.logging, resolve=True, throw_on_missing=True
-    )
-    # Remove the _target_ key which usually points to the lightning logger
-    wandb_args.pop("_target_")
-    # Rename the save_dir key to dir, which is what wandb.init expects
-    # We keep save_dir in cfg.logging so that we can still resolve the lightning wandb logger
-    wandb_args["dir"] = wandb_args.pop("save_dir")
-    cfg_as_dict = OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True)
-    # Manually instantiate the wandb experiment
-    # We do this so that the wandb run is created with the correct parameters to recreate the run at the start.
-    # Otherwise, the config is only saved at the end of the run.
-    experiment = wandb.init(**wandb_args, name=wandb_name, config=cfg_as_dict)
-    # Instantiate dataloaders
-    train_dataloader = instantiate(cfg.data.train_dataloader)
-    val_dataloaders = []
-    test_dataloaders = []
-
-    val_dataloader = instantiate(cfg.data.val_dataloader)
-
-    val_dataloaders.append(val_dataloader)
-
-    # to run validation set at test time as well with best weights
-    test_dataloaders.append(val_dataloader)
-    if "ood_dataloader" in cfg.data.keys() and os.path.exists(cfg.data.ood_data_root):
-        print("ood loader")
-        test_dataloaders.append(instantiate(cfg.data.ood_dataloader))
-    if "ood_dataloader_extreme" in cfg.data.keys() and os.path.exists(
-        cfg.data.ood_data_root_extreme
-    ):
-        print("ood loader extreme")
-        test_dataloaders.append(instantiate(cfg.data.ood_dataloader_extreme))
-    if "test_dataloader" in cfg.data.keys() and os.path.exists(cfg.data.test_data_root):
-        print("test iid loader")
-        test_dataloaders.append(instantiate(cfg.data.test_dataloader))
-
-    # Instantiate model and optimizer
-    model = instantiate(cfg.model.model_config)
-
-    if "inverse_model_wandb_run" in cfg.keys() and cfg.inverse_model_wandb_run != "":
-        logging.info("Loading inverse model checkpoint from wandb")
-        inverse_model_run_path = cfg.inverse_model_wandb_run
-        artifact = experiment.use_artifact(inverse_model_run_path, type="model")
-        checkpoint_path = os.path.join(artifact.download(), "model.ckpt")
-        state_dict = torch.load(checkpoint_path, weights_only=False)["state_dict"]
-        state_dict = {k.partition("model.")[2]: v for k, v in state_dict.items()}
-        model.load_state_dict(state_dict)
-
-    optimizer = instantiate(cfg.optimizer, params=model.parameters())
-    lr_scheduler = instantiate(cfg.lr_scheduler, optimizer=optimizer)
-
-    module_kwargs = {
-        "model": model,
-        "optimizer": optimizer,
-        "lr_scheduler": lr_scheduler,
-    }
-
-    if "tailoring_optimizer" in cfg.keys():
-        # We use a partial function so that we can dynamically build new optimizers
-        def tailoring_optimizer(x):
-            return instantiate(cfg.tailoring_optimizer, params=x)
-
-        module_kwargs["tailoring_optimizer"] = tailoring_optimizer
-    else:
-        tailoring_optimizer = None
-    # Wraps the base model to perform prediction and losses
-    if tailoring_optimizer is not None:
-        print(cfg.tailoring_optimizer)
-
-    print("instantiate inverse module")
-    Inverse_module = instantiate(
-        cfg.lightning_module,
-        **module_kwargs,
-    )
-    print("instantiate inverse module done")
-    # Instantiate the lightning logger & force the lightning logger to use the wandb experiment manually created
-    log_model = "all"
-    if "log_model" in cfg:
-        log_model = cfg.log_model
-    logger = instantiate(cfg.logging, experiment=experiment, log_model=log_model)
-    logger._save_dir += "/" + cfg_as_dict["logging"]["project"] + "/" + wandb_name
-    logger.watch(model, log="all")
-    # Lightning trainer
-    if tailoring_optimizer is not None:
-        L_trainer = instantiate(
-            cfg.trainer,
-            logger=logger,
-            inference_mode=False,
-        )
-    else:
-        L_trainer = instantiate(cfg.trainer, logger=logger)
-
-    if "test_run" in cfg and cfg.test_run:
-        L_trainer.test(model=Inverse_module, dataloaders=test_dataloaders, weights_only=False)
-    else:
-        # Train
-        L_trainer.fit(
-            Inverse_module,
-            train_dataloaders=train_dataloader,
-            val_dataloaders=val_dataloaders,
-        )
-        L_trainer.test(dataloaders=test_dataloaders, ckpt_path="best", weights_only=False)
-
-
-if __name__ == "__main__":
-    main()
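The deleted training script restores a bare model from a LightningModule checkpoint by stripping the `model.` prefix from every `state_dict` key before calling `load_state_dict`. A minimal sketch of that key-renaming step is shown below; the `TinyNet` module and the in-memory checkpoint are illustrative stand-ins, not the benchmark's actual model or wandb artifact.

```python
import torch
from torch import nn

class TinyNet(nn.Module):
    """Illustrative stand-in for the instantiated inverse model."""
    def __init__(self) -> None:
        super().__init__()
        self.linear = nn.Linear(4, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear(x)

model = TinyNet()

# Pretend checkpoint produced by a LightningModule that stored the network under `self.model`
checkpoint = {"state_dict": {f"model.{k}": v for k, v in TinyNet().state_dict().items()}}

# Strip the "model." prefix, mirroring the deleted script's key handling
state_dict = {k.partition("model.")[2]: v for k, v in checkpoint["state_dict"].items()}
model.load_state_dict(state_dict)
```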