# Kuangdai
# Initial release of LUCAS-MEGA
# 9bc98d9
import re
import zipfile
from pathlib import Path

import numpy as np
import pandas as pd

from src.process.io import prepare_dirs, unzip, copy_to_local, upload
from src.process.processor import process_excel
# === Step 1: Set up local and drive directory paths ===
# prepare_dirs resolves the three working directories for this dataset;
# the script's parent-folder name identifies the processing stage.
dataset_name = "esdac"
stage_name = Path(__file__).parent.stem
local_processed_dir, drive_download_dir, drive_processed_dir = prepare_dirs(
    dataset_name, stage_name
)

# === Step 2: Extract all ZIP files and copy specific files from drive/download to local/processed ===
unzip(local_processed_dir, drive_download_dir)
copy_to_local(
    local_processed_dir,
    drive_download_dir,
    files=["LUCAS.SOIL_corr.csv"],
)

# === Step 3: Convert Excel file to CSV(s) in place ===
topsoil_xlsx = local_processed_dir / "LUCAS_TOPSOIL_v1/LUCAS_TOPSOIL_v1.xlsx"
process_excel(topsoil_xlsx)
# === Step 4: Collect all generated CSVs and standardize delimiters
move_list = []
for csv_path in local_processed_dir.rglob("*.csv"):
    try:
        # Peek at the head of the file to guess the separator: pipe wins
        # only when it strictly outnumbers commas in the sample.
        with open(csv_path, "r", encoding="utf-8") as handle:
            head = handle.read(2048)
        sep = "," if head.count(",") >= head.count("|") else "|"
        # Re-read with the guessed separator, then rewrite the file
        # comma-separated without the index column.
        frame = pd.read_csv(csv_path, delimiter=sep)
        frame.to_csv(csv_path, index=False)
        move_list.append(csv_path)
        print(f"✅ Standardized CSV: {csv_path.name}")
    except Exception as e:
        # Best-effort: a single unreadable CSV must not abort the batch.
        print(f"❌ Failed to process CSV {csv_path.name}: {e}")
# === Step 5: Process assets ===
# Export one (x, y) spectrum array per soil sample as a compressed .npz.
spec_path = local_processed_dir / "LUCAS.SOIL_corr.csv"
out_path = local_processed_dir / "assets/psd"
out_path.mkdir(parents=True, exist_ok=True)

# Drop the stray index column when present; errors="ignore" keeps re-runs
# (or inputs written without an index) from raising KeyError.
df = pd.read_csv(spec_path).drop(columns=["Unnamed: 0"], errors="ignore")

# Columns named "spc.<value>" hold the spectrum; parse the numeric axis
# value out of each column name.
spc_cols = [c for c in df.columns if c.startswith("spc.")]
x_vals = np.array([float(c.replace("spc.", "")) for c in spc_cols])

# Sort columns by ascending numeric x so every saved spectrum shares the
# same monotone axis.
sort_idx = np.argsort(x_vals)
x_vals = x_vals[sort_idx]
spc_cols_sorted = [spc_cols[i] for i in sort_idx]

# Pull the whole spectral matrix out once as float32 instead of converting
# row-by-row inside the loop — identical values, far fewer pandas calls.
spectra = df[spc_cols_sorted].to_numpy(dtype=np.float32)

for i, (point_id, raw_sample_id) in enumerate(
    zip(df["POINT_ID"], df["sample.ID"])
):
    # sample.ID may carry non-digit decoration; keep only the digits so the
    # filename stays well-formed.
    sample_id = re.sub(r"\D", "", str(raw_sample_id))
    # Two-column array of (x, y) pairs for this sample's spectrum.
    arr = np.column_stack([x_vals, spectra[i]])
    np.savez(out_path / f"lucas2009_{point_id}_{sample_id}.npz", arr=arr)
print("✅ PSD Spectrum data saved in assets")
# === Step 6: Zip PSD folder and upload only the zip ===
psd_dir = local_processed_dir / "assets/psd"
zip_path = local_processed_dir / "assets/psd.zip"

# Create zip archive. sorted() fixes the member order so the archive is
# reproducible across runs regardless of filesystem glob order.
with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as z:
    for npz_file in sorted(psd_dir.glob("*.npz")):
        # arcname=<basename> flattens the archive: members sit at the zip
        # root instead of under the full local directory path.
        z.write(npz_file, arcname=npz_file.name)
print(f"✅ Created ZIP archive: {zip_path.name}")
# Only the zip (not the individual .npz files) is queued for upload.
move_list.append(zip_path)

# === Step 7: Upload to drive/processed ===
upload(local_processed_dir, drive_processed_dir, move_list)