Upload folder using huggingface_hub
Browse files- .gitattributes +4 -60
- README.md +49 -3
- configs/dataset_config.yaml +24 -0
- configs/reasoning_prompts.yaml +42 -0
- data/brain/conversations.jsonl +3 -0
- data/brain/metadata.json +10 -0
- data/brain/reasoning_steps.jsonl +3 -0
- data/validation/validation.jsonl +1 -0
- requirements.txt +3 -0
- scripts/augment_reasoning.py +70 -0
- scripts/preprocess.py +37 -0
- scripts/push_to_hub.py +38 -0
- scripts/validate_data.py +38 -0
.gitattributes
CHANGED
|
@@ -1,60 +1,4 @@
|
|
| 1 |
-
*.
|
| 2 |
-
*.
|
| 3 |
-
*.
|
| 4 |
-
*.
|
| 5 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 36 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 37 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 38 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 39 |
-
# Audio files - uncompressed
|
| 40 |
-
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 41 |
-
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 42 |
-
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 43 |
-
# Audio files - compressed
|
| 44 |
-
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
-
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 46 |
-
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
-
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 48 |
-
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 49 |
-
# Image files - uncompressed
|
| 50 |
-
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 51 |
-
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 52 |
-
*.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
-
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 54 |
-
# Image files - compressed
|
| 55 |
-
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
-
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 57 |
-
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 58 |
-
# Video files - compressed
|
| 59 |
-
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
-
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
*.jsonl filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.json filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.yaml filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.yml filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
CHANGED
|
@@ -1,3 +1,49 @@
|
|
| 1 |
-
--
|
| 2 |
-
|
| 3 |
-
--
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Amkyaw-Core-L3
|
| 2 |
+
|
| 3 |
+
A high-quality 3-step reasoning dataset for training AI models on systematic thinking processes.
|
| 4 |
+
|
| 5 |
+
## Dataset Description
|
| 6 |
+
|
| 7 |
+
Amkyaw-Core-L3 is a reasoning-focused dataset designed to train AI models on 3-step thinking processes:
|
| 8 |
+
1. Problem understanding
|
| 9 |
+
2. Solution derivation
|
| 10 |
+
3. Verification
|
| 11 |
+
|
| 12 |
+
## Structure
|
| 13 |
+
|
| 14 |
+
```
|
| 15 |
+
Amkyaw-Core-L3/
|
| 16 |
+
├── data/
|
| 17 |
+
│   ├── brain/
|
| 18 |
+
│   │   ├── reasoning_steps.jsonl # Main reasoning data
|
| 19 |
+
│   │   ├── conversations.jsonl # User-Model dialogues
|
| 20 |
+
│   │   └── metadata.json # Training data metadata
|
| 21 |
+
│   ├── validation/
|
| 22 |
+
│   │   └── validation.jsonl # Validation data
|
| 23 |
+
│   └── raw_logs/
|
| 24 |
+
│       └── logs-*.jsonl # Raw logs from Gradio Space
|
| 25 |
+
├── scripts/
|
| 26 |
+
│   ├── preprocess.py
|
| 27 |
+
│   ├── validate_data.py
|
| 28 |
+
│   ├── push_to_hub.py
|
| 29 |
+
│   └── augment_reasoning.py
|
| 30 |
+
└── configs/
|
| 31 |
+
    ├── dataset_config.yaml
|
| 32 |
+
    └── reasoning_prompts.yaml
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
## Usage
|
| 36 |
+
|
| 37 |
+
```python
|
| 38 |
+
from datasets import load_dataset
|
| 39 |
+
|
| 40 |
+
dataset = load_dataset("amkyawdev/Amkyaw-Core-L3")
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## License
|
| 44 |
+
|
| 45 |
+
MIT License
|
| 46 |
+
|
| 47 |
+
## Citation
|
| 48 |
+
|
| 49 |
+
Please cite this dataset if used in research.
|
configs/dataset_config.yaml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
dataset_name: Amkyaw-Core-L3
|
| 2 |
+
version: "1.0.0"
|
| 3 |
+
description: 3-step reasoning dataset for AI training
|
| 4 |
+
|
| 5 |
+
# Data configuration
|
| 6 |
+
data:
|
| 7 |
+
brain:
|
| 8 |
+
reasoning_steps: data/brain/reasoning_steps.jsonl
|
| 9 |
+
conversations: data/brain/conversations.jsonl
|
| 10 |
+
metadata: data/brain/metadata.json
|
| 11 |
+
validation:
|
| 12 |
+
path: data/validation/validation.jsonl
|
| 13 |
+
|
| 14 |
+
# Processing
|
| 15 |
+
processing:
|
| 16 |
+
num_reasoning_steps: 3
|
| 17 |
+
languages:
|
| 18 |
+
- en
|
| 19 |
+
tasks:
|
| 20 |
+
- reasoning
|
| 21 |
+
- math
|
| 22 |
+
|
| 23 |
+
# License
|
| 24 |
+
license: MIT
|
configs/reasoning_prompts.yaml
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 3-Step Reasoning Prompt Templates
|
| 2 |
+
|
| 3 |
+
templates:
|
| 4 |
+
step1:
|
| 5 |
+
name: "Problem Understanding"
|
| 6 |
+
description: "Break down and understand the problem"
|
| 7 |
+
prompt: |
|
| 8 |
+
Step 1: Understand the problem
|
| 9 |
+
- Identify what is being asked
|
| 10 |
+
- Determine the operation needed
|
| 11 |
+
- Extract relevant numbers
|
| 12 |
+
|
| 13 |
+
step2:
|
| 14 |
+
name: "Solution Derivation"
|
| 15 |
+
description: "Apply the operation to find the answer"
|
| 16 |
+
prompt: |
|
| 17 |
+
Step 2: Derive the solution
|
| 18 |
+
- Apply the operation step by step
|
| 19 |
+
- Show intermediate calculations
|
| 20 |
+
- Get the final result
|
| 21 |
+
|
| 22 |
+
step3:
|
| 23 |
+
name: "Verification"
|
| 24 |
+
description: "Verify the answer is correct"
|
| 25 |
+
prompt: |
|
| 26 |
+
Step 3: Verify
|
| 27 |
+
- Check by doing the reverse operation
|
| 28 |
+
- Confirm the answer makes sense
|
| 29 |
+
|
| 30 |
+
# Example prompts
|
| 31 |
+
examples:
|
| 32 |
+
math_add: |
|
| 33 |
+
Let me solve this step by step:
|
| 34 |
+
Step 1: Add {a} + {b}
|
| 35 |
+
Step 2: Compute the sum
|
| 36 |
+
Step 3: Verify by subtracting
|
| 37 |
+
|
| 38 |
+
math_multiply: |
|
| 39 |
+
Let me solve this step by step:
|
| 40 |
+
Step 1: Break down the problem
|
| 41 |
+
Step 2: Multiply step by step
|
| 42 |
+
Step 3: Verify by dividing
|
data/brain/conversations.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{"user": "What is 15 + 27?", "assistant": "Let me solve this step by step:\n\nStep 1: Add 15 + 20 = 35\nStep 2: Add 35 + 7 = 42\nStep 3: Verify: 42 - 27 = 15 β\n\nAnswer: 42"}
|
| 2 |
+
{"user": "What is 8 Γ 6?", "assistant": "Let me solve this step by step:\n\nStep 1: Break 6 into 2 Γ 3: 8 Γ 2 = 16\nStep 2: Multiply 16 Γ 3 = 48\nStep 3: Verify: 48 Γ· 6 = 8 β\n\nAnswer: 48"}
|
| 3 |
+
{"user": "What is 100 - 37?", "assistant": "Let me solve this step by step:\n\nStep 1: Subtract 100 - 30 = 70\nStep 2: Subtract 70 - 7 = 63\nStep 3: Verify: 63 + 37 = 100 β\n\nAnswer: 63"}
|
data/brain/metadata.json
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"dataset_name": "Amkyaw-Core-L3",
|
| 3 |
+
"version": "1.0.0",
|
| 4 |
+
"description": "3-step reasoning training data",
|
| 5 |
+
"num_samples": 3,
|
| 6 |
+
"languages": ["en"],
|
| 7 |
+
"tasks": ["reasoning", "math"],
|
| 8 |
+
"created_date": "2026-04-14",
|
| 9 |
+
"license": "MIT"
|
| 10 |
+
}
|
data/brain/reasoning_steps.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{"id": "reasoning_001", "problem": "What is 15 + 27?", "steps": [{"step": 1, "description": "Add 15 + 20", "result": "35"}, {"step": 2, "description": "Add 35 + 7", "result": "42"}, {"step": 3, "description": "Verify 42 - 27 = 15", "result": "15 β"}], "final_answer": "42"}
|
| 2 |
+
{"id": "reasoning_002", "problem": "What is 8 Γ 6?", "steps": [{"step": 1, "description": "Break 6 into 2 Γ 3", "result": "8 Γ 2 = 16"}, {"step": 2, "description": "Multiply 16 Γ 3", "result": "48"}, {"step": 3, "description": "Verify 48 Γ· 6 = 8", "result": "8 β"}], "final_answer": "48"}
|
| 3 |
+
{"id": "reasoning_003", "problem": "What is 100 - 37?", "steps": [{"step": 1, "description": "Subtract 100 - 30", "result": "70"}, {"step": 2, "description": "Subtract 70 - 7", "result": "63"}, {"step": 3, "description": "Verify 63 + 37 = 100", "result": "100 β"}], "final_answer": "63"}
|
data/validation/validation.jsonl
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"id": "val_001", "problem": "What is 25 + 18?", "steps": [{"step": 1, "description": "Add 25 + 10", "result": "35"}, {"step": 2, "description": "Add 35 + 8", "result": "43"}, {"step": 3, "description": "Verify 43 - 18 = 25", "result": "25 β"}], "final_answer": "43"}
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
datasets>=2.14.0
|
| 2 |
+
huggingface_hub>=0.19.0
|
| 3 |
+
pyyaml>=6.0
|
scripts/augment_reasoning.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Augment reasoning data with additional examples.
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import random
|
| 7 |
+
import sys
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def generate_examples(num_samples: int = 100):
    """Generate synthetic 3-step reasoning examples.

    Args:
        num_samples: Number of examples to generate.

    Returns:
        A list of dicts matching the reasoning_steps.jsonl schema:
        {"id", "problem", "steps", "final_answer"}.
    """
    # (symbol shown in the problem text, callable, human-readable name)
    operators = [
        ("+", lambda a, b: a + b, "add"),
        ("-", lambda a, b: a - b, "subtract"),
        ("×", lambda a, b: a * b, "multiply"),  # fixed mojibake "Γ" -> "×"
    ]

    examples = []
    for i in range(num_samples):
        # Bug fix: the original unpacked each 3-tuple into only two names
        # (op_func, op_name), which raised ValueError on the first iteration.
        op_sym, op_func, op_name = random.choice(operators)
        a = random.randint(1, 100)
        b = random.randint(1, 20)
        result = op_func(a, b)

        example = {
            "id": f"reasoning_{i+100:03d}",
            # Use the operator symbol so generated problems read like the
            # seed data ("What is 15 + 27?"), not "What is 15 add 27?".
            "problem": f"What is {a} {op_sym} {b}?",
            "steps": [
                {"step": 1, "description": f"Apply {op_name}", "result": str(result)},
                {"step": 2, "description": "Verify", "result": "✓"},  # fixed mojibake "β"
                {"step": 3, "description": "Final answer", "result": str(result)},
            ],
            "final_answer": str(result),
        }
        examples.append(example)

    return examples
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def augment_data(input_file: str, output_file: str, num_new: int = 100):
    """Augment an existing JSONL reasoning file with synthetic examples.

    Args:
        input_file: Path to the existing JSONL file (may not exist yet).
        output_file: Path to write the combined JSONL data to; parent
            directories are created as needed.
        num_new: Number of synthetic examples to append.
    """
    input_path = Path(input_file)

    # Read existing examples. Skip blank/whitespace-only lines — a stray
    # trailing newline would otherwise crash json.loads().
    existing = []
    if input_path.exists():
        with open(input_path, encoding="utf-8") as f:
            for raw_line in f:
                raw_line = raw_line.strip()
                if raw_line:
                    existing.append(json.loads(raw_line))

    # Generate new synthetic examples.
    new_examples = generate_examples(num_new)

    # Write the combined data back out.
    output_path = Path(output_file)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    with open(output_path, "w", encoding="utf-8") as f:
        for item in existing + new_examples:
            f.write(json.dumps(item, ensure_ascii=False) + "\n")

    print(f"Saved {len(existing) + len(new_examples)} examples to {output_file}")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
if __name__ == "__main__":
    # CLI: augment_reasoning.py [input_jsonl] [output_jsonl] [num_new]
    args = sys.argv[1:]
    input_file = args[0] if len(args) > 0 else "data/brain/reasoning_steps.jsonl"
    output_file = args[1] if len(args) > 1 else "data/brain/reasoning_steps.jsonl"
    num_new = int(args[2]) if len(args) > 2 else 100
    augment_data(input_file, output_file, num_new)
|
scripts/preprocess.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Preprocess raw logs and convert to training data format.
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import sys
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def preprocess_logs(raw_dir: str, output_dir: str):
    """Convert raw Gradio log files into the training-data JSONL format.

    Reads every ``logs-*.jsonl`` file under *raw_dir*, collects each JSON
    record, and writes them all to ``<output_dir>/reasoning_steps.jsonl``.

    Returns:
        The number of records written.
    """
    source = Path(raw_dir)
    target = Path(output_dir)
    target.mkdir(parents=True, exist_ok=True)

    # Gather every record from every matching log file.
    # (Records are currently passed through unchanged; any transformation
    # to the training format would happen here.)
    records = []
    for log_file in source.glob("logs-*.jsonl"):
        with open(log_file) as fh:
            records.extend(json.loads(line) for line in fh)

    # Emit the combined training file.
    destination = target / "reasoning_steps.jsonl"
    with open(destination, "w") as out:
        out.writelines(json.dumps(rec, ensure_ascii=False) + "\n" for rec in records)

    print(f"Processed {len(records)} records to {destination}")
    return len(records)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
if __name__ == "__main__":
    # CLI: preprocess.py [raw_logs_dir] [output_dir]
    args = sys.argv[1:]
    raw_dir = args[0] if args else "data/raw_logs"
    output_dir = args[1] if len(args) > 1 else "data/brain"
    preprocess_logs(raw_dir, output_dir)
|
scripts/push_to_hub.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Push local dataset to Hugging Face Hub.
|
| 4 |
+
"""
|
| 5 |
+
import sys
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def push_to_hub(repo_id: str, data_dir: str):
    """Push the local dataset folder to the Hugging Face Hub.

    Args:
        repo_id: Target dataset repo id, e.g. "user/dataset-name".
        data_dir: Local folder whose contents are uploaded.

    Requires a write-capable token in the ``HF_TOKEN`` environment
    variable; prints an error and returns without uploading if missing.
    """
    import os
    from huggingface_hub import HfApi  # dropped unused `login` import

    # Credentials come from the environment only — never hard-coded.
    token = os.environ.get("HF_TOKEN")
    if not token:
        print("Error: HF_TOKEN not set")
        return

    # Ensure the dataset repo exists (no-op when it already does).
    api = HfApi()
    api.create_repo(repo_id=repo_id, token=token, repo_type="dataset", exist_ok=True)

    # Upload the whole folder in a single call.
    api.upload_folder(
        folder_path=data_dir,
        repo_id=repo_id,
        repo_type="dataset",
        token=token,
    )

    print(f"Pushed to https://huggingface.co/datasets/{repo_id}")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
if __name__ == "__main__":
    # CLI: push_to_hub.py [repo_id] [data_dir]
    args = sys.argv[1:]
    repo_id = args[0] if args else "amkyawdev/Amkyaw-Core-L3"
    data_dir = args[1] if len(args) > 1 else "."
    push_to_hub(repo_id, data_dir)
|
scripts/validate_data.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Validate JSONL data format and integrity.
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import sys
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def validate_jsonl(file_path: str) -> bool:
    """Validate that every line of a JSONL file parses to a JSON object.

    Blank or whitespace-only lines (e.g. a stray trailing newline) are
    skipped rather than reported as errors.

    Args:
        file_path: Path to the JSONL file to check.

    Returns:
        True if the file exists and every non-blank line is a JSON
        object, False otherwise.
    """
    path = Path(file_path)
    if not path.exists():
        print(f"Error: File not found: {file_path}")
        return False

    valid = True
    line_num = 0  # keeps the summary correct for an empty file
    with open(path, encoding="utf-8") as f:
        for line_num, line in enumerate(f, start=1):
            if not line.strip():
                continue  # tolerate blank lines instead of flagging them
            try:
                data = json.loads(line)
                # Each record must be an object, not a bare array/scalar.
                if not isinstance(data, dict):
                    print(f"Line {line_num}: Not a JSON object")
                    valid = False
            except json.JSONDecodeError as e:
                print(f"Line {line_num}: Invalid JSON - {e}")
                valid = False

    print(f"Validated {path}: {line_num} lines, valid={valid}")
    return valid
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
if __name__ == "__main__":
    # CLI: validate_data.py [jsonl_file]
    args = sys.argv[1:]
    target = args[0] if args else "data/brain/reasoning_steps.jsonl"
    validate_jsonl(target)
|