theapemachine committed on
Commit
defd8d1
·
1 Parent(s): 7b4d2d2

Add initial project configuration and dependencies


- Introduced `pyproject.toml` for project metadata, specifying the name, version, description, and required Python version.
- Listed essential dependencies including `numpy`, `scipy`, `torch`, and others for the project.
- Added `uv.lock` to manage package versions and ensure reproducibility.
- Updated evaluation runner to use semantic field scoring instead of the previous Tensegrity v2 scoring.
- Enhanced the cognitive controller with new hypothesis generation capabilities and session management.
- Implemented a new causal energy framework for model competition based on prediction errors.
- Introduced FHRR-RNS compositional encoding of observations, strengthening semantic processing.
- Established a unified cognitive engine (`tensegrity.engine`) integrating these components for improved performance and flexibility; a usage sketch of the new scoring path follows below.
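For orientation, a minimal sketch of the renamed scoring path referenced above (constructor arguments are taken from the runner.py diff below; the prompt and choices are illustrative):

    from tensegrity.engine.scoring import ScoringBridge

    # Semantic field scorer: FHRR encoding -> NGC settling -> Hopfield memory readout
    bridge = ScoringBridge(obs_dim=256, hidden_dims=[128, 32], fhrr_dim=2048)
    bridge.reset()

    # Returns per-choice scores plus a confidence value (see _get_tensegrity_scores below)
    scores, confidence = bridge.score_choices(
        "The glass fell off the table. What happens next?",
        ["It shatters.", "It floats away.", "It starts to sing."],
    )
    best = max(range(len(scores)), key=scores.__getitem__)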

pyproject.toml ADDED
@@ -0,0 +1,28 @@
+[project]
+name = "tensegrity"
+version = "0.1.0"
+description = "Non-gradient cognitive architecture (free energy, causal models, Markov blankets)"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "numpy",
+    "scipy",
+    "networkx",
+    "pydantic",
+    "openai",
+    "torch",
+    "transformers",
+    "accelerate",
+    "datasets",
+    "sentence-transformers"
+]
+
+[project.optional-dependencies]
+dev = ["pytest>=7.0"]
+
+[build-system]
+requires = ["hatchling>=1.13"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["tensegrity"]
tensegrity/bench/runner.py CHANGED
@@ -185,8 +185,8 @@ class EvalRunner:
     Runs baseline vs grafted evaluation on any set of tasks.
 
     Modes:
-        "local" — Uses transformers model + confidence-gated Tensegrity v2 scoring
-        "offline" — No LLM; baseline = random, grafted = v2 scoring
+        "local" — Uses transformers model + confidence-gated semantic field scoring
+        "offline" — No LLM; baseline = random, grafted = field scoring
 
     Local mode blending:
         effective_λ = λ * (1 - LLM_confidence / confidence_gate_threshold)
@@ -246,18 +246,18 @@ class EvalRunner:
         return scores
 
     def _get_tensegrity_scores(self, sample: TaskSample) -> Tuple[List[float], float]:
-        """Run Tensegrity v2 scoring bridge on a sample."""
-        from tensegrity.v2.graft import V2ScoringBridge
-        if not hasattr(self, '_v2_bridge'):
-            self._v2_bridge = V2ScoringBridge(
+        """Run semantic field scoring (ScoringBridge) on a sample."""
+        from tensegrity.engine.scoring import ScoringBridge
+        if not hasattr(self, '_field_scorer'):
+            self._field_scorer = ScoringBridge(
                 obs_dim=256, hidden_dims=[128, 32], fhrr_dim=2048,
                 ngc_settle_steps=30, ngc_learning_rate=0.01,
                 hopfield_beta=0.05, confidence_threshold=0.15,
                 context_settle_steps=40, choice_settle_steps=25,
                 context_learning_epochs=3,
             )
-        self._v2_bridge.reset()
-        return self._v2_bridge.score_choices(sample.prompt, sample.choices)
+        self._field_scorer.reset()
+        return self._field_scorer.score_choices(sample.prompt, sample.choices)
 
     def evaluate_sample(self, sample: TaskSample) -> SampleResult:
         """Evaluate a single sample with confidence-gated blending."""
tensegrity/broca/controller.py CHANGED
@@ -31,11 +31,15 @@ from tensegrity.broca.schemas import (
     BeliefState,
     CognitiveAction,
     Hypothesis,
+    RelationMention,
 )
 from tensegrity.broca.interface import BrocaInterface
+from tensegrity.causal.from_proposal import build_scm_from_proposal
 
 logger = logging.getLogger(__name__)
 
+IMPLICIT_RELATION_WEIGHT = 0.3
+
 
 class CognitiveController:
     """
@@ -60,7 +64,8 @@ class CognitiveController:
                  broca: Optional[BrocaInterface] = None,
                  n_hypotheses: int = 8,
                  hypothesis_labels: Optional[List[str]] = None,
-                 use_llm: bool = True):
+                 use_llm: bool = True,
+                 enable_hypothesis_generation: bool = False):
         """
         Args:
             agent: TensegrityAgent instance. Created with defaults if None.
@@ -68,8 +73,10 @@ class CognitiveController:
             n_hypotheses: Number of competing hypotheses to maintain
             hypothesis_labels: Labels for the hypothesis space
             use_llm: If False, uses template-based parse/produce (for testing without API)
+            enable_hypothesis_generation: If True and use_llm, may add LLM-proposed SCMs when tension is high
         """
         self.use_llm = use_llm
+        self.enable_hypothesis_generation = enable_hypothesis_generation
 
         # Cognitive substrate
         n_states = n_hypotheses
@@ -121,6 +128,88 @@ class CognitiveController:
             3: "state_conclusion",
         }
 
+    def reset_session(self, hypothesis_labels: List[str]) -> None:
+        """
+        Start a fresh session for an independent item (e.g. one benchmark sample).
+
+        Rebuilds the substrate agent with dimensions matched to the hypothesis
+        count and clears conversational artifacts.
+        """
+        labels = list(hypothesis_labels)
+        if not labels:
+            labels = ["_empty_"]
+        n_hyp = max(len(labels), 2)
+        self.agent = TensegrityAgent(
+            n_states=n_hyp,
+            n_observations=n_hyp * 4,
+            n_actions=4,
+            sensory_dims=n_hyp,
+            sensory_bits=4,
+            context_dim=32,
+            associative_dim=64,
+            planning_horizon=2,
+            precision=4.0,
+        )
+        self.belief_state = BeliefState(
+            turn=0,
+            hypotheses=[],
+            eliminated_hypotheses=[],
+            confirmed_facts=[],
+            open_questions=[],
+            current_tension=1.0,
+            epistemic_urgency=1.0,
+            free_energy=0.0,
+        )
+        self._init_hypotheses(labels)
+        self._conversation.clear()
+
+    def perceive_only(self, input_text: str) -> Dict[str, Any]:
+        """
+        Parse and run perception + belief update only (no action / no verbalization).
+        """
+        self.belief_state.turn += 1
+        if self.use_llm and self.broca:
+            parsed = self.broca.parse(input_text, context=self._get_parse_context())
+        else:
+            parsed = self._template_parse(input_text)
+        obs_vector = self._observation_to_vector(parsed)
+        perception = self.agent.perceive(obs_vector)
+        self._maybe_inject_causal_hypothesis(perception, input_text)
+        self._update_hypotheses_from_inference(perception, obs_vector)
+        for entity in parsed.entities:
+            fact = (
+                f"[T{self.belief_state.turn}] Observed: {entity.normalized} "
+                f"({entity.entity_type})"
+            )
+            self.belief_state.confirmed_facts.append(fact)
+        for relation in parsed.relations:
+            fact = (
+                f"[T{self.belief_state.turn}] {relation.subject} "
+                f"{relation.predicate} {relation.object}"
+            )
+            if relation.negated:
+                fact = f"NOT({fact})"
+            self.belief_state.confirmed_facts.append(fact)
+        for relation in parsed.implicit_relations:
+            fact = (
+                f"[T{self.belief_state.turn}] (implicit) {relation.subject} "
+                f"{relation.predicate} {relation.object}"
+            )
+            if relation.negated:
+                fact = f"NOT({fact})"
+            self.belief_state.confirmed_facts.append(fact)
+        return {
+            "perception": {
+                "free_energy": perception["free_energy"],
+                "surprise": perception["surprise"],
+                "tension": perception["arena"]["tension"],
+                "epistemic_value": perception["epistemic_value"],
+            },
+            "belief_state": self.belief_state.model_dump(),
+            "parsed_input": parsed.model_dump(),
+            "turn": self.belief_state.turn,
+        }
+
     def _init_hypotheses(self, labels: List[str]):
         """Initialize the hypothesis space with uniform priors."""
         n = len(labels)
@@ -135,6 +224,63 @@ class CognitiveController:
             for i, label in enumerate(labels)
         ]
 
+    @staticmethod
+    def _apply_relation_evidence(
+        features: np.ndarray,
+        hyp_labels: Dict[str, int],
+        relations: List[RelationMention],
+        weight: float,
+    ) -> None:
+        """Add hypothesis-indexed evidence from typed relations (scaled by weight)."""
+        for relation in relations:
+            subj = relation.subject.lower()
+            obj = relation.object.lower()
+            subj_matches = [i for label, i in hyp_labels.items() if subj in label or label in subj]
+            obj_matches = [i for label, i in hyp_labels.items() if obj in label or label in obj]
+            sign = -1.0 if relation.negated else 1.0
+            w = weight
+            if relation.predicate in ("causes", "enables", "confirms", "is_a", "has_property"):
+                for idx in obj_matches:
+                    features[idx] += 0.8 * sign * w
+                for idx in subj_matches:
+                    features[idx] += 0.4 * sign * w
+            elif relation.predicate in ("prevents", "contradicts"):
+                for idx in obj_matches:
+                    features[idx] -= 0.8 * sign * w
+                for idx in subj_matches:
+                    features[idx] -= 0.3 * sign * w
+
+    def _maybe_inject_causal_hypothesis(self, perception: Dict[str, Any], input_text: str) -> None:
+        """If causal fit is poor, ask Broca for a new SCM and register it (LLM only)."""
+        if not self.enable_hypothesis_generation or not self.use_llm or not self.broca:
+            return
+        if not hasattr(self.broca, "propose_causal_hypothesis"):
+            return
+        ar = perception.get("arena") or {}
+        if ar.get("tension", 0) < 0.72:
+            return
+        lls = ar.get("log_likelihoods") or {}
+        if lls and max(lls.values()) > -2.0:
+            return
+        try:
+            names = list(self.agent.arena.models.keys())
+            prop = self.broca.propose_causal_hypothesis(input_text[:2000], names)
+            scm = build_scm_from_proposal(prop)
+            if scm.name in self.agent.arena.models:
+                return
+            self.agent.add_causal_model(scm)
+            q = perception["belief_state"]
+            obs_idx = perception["observation_index"]
+            causal_obs: Dict[str, int] = {
+                "state": int(np.argmax(q)),
+                "observation": int(obs_idx),
+            }
+            if "mediated_causal" in self.agent.arena.models:
+                causal_obs["cause"] = int(np.argmax(q))
+            perception["arena"] = self.agent.arena.compete(causal_obs)
+        except Exception as e:
+            logger.warning("Dynamic causal hypothesis skipped: %s", e)
+
     def _observation_to_vector(self, parsed: ParsedObservation) -> np.ndarray:
         """
         Convert a ParsedObservation into a numeric vector for Tensegrity.
@@ -165,26 +311,10 @@ class CognitiveController:
                 if ename in label or label in ename:
                     features[idx] += 0.5
 
-        for relation in parsed.relations:
-            subj = relation.subject.lower()
-            obj = relation.object.lower()
-
-            # Find which hypotheses the subject/object refer to
-            subj_matches = [i for label, i in hyp_labels.items() if subj in label or label in subj]
-            obj_matches = [i for label, i in hyp_labels.items() if obj in label or label in obj]
-
-            sign = -1.0 if relation.negated else 1.0
-
-            if relation.predicate in ("causes", "enables", "confirms", "is_a", "has_property"):
-                for idx in obj_matches:
-                    features[idx] += 0.8 * sign
-                for idx in subj_matches:
-                    features[idx] += 0.4 * sign
-            elif relation.predicate in ("prevents", "contradicts"):
-                for idx in obj_matches:
-                    features[idx] -= 0.8 * sign
-                for idx in subj_matches:
-                    features[idx] -= 0.3 * sign
+        self._apply_relation_evidence(features, hyp_labels, parsed.relations, weight=1.0)
+        self._apply_relation_evidence(
+            features, hyp_labels, parsed.implicit_relations, weight=IMPLICIT_RELATION_WEIGHT,
+        )
 
         # Linguistic confidence modulates the whole vector
         features *= parsed.confidence_linguistic
@@ -388,6 +518,7 @@
         # === 2. PROCESS (Tensegrity cognition) ===
         obs_vector = self._observation_to_vector(parsed)
         perception = self.agent.perceive(obs_vector)
+        self._maybe_inject_causal_hypothesis(perception, input_text)
 
         # Update hypothesis probabilities from Tensegrity beliefs
        self._update_hypotheses_from_inference(perception, obs_vector)
@@ -403,6 +534,15 @@
                 fact = f"NOT({fact})"
             self.belief_state.confirmed_facts.append(fact)
 
+        for relation in parsed.implicit_relations:
+            fact = (
+                f"[T{self.belief_state.turn}] (implicit) {relation.subject} "
+                f"{relation.predicate} {relation.object}"
+            )
+            if relation.negated:
+                fact = f"NOT({fact})"
+            self.belief_state.confirmed_facts.append(fact)
+
         # === 3. SELECT ACTION (Tensegrity decides) ===
         action = self._select_cognitive_action(perception)
 
@@ -588,6 +728,7 @@
         return ParsedObservation(
             entities=entities,
             relations=relations,
+            implicit_relations=[],
             is_question=is_question,
             is_assertion=not is_question and not is_command,
             is_command=is_command,
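A minimal sketch of the new session API added in this file (labels and input text are illustrative; use_llm=False selects the template parser, so no API key is needed):

    from tensegrity.broca.controller import CognitiveController

    controller = CognitiveController(use_llm=False)
    controller.reset_session(["rain", "sprinkler", "hose"])   # fresh agent per benchmark item
    result = controller.perceive_only("The grass is wet but the sky has been clear all day.")
    print(result["turn"], result["perception"]["free_energy"])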
tensegrity/broca/interface.py CHANGED
@@ -17,7 +17,7 @@ data that doesn't match the schema.
 import os
 import json
 import logging
-from typing import Optional, Type, TypeVar, Union
+from typing import Optional, Type, TypeVar, Union, List
 
 from pydantic import BaseModel
 
@@ -28,6 +28,7 @@ from tensegrity.broca.schemas import (
     QuestionUtterance,
     BeliefState,
     CognitiveAction,
+    ProposedSCM,
 )
 
 logger = logging.getLogger(__name__)
@@ -132,9 +133,12 @@ class BrocaInterface:
             ParsedObservation with typed fields
         """
         system_prompt = (
-            "You are a linguistic parser. Extract structured information from the input. "
-            "Do NOT interpret, evaluate, reason about, or add to the content. "
-            "Extract only what is explicitly stated. "
+            "You are a linguistic parser. Extract structured information from the input.\n"
+            "relations: predicates that are DIRECTLY stated in the text.\n"
+            "implicit_relations: the SAME RelationMention shape for links that are NOT quoted "
+            "but are logically required for the scenario to hold (commonsense bridges only). "
+            "Keep implicit_relations sparse; do not invent unrelated facts.\n"
+            "Do NOT output prose reasoning — only typed fields. "
             "If something is unclear, set confidence_linguistic lower."
         )
         if context:
@@ -148,6 +152,34 @@ class BrocaInterface:
         self._parse_calls += 1
         return self._call_llm(messages, ParsedObservation, self.max_parse_tokens)
 
+    def propose_causal_hypothesis(
+        self,
+        situation_summary: str,
+        existing_model_names: List[str],
+    ) -> ProposedSCM:
+        """
+        Propose a new structural causal model when existing SCMs fit poorly.
+
+        Returns a bounded DAG schema only (no free-form reasoning).
+        """
+        system_prompt = (
+            "You are a causal model designer. Propose ONE small directed acyclic graph "
+            "as variable names and typed edges (causes / prevents / enables). "
+            "Use short snake_case identifiers. At most 12 edges. "
+            "Name must differ from existing model names. Output only the schema fields."
+        )
+        existing = ", ".join(existing_model_names[:24]) if existing_model_names else "(none)"
+        user_content = (
+            f"Existing models: {existing}\n\n"
+            f"Observations / situation:\n{situation_summary[:2400]}"
+        )
+        messages = [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_content},
+        ]
+        self._parse_calls += 1
+        return self._call_llm(messages, ProposedSCM, self.max_parse_tokens)
+
     def parse_feedback(self, feedback: str,
                        action_taken: str,
                        hypotheses: list) -> ParsedFeedback:
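A hypothetical call to the new method (the summary text and the "direct_causal" name are illustrative; "mediated_causal" is a model name that appears elsewhere in this commit, and the class is assumed to be configured with its usual API credentials):

    from tensegrity.broca.interface import BrocaInterface

    broca = BrocaInterface()
    proposal = broca.propose_causal_hypothesis(
        situation_summary="Grass is wet; no rain was reported; the sprinkler ran overnight.",
        existing_model_names=["direct_causal", "mediated_causal"],
    )
    for edge in proposal.edges:
        print(edge.source, edge.mechanism, edge.target)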
tensegrity/broca/schemas.py CHANGED
@@ -43,6 +43,20 @@ class RelationMention(BaseModel):
     negated: bool = False
 
 
+class CausalEdge(BaseModel):
+    """One edge in a proposed structural causal model (SCM)."""
+    source: str = Field(description="Cause or enabling variable name")
+    target: str = Field(description="Effect variable name")
+    mechanism: Literal["causes", "prevents", "enables"]
+
+
+class ProposedSCM(BaseModel):
+    """LLM-proposed SCM as a named DAG plus short description."""
+    name: str = Field(max_length=64, description="Short identifier, suitable for SCM.name")
+    description: str = Field(max_length=512, description="One sentence: what this model claims")
+    edges: List[CausalEdge] = Field(max_length=48, description="Directed edges; must be acyclic")
+
+
 class ParsedObservation(BaseModel):
     """
     Schema for LLM-as-parser: convert natural language into structured observation.
@@ -51,6 +65,13 @@ class ParsedObservation(BaseModel):
     """
     entities: List[EntityMention] = Field(default_factory=list)
     relations: List[RelationMention] = Field(default_factory=list)
+    implicit_relations: List[RelationMention] = Field(
+        default_factory=list,
+        description=(
+            "Typed implications required for consistency with the text but not literally stated; "
+            "use closed predicates only (same vocabulary as relations)."
+        ),
+    )
     is_question: bool = Field(description="Is the input asking for information?")
     is_assertion: bool = Field(description="Is the input stating a fact/claim?")
     is_command: bool = Field(description="Is the input requesting an action?")
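Since these schemas are filled by the structured-output LLM call, a quick validation sketch, assuming pydantic v2 (consistent with the model_dump() calls in the controller diff; the JSON payload is illustrative):

    from tensegrity.broca.schemas import ProposedSCM

    raw = (
        '{"name": "rain_model", "description": "Rain wets the grass.",'
        ' "edges": [{"source": "rain", "target": "wet_grass", "mechanism": "causes"}]}'
    )
    proposal = ProposedSCM.model_validate_json(raw)  # rejects unknown mechanisms / oversize fields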
tensegrity/causal/from_proposal.py ADDED
@@ -0,0 +1,46 @@
+"""
+Build a StructuralCausalModel from a structured LLM proposal (ProposedSCM).
+"""
+
+from __future__ import annotations
+
+import logging
+import networkx as nx
+
+from tensegrity.broca.schemas import ProposedSCM
+from tensegrity.causal.scm import StructuralCausalModel
+
+logger = logging.getLogger(__name__)
+
+
+def build_scm_from_proposal(proposal: ProposedSCM, n_values: int = 4) -> StructuralCausalModel:
+    """
+    Convert a ProposedSCM into a StructuralCausalModel with discrete variables.
+    Drops edges that would create cycles. Variable order follows a topological sort.
+    """
+    G = nx.DiGraph()
+    for e in proposal.edges:
+        G.add_edge(e.source.strip(), e.target.strip())
+
+    if G.number_of_nodes() == 0:
+        logger.warning("ProposedSCM '%s' has no edges; returning empty SCM", proposal.name)
+        scm = StructuralCausalModel(name=proposal.name[:60])
+        scm.add_variable("observation", n_values=n_values, parents=[])
+        return scm
+
+    if not nx.is_directed_acyclic_graph(G):
+        # Greedily remove edges that introduce cycles (reverse insertion order)
+        edges_list = [(e.source.strip(), e.target.strip()) for e in proposal.edges]
+        G.clear()
+        for s, t in edges_list:
+            G.add_edge(s, t)
+            if not nx.is_directed_acyclic_graph(G):
+                G.remove_edge(s, t)
+                logger.debug("Dropped cyclic edge %s -> %s", s, t)
+
+    order = list(nx.topological_sort(G))
+    scm = StructuralCausalModel(name=proposal.name[:60].replace(" ", "_"))
+    for node in order:
+        parents = sorted(G.predecessors(node))
+        scm.add_variable(node, n_values=n_values, parents=parents)
+    return scm
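A sketch of the cycle-dropping behavior above (variable names are illustrative; the schema fields and the builder signature come straight from this commit):

    from tensegrity.broca.schemas import CausalEdge, ProposedSCM
    from tensegrity.causal.from_proposal import build_scm_from_proposal

    proposal = ProposedSCM(
        name="sprinkler_world",
        description="Rain and the sprinkler both wet the grass.",
        edges=[
            CausalEdge(source="rain", target="wet_grass", mechanism="causes"),
            CausalEdge(source="sprinkler", target="wet_grass", mechanism="causes"),
            CausalEdge(source="wet_grass", target="rain", mechanism="causes"),  # cycle
        ],
    )
    scm = build_scm_from_proposal(proposal, n_values=4)  # the cyclic edge is dropped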
tensegrity/core/agent.py CHANGED
@@ -28,6 +28,7 @@ from tensegrity.memory.associative import AssociativeMemory
 from tensegrity.causal.arena import CausalArena
 from tensegrity.causal.scm import StructuralCausalModel
 from tensegrity.inference.free_energy import FreeEnergyEngine
+from tensegrity.engine.unified_field import UnifiedField
 
 logger = logging.getLogger(__name__)
 
@@ -132,9 +133,20 @@ class TensegrityAgent:
         self._step_count = 0
         self._total_surprise = 0.0
         self._total_free_energy = 0.0
+        self._prev_belief_for_transition: Optional[np.ndarray] = None
 
         # Initialize with default competing models
         self._init_default_models()
+
+        # Single perceptual substrate: FHRR → NGC → Hopfield (replaces parallel Morton-sense path).
+        self.field = UnifiedField(
+            obs_dim=256,
+            hidden_dims=[128, 32],
+            fhrr_dim=2048,
+            hopfield_beta=0.01,
+            ngc_settle_steps=20,
+            ngc_learning_rate=0.005,
+        )
 
     def _init_default_models(self):
         """
@@ -192,107 +204,71 @@ class TensegrityAgent:
 
     def perceive(self, raw_observation: np.ndarray) -> Dict[str, Any]:
         """
-        Process a raw observation through the full perception pipeline.
-
-        1. Morton-encode the raw data (Markov blanket sensory boundary)
-        2. Map to observation index
-        3. Run free energy minimization (state inference)
-        4. Update all memory systems
-        5. Run causal arena competition
-        6. Store in episodic memory
-        7. Update associative memory
-
-        Args:
-            raw_observation: Raw sensory data of any modality.
-                Shape: (n_points, sensory_dims) or (sensory_dims,)
-
-        Returns:
-            Perception results including beliefs, surprise, free energy
+        One perception path: numeric vector → UnifiedField (FHRR / NGC / Hopfield)
+        → discrete observation index → active inference engine → causal arena.
+
+        Episodic and classical Hopfield associative traces are not written here;
+        memory consolidation for this path lives inside UnifiedField.
         """
         self._step_count += 1
-
-        # === 1. MARKOV BLANKET: Morton encode ===
-        morton_codes = self.blanket.sense(raw_observation)
-        obs_idx = self._morton_to_obs_index(morton_codes)
-
-        # === 2. INFERENCE ENGINE: Minimize free energy ===
+        raw = np.asarray(raw_observation, dtype=np.float64).ravel()
+
+        cycle = self.field.observe(raw, input_type="numeric")
+        obs_vec = cycle["observation"]
+        decomp = cycle["energy"]
+        surprise = float(decomp.surprise)
+
+        # Deterministic discrete index for generative-model matrices
+        v = np.arange(1, len(obs_vec) + 1, dtype=np.float64)
+        obs_idx = int(np.abs(np.dot(obs_vec, v))) % max(self.n_obs, 1)
+
         A = self.epistemic.A
         B = self.epistemic.B
         C = self.epistemic.C
         D = self.epistemic.D
         log_A = self.epistemic.log_A
 
         inference_result = self.engine.step(obs_idx, A, B, C, D, log_A)
-        q_states = inference_result['belief_state']
-        F = inference_result['free_energy']
-
-        # === 3. EPISTEMIC MEMORY: Bayesian counting update ===
+        q_states = inference_result["belief_state"]
+        F = float(inference_result["free_energy"])
+
         self.epistemic.update_likelihood(obs_idx, q_states)
-        if self.engine.prev_action is not None and self._step_count > 1:
-            # Get previous belief state from episodic memory
-            prev_episodes = self.episodic.get_sequence(
-                self._step_count - 2, self._step_count - 2)
-            if prev_episodes:
-                prev_belief = prev_episodes[0].belief_state
-                self.epistemic.update_transition(
-                    prev_belief, q_states, self.engine.prev_action)
-
-        # === 4. CAUSAL ARENA: Compete ===
-        # Map observation to causal variable values
+        if (self.engine.prev_action is not None
+                and self._prev_belief_for_transition is not None):
+            self.epistemic.update_transition(
+                self._prev_belief_for_transition, q_states,
+                self.engine.prev_action)
+        self._prev_belief_for_transition = q_states.copy()
+
         causal_obs = {
-            'state': int(np.argmax(q_states)),
-            'observation': obs_idx,
+            "state": int(np.argmax(q_states)),
+            "observation": obs_idx,
         }
-        # Add 'cause' for the mediated model
-        if 'mediated_causal' in self.arena.models:
-            causal_obs['cause'] = int(np.argmax(q_states))  # Best guess
+        if "mediated_causal" in self.arena.models:
+            causal_obs["cause"] = int(np.argmax(q_states))
 
         arena_result = self.arena.compete(causal_obs)
-
-        # === 5. EPISODIC MEMORY: Encode experience ===
-        episode = self.episodic.encode(
-            observation=raw_observation,
-            morton_code=morton_codes if isinstance(morton_codes, np.ndarray)
-                else np.array([morton_codes]),
-            belief_state=q_states,
-            action=self.engine.prev_action,
-            surprise=self.blanket.surprise,
-            free_energy=F,
-            metadata={
-                'obs_idx': obs_idx,
-                'arena_winner': arena_result['winner'],
-                'tension': arena_result['tension'],
-            }
-        )
-
-        # === 6. ASSOCIATIVE MEMORY: Store pattern ===
-        pattern = self._obs_to_associative_pattern(obs_idx, q_states)
-        self.associative.store(pattern, metadata={
-            'obs_idx': obs_idx,
-            'step': self._step_count,
-            'surprise': self.blanket.surprise,
-        })
-
-        # === 7. ASSOCIATIVE RETRIEVAL: Pattern completion ===
-        retrieved_pattern, energy = self.associative.retrieve(pattern, return_energy=True)
-
-        # Track cumulative metrics
-        self._total_surprise += self.blanket.surprise
+
+        morton_codes = np.array([obs_idx], dtype=np.int64)
+        self.blanket.surprise = surprise
+
+        self._total_surprise += surprise
         self._total_free_energy += F
 
         return {
-            'step': self._step_count,
-            'morton_codes': morton_codes,
-            'observation_index': obs_idx,
-            'belief_state': q_states,
-            'free_energy': F,
-            'surprise': self.blanket.surprise,
-            'action': inference_result['action'],
-            'action_confidence': inference_result['action_confidence'],
-            'arena': arena_result,
-            'associative_energy': energy,
-            'epistemic_value': self.engine.epistemic_value,
-            'pragmatic_value': self.engine.pragmatic_value,
+            "step": self._step_count,
+            "morton_codes": morton_codes,
+            "observation_index": obs_idx,
+            "belief_state": q_states,
+            "free_energy": F,
+            "surprise": surprise,
+            "action": inference_result["action"],
+            "action_confidence": inference_result["action_confidence"],
+            "arena": arena_result,
+            "associative_energy": float(decomp.memory),
+            "epistemic_value": self.engine.epistemic_value,
+            "pragmatic_value": self.engine.pragmatic_value,
+            "field_cycle": cycle,
        }
 
     def act(self) -> Dict[str, Any]:
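A minimal sketch of the rewritten perception path (the no-argument constructor follows the "Created with defaults if None" note in the controller docstring; the input vector is illustrative):

    import numpy as np
    from tensegrity.core.agent import TensegrityAgent

    agent = TensegrityAgent()                    # defaults; builds the UnifiedField internally
    result = agent.perceive(np.random.rand(8))   # any numeric vector; ravel()ed internally
    print(result["observation_index"], result["surprise"], result["arena"]["winner"])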
tensegrity/engine/__init__.py ADDED
@@ -0,0 +1,18 @@
+"""
+Unified cognitive engine: compositional encoding, predictive coding, unified field,
+semantic scoring, and optional energy-based causal competition.
+"""
+
+from tensegrity.engine.unified_field import UnifiedField, HopfieldMemoryBank, EnergyDecomposition
+from tensegrity.engine.ngc import PredictiveCodingCircuit, LayerState
+from tensegrity.engine.fhrr import (
+    FHRREncoder,
+    FHRRCodebook,
+    SemanticFHRRCodebook,
+    bind,
+    bundle,
+    unbind,
+    permute,
+)
+from tensegrity.engine.causal_energy import EnergyCausalArena, CausalEnergyTerm
+from tensegrity.engine.scoring import ScoringBridge, NGCLogitsProcessor
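The FHRR re-exports above are the compositional-encoding vocabulary mentioned in the commit message. A sketch of the algebra, under stated assumptions: the FHRREncoder constructor argument and the two-argument bind/unbind/bundle signatures are guesses inferred from these re-exports and from the encode_sequence usage in scoring.py, not confirmed by the diff.

    import numpy as np
    from tensegrity.engine import FHRREncoder, bind, bundle, unbind

    enc = FHRREncoder(dim=2048)                 # assumed kwarg; matches fhrr_dim=2048 elsewhere
    hv = enc.encode_sequence(["wet", "grass"])  # sequence hypervector (as in NGCLogitsProcessor)

    a = np.random.randn(2048)
    b = np.random.randn(2048)
    pair = bind(a, b)           # role-filler binding: a reversible association
    approx_b = unbind(pair, a)  # recovers b up to noise
    superposed = bundle(a, b)   # set-like superposition of both vectors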
tensegrity/{v2 → engine}/causal_energy.py RENAMED
@@ -10,13 +10,8 @@ Where:
     pa(v) = parents of v in the causal DAG
 
 Multiple SCMs compete. The model with lowest causal energy provides
-the best explanation. This replaces the v1 causal arena's log-likelihood
-comparison with a unified energy-based comparison.
-
-The causal energy connects to the NGC energy through shared variables:
-if a causal variable maps to an NGC layer's abstract state, then the
-NGC prediction error and the causal prediction error are literally
-the same quantity at different scales of description.
+the best explanation. This complements the log-likelihood CausalArena in
+``tensegrity.causal.arena`` when an energy-based readout of SCM fit is required.
 """
 
 import numpy as np
@@ -81,18 +76,9 @@ class CausalEnergyTerm:
         return mech.cpt[:, config_idx]
 
 
-class CausalArenaV2:
+class EnergyCausalArena:
     """
-    v2 causal arena: SCMs compete via energy, not log-likelihood.
-
-    Each model is wrapped in a CausalEnergyTerm. The model with
-    lowest energy wins. The tension is the ratio of energies
-    (or equivalently, the softmax distribution over models).
-
-    This integrates with the unified energy landscape:
-    E_total = E_perception(NGC) + E_memory(Hopfield) + E_causal(arena)
-
-    Where E_causal = min_k E_causal(M_k) — we use the best model's energy.
+    SCMs compete via prediction-error energy. Lowest energy wins.
     """
 
     def __init__(self, precision: float = 1.0, beta: float = 1.0):
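A toy numeric reading of the energy rule described in the module docstring (this is an illustration, not the module's actual API):

    import numpy as np

    def causal_energy(log_probs, precision=1.0):
        """Sum of precision-weighted negative log-probabilities of each
        variable's value given its parents, over the DAG (toy version)."""
        return -precision * float(np.sum(log_probs))

    # The model that assigns its variables higher probability has lower energy and wins.
    E_a = causal_energy(np.log([0.8, 0.7]))   # ~0.58
    E_b = causal_energy(np.log([0.3, 0.2]))   # ~2.81
    winner = "model_a" if E_a < E_b else "model_b"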
tensegrity/{v2 → engine}/fhrr.py RENAMED
File without changes
tensegrity/{v2 → engine}/ngc.py RENAMED
@@ -68,8 +68,14 @@ class PredictiveCodingCircuit:
                  tau: float = 1.0,
                  gamma: float = 0.01,
                  settle_steps: int = 20,
+                 settle_steps_warm: int = 5,
+                 obs_change_threshold: float = 1e-2,
                  learning_rate: float = 0.01,
-                 activation: str = "tanh"):
+                 activation: str = "tanh",
+                 adaptive_precision: bool = True,
+                 precision_momentum: float = 0.9,
+                 precision_min: float = 0.1,
+                 precision_max: float = 100.0):
         """
         Args:
             layer_sizes: [dim_sensory, dim_hidden1, ..., dim_top]
@@ -79,15 +85,26 @@ class PredictiveCodingCircuit:
             tau: Membrane time constant (settling speed)
             gamma: State decay rate (leaky integration)
             settle_steps: How many steps to run before declaring convergence
+            settle_steps_warm: Steps when the observation is nearly unchanged (warm-started z)
+            obs_change_threshold: L2 change above this triggers full settle_steps
             learning_rate: Hebbian learning rate for synaptic updates
             activation: Nonlinearity: "tanh", "relu", "sigmoid", or "linear"
+            adaptive_precision: If True, update precisions from prediction-error variance in learn()
+            precision_momentum: EMA factor for precision updates (higher = slower change)
+            precision_min / precision_max: Clamp learned precisions
         """
         self.n_layers = len(layer_sizes)
         self.layer_sizes = layer_sizes
         self.tau = tau
         self.gamma = gamma
         self.settle_steps = settle_steps
+        self.settle_steps_warm = max(1, int(settle_steps_warm))
+        self.obs_change_threshold = obs_change_threshold
         self.lr = learning_rate
+        self.adaptive_precision = adaptive_precision
+        self.precision_momentum = precision_momentum
+        self.precision_min = precision_min
+        self.precision_max = precision_max
 
         # Activation function
         self._phi, self._phi_deriv = self._get_activation(activation)
@@ -123,6 +140,9 @@ class PredictiveCodingCircuit:
         # Energy tracking
         self.energy_history: List[float] = []
         self.error_history: List[List[float]] = []  # Per-layer error norms
+
+        # Warm-start: last observation for change detection
+        self._last_obs: Optional[np.ndarray] = None
 
     def _get_activation(self, name: str):
         """Get activation function and its derivative."""
@@ -206,12 +226,6 @@ class PredictiveCodingCircuit:
         Returns:
             Settling diagnostics
         """
-        n_steps = steps or self.settle_steps
-
-        if not self._initialized:
-            self._init_layers(observation)
-
-        # Clamp sensory layer
         obs = np.asarray(observation, dtype=np.float64)
         if len(obs) != self.layer_sizes[0]:
             # Project to sensory dimension
@@ -222,6 +236,25 @@ class PredictiveCodingCircuit:
             padded[:len(obs)] = obs
             obs = padded
 
+        obs_changed = True
+        if self._last_obs is not None and self._last_obs.shape == obs.shape:
+            if float(np.linalg.norm(obs - self._last_obs)) <= self.obs_change_threshold:
+                obs_changed = False
+        self._last_obs = obs.copy()
+
+        if steps is not None:
+            n_steps = steps
+        elif not self._initialized:
+            n_steps = self.settle_steps
+        elif obs_changed:
+            n_steps = self.settle_steps
+        else:
+            n_steps = self.settle_steps_warm
+
+        if not self._initialized:
+            self._init_layers(obs)
+
+        # Clamp sensory layer
         self.layers[0].z = obs.copy()
 
         energy_trace = []
@@ -295,6 +328,17 @@ class PredictiveCodingCircuit:
         """
         effective_lr = self.lr * modulation
 
+        if self.adaptive_precision and self.layers:
+            for ell in range(self.n_layers):
+                sq_error = float(np.mean(self.layers[ell].error ** 2))
+                target_precision = 1.0 / max(sq_error, 1e-6)
+                mom = self.precision_momentum
+                self.precisions[ell] = mom * self.precisions[ell] + (1.0 - mom) * target_precision
+                self.precisions[ell] = float(
+                    np.clip(self.precisions[ell], self.precision_min, self.precision_max)
+                )
+                self.layers[ell].precision = self.precisions[ell]
+
         for ell in range(self.n_layers - 1):
             error_below = self.layers[ell].error
             z_above = self._phi(self.layers[ell + 1].z)
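A sketch of the new warm-start and adaptive-precision behavior (layer sizes are illustrative; learn() is assumed to accept the modulation argument visible in the hunk above):

    import numpy as np
    from tensegrity.engine.ngc import PredictiveCodingCircuit

    ngc = PredictiveCodingCircuit([256, 128, 32], settle_steps=20,
                                  settle_steps_warm=5, obs_change_threshold=1e-2)
    obs = np.random.rand(256)
    ngc.settle(obs)             # cold start / changed input: full 20 settling steps
    ngc.settle(obs)             # unchanged input (L2 delta <= 1e-2): only 5 warm steps
    ngc.learn(modulation=1.0)   # Hebbian update; also EMA-adapts per-layer precisions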
tensegrity/{v2/graft.py → engine/scoring.py} RENAMED
@@ -1,25 +1,12 @@
 """
-v2 Graft: Semantic scoring bridge + NGC logit bias injection.
-
-Scoring bridge (V2ScoringBridge):
-    Three-tier scoring for multiple-choice tasks:
-    1. SENTENCE-LEVEL: sbert cosine(prompt, choice) — strongest semantic signal
-    2. TOKEN-LEVEL: Semantic FHRR compositional similarity
-    3. NGC ENERGY: Hierarchical prediction error after settling
-
-    All signals z-normalized and combined (1.0/0.3/0.2 weights).
-    Convergence gate abstains when signal has insufficient spread.
-
-NGCLogitsProcessor:
-    Per-decode-step logit bias injection during LLM generation.
-    NGC prediction errors projected into vocabulary space via fixed random matrices.
-    Only emits when NGC energy has converged (graceful fallback otherwise).
+Semantic scoring bridge + NGC logit bias injection (part of the unified engine).
 """
 
 import numpy as np
 from typing import Dict, List, Optional, Callable, Set, Tuple
 import math
 import logging
+import threading
 
 logger = logging.getLogger(__name__)
 
@@ -37,7 +24,8 @@ class NGCLogitsProcessor:
     supports_continuous_batching = False
 
     def __init__(self, field, tokenizer, vocab_projections=None,
-                 scale=1.0, energy_gate=0.1, max_settle_steps=30, max_bias=5.0):
+                 scale=1.0, energy_gate=0.1, max_settle_steps=30, max_bias=5.0,
+                 async_cognitive: bool = True):
         _ensure_torch()
         self.field = field
         self.tokenizer = tokenizer
@@ -45,11 +33,29 @@ class NGCLogitsProcessor:
         self.energy_gate = energy_gate
         self.max_settle_steps = max_settle_steps
         self.max_bias = max_bias
+        self.async_cognitive = async_cognitive
         self.vocab_size = tokenizer.vocab_size
         self.projections = vocab_projections or self._build_projections()
         self._step_count = 0
         self._emissions = 0
         self._total_settle_steps = 0
+
+        self._lock = threading.Lock()
+        self._halt = threading.Event()
+        self._wake = threading.Event()
+        self._pending_ids: Optional[List[int]] = None
+        self._latest_bias_np: Optional[np.ndarray] = None
+        self._worker: Optional[threading.Thread] = None
+        if self.async_cognitive:
+            self._worker = threading.Thread(target=self._cognitive_loop, daemon=True)
+            self._worker.start()
+
+    def close(self):
+        self._halt.set()
+        self._wake.set()
+        if self._worker is not None:
+            self._worker.join(timeout=2.0)
+            self._worker = None
 
     def _build_projections(self):
         projections = []
@@ -60,19 +66,17 @@ class NGCLogitsProcessor:
             projections.append(P)
         return projections
 
-    def __call__(self, input_ids, scores):
-        self._step_count += 1
-        ids = input_ids[0].tolist()[-16:]
+    def _compute_bias_from_ids(self, ids: List[int]) -> Optional[np.ndarray]:
         text = self.tokenizer.decode(ids, skip_special_tokens=True)
         tokens = text.lower().split()
         if not tokens:
-            return scores
+            return None
         obs = self.field._fhrr_to_obs(self.field.encoder.encode_sequence(tokens))
-        settle = self.field.ngc.settle(obs, steps=self.max_settle_steps)
-        self._total_settle_steps += self.max_settle_steps
+        settle = self.field.ngc.settle(obs)
+        self._total_settle_steps += int(settle.get("settle_steps", self.max_settle_steps))
         et = settle["energy_trace"]
         if len(et) < 2 or abs(et[-1] - et[-2]) >= self.energy_gate:
-            return scores
+            return None
         bias = np.zeros(self.vocab_size, dtype=np.float64)
         for ell in range(self.field.ngc.n_layers):
             err = self.field.ngc.layers[ell].error
@@ -83,7 +87,46 @@ class NGCLogitsProcessor:
         bias *= self.scale * confidence
         np.clip(bias, -self.max_bias, self.max_bias, out=bias)
         self._emissions += 1
-        return scores + torch.tensor(bias, device=scores.device, dtype=scores.dtype).unsqueeze(0)
+        return bias
+
+    def _cognitive_loop(self):
+        while not self._halt.is_set():
+            if not self._wake.wait(timeout=0.05):
+                continue
+            self._wake.clear()
+            if self._halt.is_set():
+                break
+            ids = self._pending_ids
+            if ids is None:
+                continue
+            try:
+                bias_np = self._compute_bias_from_ids(ids)
+            except Exception as e:
+                logger.debug("NGC cognitive worker: %s", e)
+                bias_np = None
+            with self._lock:
+                self._latest_bias_np = bias_np
+
+    def __call__(self, input_ids, scores):
+        self._step_count += 1
+        ids = input_ids[0].tolist()[-16:]
+        if self.async_cognitive:
+            self._pending_ids = ids
+            self._wake.set()
+            with self._lock:
+                bias_np = None if self._latest_bias_np is None else self._latest_bias_np.copy()
+            if bias_np is None:
+                return scores
+            return scores + torch.tensor(bias_np, device=scores.device, dtype=scores.dtype).unsqueeze(0)
+
+        try:
+            bias_np = self._compute_bias_from_ids(ids)
+        except Exception as e:
+            logger.debug("NGCLogitsProcessor: %s", e)
+            return scores
+        if bias_np is None:
+            return scores
+        return scores + torch.tensor(bias_np, device=scores.device, dtype=scores.dtype).unsqueeze(0)
 
     @property
     def statistics(self):
@@ -94,7 +137,7 @@ class NGCLogitsProcessor:
         }
 
 
-class V2ScoringBridge:
+class ScoringBridge:
     """
     Semantic scoring bridge for benchmark evaluation.
 
@@ -107,7 +150,7 @@ class V2ScoringBridge:
                  hopfield_beta=0.05, confidence_threshold=0.15,
                  context_settle_steps=40, choice_settle_steps=25,
                  context_learning_epochs=3):
-        from tensegrity.v2.field import UnifiedField
+        from tensegrity.engine.unified_field import UnifiedField
         self.field = field or UnifiedField(
             obs_dim=obs_dim, hidden_dims=hidden_dims or [128, 32],
             fhrr_dim=fhrr_dim, hopfield_beta=hopfield_beta,
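A hedged end-to-end sketch of the async processor with transformers (the model name is the HybridPipeline default used elsewhere in this commit; prompt and generation settings are illustrative):

    from transformers import AutoModelForCausalLM, AutoTokenizer, LogitsProcessorList
    from tensegrity.engine.scoring import ScoringBridge, NGCLogitsProcessor

    tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")

    bridge = ScoringBridge()                               # builds a UnifiedField internally
    proc = NGCLogitsProcessor(bridge.field, tok, async_cognitive=True)
    try:
        ids = tok("The sky is", return_tensors="pt").input_ids
        out = model.generate(ids, max_new_tokens=20,
                             logits_processor=LogitsProcessorList([proc]))
    finally:
        proc.close()   # stop the daemon worker; decoding degrades gracefully without bias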
tensegrity/{v2/field.py → engine/unified_field.py} RENAMED
@@ -1,8 +1,8 @@
 """
 Unified Energy Landscape: One functional to rule them all.
 
-The v1 architecture had four separate components doing four separate kinds
-of energy minimization. This module unifies them into a single energy
+Earlier designs used separate components for separate kinds of energy
+minimization. This module unifies them into a single energy
 functional that decomposes into local terms:
 
     E_total = E_perception + E_memory + E_causal
@@ -29,8 +29,8 @@ import numpy as np
 from typing import Dict, List, Optional, Any, Tuple
 from dataclasses import dataclass
 
-from tensegrity.v2.fhrr import FHRREncoder, bind, bundle, unbind
-from tensegrity.v2.ngc import PredictiveCodingCircuit
+from .fhrr import FHRREncoder, bind, bundle, unbind
+from .ngc import PredictiveCodingCircuit
 
 
 @dataclass
@@ -302,7 +302,7 @@ class UnifiedField:
         """
         What does the system expect to observe next?
 
-        This is the prediction that v1 never made.
+        This is the forward prediction from the settled internal state.
         """
         return self.ngc.predict_observation()
tensegrity/graft/logit_bias.py CHANGED
@@ -30,6 +30,7 @@ The graft implements three principles from the manifold integration:
30
  """
31
 
32
  import math
 
33
  import numpy as np
34
  from typing import Dict, List, Optional, Set, Callable, Any
35
  from dataclasses import dataclass
@@ -84,12 +85,14 @@ class TensegrityLogitsProcessor:
84
  suppress_threshold: float = 0.01,
85
  entropy_gate: float = 0.85,
86
  min_confidence: float = 0.3,
87
- max_bias: float = 8.0):
 
 
88
  """
89
  Args:
90
  hypothesis_tokens: {hyp_id: set of token_ids} from VocabularyGrounding
91
  belief_fn: Callable that returns current posteriors {hyp_id: probability}
92
- This is called at EVERY decode step β€” must return fresh state.
93
  vocab_size: LLM vocabulary size
94
  scale: Ξ³ β€” guidance strength. 0=off, 2.5=moderate, 5.0=strong
95
  suppress_threshold: P below this β†’ hard -inf suppress
@@ -97,6 +100,8 @@ class TensegrityLogitsProcessor:
97
  1.0 = always emit, 0.0 = never emit, 0.85 = emit when fairly certain
98
  min_confidence: Minimum max-posterior probability to emit any bias
99
  max_bias: Clamp bias magnitude to prevent numerical issues
 
 
100
  """
101
  _ensure_torch()
102
 
@@ -108,10 +113,20 @@ class TensegrityLogitsProcessor:
108
  self.entropy_gate = entropy_gate
109
  self.min_confidence = min_confidence
110
  self.max_bias = max_bias
 
 
111
 
112
  # State tracking
113
  self.state = GraftState()
114
  self._step_count = 0
 
 
 
 
 
 
 
 
115
 
116
  def _compute_entropy(self, posteriors: Dict[str, float]) -> float:
117
  """Normalized entropy of the posterior. 0=resolved, 1=uniform."""
@@ -142,37 +157,37 @@ class TensegrityLogitsProcessor:
142
 
143
  return self.state.convergence_met
144
 
145
- def __call__(self, input_ids, scores):
146
- """
147
- Called at every decode step by model.generate().
148
-
149
- Args:
150
- input_ids: (batch_size, seq_len) β€” generated tokens so far
151
- scores: (batch_size, vocab_size) β€” raw logits before softmax
152
-
153
- Returns:
154
- Modified scores with belief-derived logit biases
155
- """
156
- self._step_count += 1
157
- self.state.step = self._step_count
158
-
159
- # Read current beliefs from Tensegrity
160
- posteriors = self.belief_fn()
161
-
 
 
 
 
162
  if not posteriors:
163
  self.state.bias_emitted = False
164
- return scores
165
-
166
- # Convergence gate
167
  if not self._should_emit(posteriors):
168
  self.state.bias_emitted = False
169
- return scores
170
 
171
- # Compute bias
172
  N = len(posteriors)
173
  p_uniform = 1.0 / N
174
-
175
- bias = torch.zeros(self.vocab_size, device=scores.device, dtype=scores.dtype)
176
  boosted = 0
177
  suppressed = 0
178
  max_mag = 0.0
@@ -183,40 +198,59 @@ class TensegrityLogitsProcessor:
183
  continue
184
 
185
  if prob < self.suppress_threshold:
186
- # Hard suppress β€” eliminated hypothesis
187
- b = float('-inf')
188
  for tid in token_ids:
189
  if 0 <= tid < self.vocab_size:
190
- bias[tid] = float('-inf')
191
  suppressed += 1
192
  else:
193
- # Log-ratio bias: positive when prob > uniform, negative when below
194
  b = self.scale * math.log(prob / p_uniform)
195
- b = max(-self.max_bias, min(self.max_bias, b)) # Clamp
196
-
197
  for tid in token_ids:
198
  if 0 <= tid < self.vocab_size:
199
- if bias[tid] != float('-inf'): # Don't un-suppress
200
  bias[tid] += b
201
  if b > 0:
202
  boosted += 1
203
  max_mag = max(max_mag, abs(b))
204
 
205
- # Scale by confidence β€” uncertain beliefs emit weaker biases
206
  max_prob = max(posteriors.values())
207
  confidence_scale = (max_prob - p_uniform) / (1.0 - p_uniform) if max_prob > p_uniform else 0.0
 
 
208
 
209
- # Apply confidence scaling to non-inf biases
210
- finite_mask = bias != float('-inf')
211
- bias[finite_mask] *= confidence_scale
212
-
213
- # Update state
214
  self.state.bias_emitted = True
215
  self.state.max_bias_magnitude = max_mag * confidence_scale
216
  self.state.boosted_tokens = boosted
217
  self.state.suppressed_tokens = suppressed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
 
219
- return scores + bias.unsqueeze(0) # Broadcast over batch dim
 
220
 
221
 
222
  class StaticLogitBiasBuilder:
 
30
  """
31
 
32
  import math
33
+ import threading
34
  import numpy as np
35
  from typing import Dict, List, Optional, Set, Callable, Any
36
  from dataclasses import dataclass
 
85
  suppress_threshold: float = 0.01,
86
  entropy_gate: float = 0.85,
87
  min_confidence: float = 0.3,
88
+ max_bias: float = 8.0,
89
+ async_beliefs: bool = False,
90
+ belief_poll_s: float = 0.005):
91
  """
92
  Args:
93
  hypothesis_tokens: {hyp_id: set of token_ids} from VocabularyGrounding
94
  belief_fn: Callable that returns current posteriors {hyp_id: probability}
95
+ Sync mode: called each decode step. Async mode: polled in a worker thread.
96
  vocab_size: LLM vocabulary size
97
  scale: Ξ³ β€” guidance strength. 0=off, 2.5=moderate, 5.0=strong
98
  suppress_threshold: P below this β†’ hard -inf suppress
 
100
  1.0 = always emit, 0.0 = never emit, 0.85 = emit when fairly certain
101
  min_confidence: Minimum max-posterior probability to emit any bias
102
  max_bias: Clamp bias magnitude to prevent numerical issues
103
+ async_beliefs: If True, belief_fn runs in a daemon thread; __call__ is O(1) bias add
104
+ belief_poll_s: Sleep between async polls (seconds)
105
  """
106
  _ensure_torch()
107
 
 
113
  self.entropy_gate = entropy_gate
114
  self.min_confidence = min_confidence
115
  self.max_bias = max_bias
116
+ self.async_beliefs = async_beliefs
117
+ self.belief_poll_s = belief_poll_s
118
 
119
  # State tracking
120
  self.state = GraftState()
121
  self._step_count = 0
122
+
123
+ self._bias_lock = threading.Lock()
124
+ self._latest_bias_np: Optional[np.ndarray] = None
125
+ self._stop_worker = threading.Event()
126
+ self._worker: Optional[threading.Thread] = None
127
+ if self.async_beliefs:
128
+ self._worker = threading.Thread(target=self._async_belief_worker, daemon=True)
129
+ self._worker.start()
130
 
131
  def _compute_entropy(self, posteriors: Dict[str, float]) -> float:
132
  """Normalized entropy of the posterior. 0=resolved, 1=uniform."""
 
157
 
158
  return self.state.convergence_met
159
 
160
+ def close(self):
161
+ """Stop the background belief worker (async mode)."""
162
+ self._stop_worker.set()
163
+ if self._worker is not None:
164
+ self._worker.join(timeout=2.0)
165
+ self._worker = None
166
+
167
+ def _async_belief_worker(self):
168
+ while not self._stop_worker.is_set():
169
+ try:
170
+ posteriors = self.belief_fn()
171
+ bias_np = self._compute_bias_numpy(posteriors)
172
+ except Exception as e:
173
+ logger.debug("Async belief worker error: %s", e)
174
+ bias_np = None
175
+ with self._bias_lock:
176
+ self._latest_bias_np = bias_np
177
+ self._stop_worker.wait(self.belief_poll_s)
178
+
179
+ def _compute_bias_numpy(self, posteriors: Dict[str, float]) -> Optional[np.ndarray]:
180
+ """Build vocab-sized bias vector on CPU, or None if gated off."""
181
  if not posteriors:
182
  self.state.bias_emitted = False
183
+ return None
 
 
184
  if not self._should_emit(posteriors):
185
  self.state.bias_emitted = False
186
+ return None
187
 
 
188
  N = len(posteriors)
189
  p_uniform = 1.0 / N
190
+ bias = np.zeros(self.vocab_size, dtype=np.float64)
 
191
  boosted = 0
192
  suppressed = 0
193
  max_mag = 0.0
 
198
  continue
199
 
200
  if prob < self.suppress_threshold:
 
 
201
  for tid in token_ids:
202
  if 0 <= tid < self.vocab_size:
203
+ bias[tid] = -np.inf
204
  suppressed += 1
205
  else:
 
206
  b = self.scale * math.log(prob / p_uniform)
207
+ b = max(-self.max_bias, min(self.max_bias, b))
 
208
  for tid in token_ids:
209
  if 0 <= tid < self.vocab_size:
210
+ if not np.isneginf(bias[tid]):
211
  bias[tid] += b
212
  if b > 0:
213
  boosted += 1
214
  max_mag = max(max_mag, abs(b))
215
 
 
216
  max_prob = max(posteriors.values())
217
  confidence_scale = (max_prob - p_uniform) / (1.0 - p_uniform) if max_prob > p_uniform else 0.0
218
+ finite = np.isfinite(bias)
219
+ bias[finite] *= confidence_scale
220
 
221
  self.state.bias_emitted = True
222
  self.state.max_bias_magnitude = max_mag * confidence_scale
223
  self.state.boosted_tokens = boosted
224
  self.state.suppressed_tokens = suppressed
225
+ return bias
226
+
227
+ def __call__(self, input_ids, scores):
228
+ """
229
+ Called at every decode step by model.generate().
230
+
231
+ Args:
232
+ input_ids: (batch_size, seq_len) β€” generated tokens so far
233
+ scores: (batch_size, vocab_size) β€” raw logits before softmax
234
+
235
+ Returns:
236
+ Modified scores with belief-derived logit biases
237
+ """
238
+ self._step_count += 1
239
+ self.state.step = self._step_count
240
+
241
+ if self.async_beliefs:
242
+ with self._bias_lock:
243
+ bias_np = None if self._latest_bias_np is None else self._latest_bias_np.copy()
244
+ else:
245
+ posteriors = self.belief_fn()
246
+ bias_np = self._compute_bias_numpy(posteriors)
247
+
248
+ if bias_np is None:
249
+ self.state.bias_emitted = False
250
+ return scores
251
 
252
+ bias = torch.tensor(bias_np, device=scores.device, dtype=scores.dtype)
253
+ return scores + bias.unsqueeze(0)
254
 
255
 
256
  class StaticLogitBiasBuilder:
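
For intuition, here is a minimal standalone sketch of the belief-to-bias math above (plain NumPy; `belief_bias` is an illustrative helper, not part of the module). Each hypothesis with posterior p gets bias b = scale * log(p / p_uniform), clamped to ±max_bias, hard-suppressed below the threshold, and the whole vector is then scaled by how far the posterior sits from uniform:

```python
# Illustrative sketch of the confidence-gated bias computation (not module code).
import math
import numpy as np

def belief_bias(posteriors, hypothesis_tokens, vocab_size,
                scale=2.5, max_bias=8.0, suppress_threshold=0.01):
    n = len(posteriors)
    p_uniform = 1.0 / n
    bias = np.zeros(vocab_size)
    for hyp, prob in posteriors.items():
        tokens = hypothesis_tokens.get(hyp, ())
        if prob < suppress_threshold:
            for tid in tokens:
                bias[tid] = -np.inf                   # hard suppress
            continue
        b = scale * math.log(prob / p_uniform)        # log-odds vs. uniform
        b = max(-max_bias, min(max_bias, b))          # clamp
        for tid in tokens:
            if not np.isneginf(bias[tid]):
                bias[tid] += b
    # Confidence scaling: 0 at a uniform posterior, 1 at full certainty.
    max_p = max(posteriors.values())
    conf = (max_p - p_uniform) / (1.0 - p_uniform) if max_p > p_uniform else 0.0
    bias[np.isfinite(bias)] *= conf
    return bias

# {up: 0.88, down: 0.12}, scale 2.5: b_up = 2.5*ln(1.76) ~ +1.41,
# b_down = 2.5*ln(0.24) ~ -3.57, then both scaled by conf = 0.76.
b = belief_bias({"up": 0.88, "down": 0.12}, {"up": {10, 11}, "down": {20, 21}}, 128)
```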
tensegrity/graft/pipeline.py CHANGED
@@ -50,8 +50,9 @@ class HybridPipeline:
50
  model_name: str = "meta-llama/Llama-3.2-1B-Instruct",
51
  mode: str = "local",
52
  scale: float = 2.5,
53
- entropy_gate: float = 0.85,
54
- suppress_threshold: float = 0.01):
 
55
  """
56
  Args:
57
  hypothesis_labels: List of hypothesis names
@@ -64,6 +65,7 @@ class HybridPipeline:
64
  scale: Logit bias magnitude
65
  entropy_gate: Convergence threshold for bias emission
66
  suppress_threshold: Below this probability → hard suppress
 
67
  """
68
  self.hypothesis_labels = hypothesis_labels
69
  self.model_name = model_name
@@ -71,6 +73,7 @@ class HybridPipeline:
71
  self.scale = scale
72
  self.entropy_gate = entropy_gate
73
  self.suppress_threshold = suppress_threshold
 
74
 
75
  # Initialize cognitive controller (template mode — no LLM for parsing)
76
  self.controller = CognitiveController(
@@ -125,6 +128,7 @@ class HybridPipeline:
125
  scale=self.scale,
126
  suppress_threshold=self.suppress_threshold,
127
  entropy_gate=self.entropy_gate,
 
128
  )
129
 
130
  logger.info(f"Vocabulary grounding coverage: {self._grounding.coverage()}")
 
50
  model_name: str = "meta-llama/Llama-3.2-1B-Instruct",
51
  mode: str = "local",
52
  scale: float = 2.5,
53
+ entropy_gate: float = 0.85,
54
+ suppress_threshold: float = 0.01,
55
+ async_graft: bool = True):
56
  """
57
  Args:
58
  hypothesis_labels: List of hypothesis names
 
65
  scale: Logit bias magnitude
66
  entropy_gate: Convergence threshold for bias emission
67
  suppress_threshold: Below this probability → hard suppress
68
+ async_graft: Local mode only — poll beliefs in a background thread for non-blocking decode
69
  """
70
  self.hypothesis_labels = hypothesis_labels
71
  self.model_name = model_name
 
73
  self.scale = scale
74
  self.entropy_gate = entropy_gate
75
  self.suppress_threshold = suppress_threshold
76
+ self.async_graft = async_graft
77
 
78
  # Initialize cognitive controller (template mode — no LLM for parsing)
79
  self.controller = CognitiveController(
 
128
  scale=self.scale,
129
  suppress_threshold=self.suppress_threshold,
130
  entropy_gate=self.entropy_gate,
131
+ async_beliefs=self.async_graft,
132
  )
133
 
134
  logger.info(f"Vocabulary grounding coverage: {self._grounding.coverage()}")
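
A minimal construction sketch for the async graft path, using only the constructor arguments visible in this diff (the hypothesis labels are placeholders):

```python
# Hypothetical usage sketch; argument values are illustrative.
from tensegrity.graft.pipeline import HybridPipeline

pipe = HybridPipeline(
    hypothesis_labels=["up", "down"],
    model_name="meta-llama/Llama-3.2-1B-Instruct",
    mode="local",
    scale=2.5,
    entropy_gate=0.85,
    suppress_threshold=0.01,
    async_graft=True,  # beliefs polled in a daemon thread; decode stays non-blocking
)
```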
tensegrity/memory/associative.py CHANGED
@@ -46,7 +46,9 @@ class AssociativeMemory:
46
 
47
  def __init__(self, pattern_dim: int, beta: float = 1.0,
48
  max_patterns: int = 10000, convergence_steps: int = 5,
49
- zipf_exponent: float = 1.0):
 
 
50
  """
51
  Args:
52
  pattern_dim: Dimensionality of stored patterns
@@ -55,12 +57,16 @@ class AssociativeMemory:
55
  max_patterns: Maximum number of stored patterns
56
  convergence_steps: Number of iterative updates for retrieval
57
  zipf_exponent: Controls power-law weighting of pattern importance
 
 
58
  """
59
  self.dim = pattern_dim
60
  self.beta = beta
61
  self.max_patterns = max_patterns
62
  self.convergence_steps = convergence_steps
63
  self.zipf_s = zipf_exponent
 
 
64
 
65
  # Pattern storage matrix X ∈ ℝ^(dim × n_patterns)
66
  self.patterns: List[np.ndarray] = []
@@ -69,7 +75,17 @@ class AssociativeMemory:
69
 
70
  # Pattern metadata (labels, timestamps, access counts)
71
  self._metadata: List[Dict[str, Any]] = []
72
- self._access_counts: List[int] = []
73
 
74
  def store(self, pattern: np.ndarray, metadata: Optional[Dict] = None) -> int:
75
  """
@@ -97,7 +113,7 @@ class AssociativeMemory:
97
  idx = len(self.patterns)
98
  self.patterns.append(pattern.copy())
99
  self._metadata.append(metadata or {})
100
- self._access_counts.append(0)
101
  self._dirty = True
102
 
103
  # Capacity management
@@ -127,6 +143,10 @@ class AssociativeMemory:
127
  if not self.patterns:
128
  return (np.zeros(self.dim), float('inf')) if return_energy else np.zeros(self.dim)
129
 
130
  self._ensure_matrix()
131
 
132
  # Normalize query
@@ -199,6 +219,10 @@ class AssociativeMemory:
199
  if not self.patterns:
200
  return np.zeros(self.dim), np.array([])
201
 
202
  self._ensure_matrix()
203
 
204
  query = np.asarray(query, dtype=np.float64).flatten()
@@ -229,6 +253,12 @@ class AssociativeMemory:
229
  self.beta * similarities.max()
230
  return float(-log_sum_exp / self.beta + 0.5 * np.dot(xi, xi))
231
 
232
  def _zipf_weights(self) -> np.ndarray:
233
  """
234
  Compute Zipf weights based on access frequency.
@@ -237,7 +267,7 @@ class AssociativeMemory:
237
  This creates a self-reinforcing power law: popular patterns
238
  become more accessible, rare patterns fade.
239
  """
240
- counts = np.array(self._access_counts, dtype=np.float64) + 1.0
241
  # Rank by access count (descending)
242
  ranks = np.argsort(np.argsort(-counts)) + 1 # 1-indexed ranks
243
  # Zipf weight: 1/rank^s
@@ -284,7 +314,7 @@ class AssociativeMemory:
284
  'n_patterns': len(self.patterns),
285
  'total_accesses': int(accesses.sum()),
286
  'mean_access': float(accesses.mean()),
287
- 'max_access': int(accesses.max()),
288
  'beta': self.beta,
289
  'dimension': self.dim,
290
  }
 
46
 
47
  def __init__(self, pattern_dim: int, beta: float = 1.0,
48
  max_patterns: int = 10000, convergence_steps: int = 5,
49
+ zipf_exponent: float = 1.0,
50
+ access_decay: float = 0.99,
51
+ decay_every_n_retrieves: int = 50):
52
  """
53
  Args:
54
  pattern_dim: Dimensionality of stored patterns
 
57
  max_patterns: Maximum number of stored patterns
58
  convergence_steps: Number of iterative updates for retrieval
59
  zipf_exponent: Controls power-law weighting of pattern importance
60
+ access_decay: Multiplicative decay applied to access counts periodically
61
+ decay_every_n_retrieves: Invoke decay every N retrieve() calls (0 = never)
62
  """
63
  self.dim = pattern_dim
64
  self.beta = beta
65
  self.max_patterns = max_patterns
66
  self.convergence_steps = convergence_steps
67
  self.zipf_s = zipf_exponent
68
+ self.access_decay = access_decay
69
+ self.decay_every_n_retrieves = decay_every_n_retrieves
70
 
71
  # Pattern storage matrix X ∈ ℝ^(dim × n_patterns)
72
  self.patterns: List[np.ndarray] = []
 
75
 
76
  # Pattern metadata (labels, timestamps, access counts)
77
  self._metadata: List[Dict[str, Any]] = []
78
+ self._access_counts: List[float] = []
79
+ self._retrieve_calls = 0
80
+
81
+ def clear(self):
82
+ """Drop all stored patterns (new episode)."""
83
+ self.patterns.clear()
84
+ self._metadata.clear()
85
+ self._access_counts.clear()
86
+ self._pattern_matrix = None
87
+ self._dirty = True
88
+ self._retrieve_calls = 0
89
 
90
  def store(self, pattern: np.ndarray, metadata: Optional[Dict] = None) -> int:
91
  """
 
113
  idx = len(self.patterns)
114
  self.patterns.append(pattern.copy())
115
  self._metadata.append(metadata or {})
116
+ self._access_counts.append(0.0)
117
  self._dirty = True
118
 
119
  # Capacity management
 
143
  if not self.patterns:
144
  return (np.zeros(self.dim), float('inf')) if return_energy else np.zeros(self.dim)
145
 
146
+ self._retrieve_calls += 1
147
+ if self.decay_every_n_retrieves > 0 and self._retrieve_calls % self.decay_every_n_retrieves == 0:
148
+ self._decay_access_counts()
149
+
150
  self._ensure_matrix()
151
 
152
  # Normalize query
 
219
  if not self.patterns:
220
  return np.zeros(self.dim), np.array([])
221
 
222
+ self._retrieve_calls += 1
223
+ if self.decay_every_n_retrieves > 0 and self._retrieve_calls % self.decay_every_n_retrieves == 0:
224
+ self._decay_access_counts()
225
+
226
  self._ensure_matrix()
227
 
228
  query = np.asarray(query, dtype=np.float64).flatten()
 
253
  self.beta * similarities.max()
254
  return float(-log_sum_exp / self.beta + 0.5 * np.dot(xi, xi))
255
 
256
+ def _decay_access_counts(self):
257
+ """Reduce access counts so stale dominance slowly evaporates."""
258
+ if not self._access_counts:
259
+ return
260
+ self._access_counts = [float(c) * self.access_decay for c in self._access_counts]
261
+
262
  def _zipf_weights(self) -> np.ndarray:
263
  """
264
  Compute Zipf weights based on access frequency.
 
267
  This creates a self-reinforcing power law: popular patterns
268
  become more accessible, rare patterns fade.
269
  """
270
+ counts = np.maximum(self._access_counts, 0.0) + 1.0
271
  # Rank by access count (descending)
272
  ranks = np.argsort(np.argsort(-counts)) + 1 # 1-indexed ranks
273
  # Zipf weight: 1/rank^s
 
314
  'n_patterns': len(self.patterns),
315
  'total_accesses': int(accesses.sum()),
316
  'mean_access': float(accesses.mean()),
317
+ 'max_access': float(accesses.max()),
318
  'beta': self.beta,
319
  'dimension': self.dim,
320
  }
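
For intuition, the Zipf weighting and periodic decay above reduce to the following standalone arithmetic (values are illustrative):

```python
# Standalone sketch of Zipf ranking and access-count decay (illustrative values).
import numpy as np

counts = np.array([12.0, 3.0, 3.0, 0.0])              # per-pattern access counts
ranks = np.argsort(np.argsort(-(counts + 1.0))) + 1   # 1 = most accessed, ties by index
weights = 1.0 / ranks ** 1.0                          # zipf_exponent s = 1.0
# ranks = [1, 2, 3, 4] -> weights = [1.0, 0.5, 0.333..., 0.25]

# Multiplicative decay (access_decay = 0.99) applied every N retrieve() calls:
counts *= 0.99   # 12.0 -> 11.88; a pattern untouched for k cycles keeps 0.99**k of it
```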
tensegrity/memory/episodic.py CHANGED
@@ -96,6 +96,15 @@ class EpisodicMemory:
96
 
97
  self._timestep = 0
98
 
99
  def _compute_item_representation(self, observation: np.ndarray,
100
  belief_state: np.ndarray) -> np.ndarray:
101
  """
 
96
 
97
  self._timestep = 0
98
 
99
+ def clear(self):
100
+ """Reset store for a new independent episode (e.g. next benchmark item)."""
101
+ self.context = np.random.randn(self.context_dim)
102
+ self.context /= np.linalg.norm(self.context)
103
+ self.episodes.clear()
104
+ self._morton_index.clear()
105
+ self._surprise_heap.clear()
106
+ self._timestep = 0
107
+
108
  def _compute_item_representation(self, observation: np.ndarray,
109
  belief_state: np.ndarray) -> np.ndarray:
110
  """
tensegrity/pipeline/__init__.py ADDED
@@ -0,0 +1,5 @@
 
1
+ """Single full-stack Tensegrity execution path (controller + semantic field scoring)."""
2
+
3
+ from tensegrity.pipeline.canonical import CanonicalPipeline
4
+
5
+ __all__ = ["CanonicalPipeline"]
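
A hedged driver for the package import above; `TaskSample` fields beyond `prompt` and `choices` are not shown in this diff, so a stand-in object is used instead of the real dataclass:

```python
# Hypothetical usage sketch; SimpleNamespace stands in for a real TaskSample.
from types import SimpleNamespace
from tensegrity.pipeline import CanonicalPipeline

pipe = CanonicalPipeline(hypothesis_labels=["A", "B"])
sample = SimpleNamespace(prompt="The sky is", choices=["blue", "loud"])
scores, gate_entropy, diag = pipe.score_multichoice(sample)
# argmax over `scores` selects the predicted choice.
```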
tensegrity/pipeline/canonical.py ADDED
@@ -0,0 +1,129 @@
 
1
+ """
2
+ Canonical Tensegrity pipeline — one code path for benchmarks and generation.
3
+
4
+ Composes:
5
+ • CognitiveController + TensegrityAgent (Broca parse, free-energy inference,
6
+ causal arena, dynamic SCM injection when enabled)
7
+ • ScoringBridge (FHRR + NGC + Hopfield scoring for multiple-choice items)
8
+
9
+ Benchmark mode scores each option by fusing LLM log-probabilities with this
10
+ stack. Hybrid generation can reuse the same controller for logit grafting.
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import logging
16
+ from typing import Any, Dict, List, Optional, Tuple
17
+
18
+ import numpy as np
19
+
20
+ from tensegrity.broca.controller import CognitiveController
21
+ from tensegrity.bench.tasks import TaskSample
22
+ from tensegrity.engine.scoring import ScoringBridge
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
+ class CanonicalPipeline:
28
+ """
29
+ Full cognitive stack shared by benchmarks and the hybrid graft path.
30
+
31
+ For each MC item: reset controller + field scorer state, ingest the prompt,
32
+ score choices with ScoringBridge, fuse hypothesis posterior with field scores.
33
+ """
34
+
35
+ def __init__(
36
+ self,
37
+ hypothesis_labels: List[str],
38
+ *,
39
+ use_llm_broca: bool = False,
40
+ enable_hypothesis_generation: bool = False,
41
+ model_name: str = "meta-llama/Llama-3.2-1B-Instruct",
42
+ belief_blend: float = 0.35,
43
+ obs_dim: int = 256,
44
+ hidden_dims: Optional[List[int]] = None,
45
+ fhrr_dim: int = 2048,
46
+ ngc_settle_steps: int = 30,
47
+ ngc_learning_rate: float = 0.01,
48
+ hopfield_beta: float = 0.05,
49
+ confidence_threshold: float = 0.15,
50
+ context_settle_steps: int = 40,
51
+ choice_settle_steps: int = 25,
52
+ context_learning_epochs: int = 3,
53
+ ):
54
+ self.model_name = model_name
55
+ self.belief_blend = belief_blend
56
+ self.controller = CognitiveController(
57
+ n_hypotheses=max(len(hypothesis_labels), 2),
58
+ hypothesis_labels=hypothesis_labels,
59
+ use_llm=use_llm_broca,
60
+ enable_hypothesis_generation=enable_hypothesis_generation,
61
+ )
62
+ self.scoring = ScoringBridge(
63
+ obs_dim=obs_dim,
64
+ hidden_dims=hidden_dims or [128, 32],
65
+ fhrr_dim=fhrr_dim,
66
+ ngc_settle_steps=ngc_settle_steps,
67
+ ngc_learning_rate=ngc_learning_rate,
68
+ hopfield_beta=hopfield_beta,
69
+ confidence_threshold=confidence_threshold,
70
+ context_settle_steps=context_settle_steps,
71
+ choice_settle_steps=choice_settle_steps,
72
+ context_learning_epochs=context_learning_epochs,
73
+ )
74
+
75
+ def reset_for_multichoice(self, sample: TaskSample) -> None:
76
+ """I.I.D. benchmark item: fresh agent memories / arena and field scorer."""
77
+ labels = list(sample.choices)
78
+ if not labels:
79
+ labels = ["_empty_"]
80
+ self.controller.reset_session(labels)
81
+ self.scoring.reset()
82
+
83
+ def ingest_prompt(self, prompt: str) -> Dict[str, Any]:
84
+ """Parse + perceive (+ optional causal hypothesis); no verbalization."""
85
+ return self.controller.perceive_only(prompt)
86
+
87
+ def score_multichoice(self, sample: TaskSample) -> Tuple[List[float], float, Dict[str, Any]]:
88
+ """
89
+ Run the full stack on one TaskSample.
90
+
91
+ Returns:
92
+ combined_scores: fused list for argmax over choices
93
+ gate_entropy: scorer gate entropy
94
+ diagnostics: raw components for debugging / logging
95
+ """
96
+ self.reset_for_multichoice(sample)
97
+ ing = self.ingest_prompt(sample.prompt)
98
+ field_scores, entropy = self.scoring.score_choices(sample.prompt, sample.choices)
99
+ field_arr = np.asarray(field_scores, dtype=np.float64)
100
+ agent_probs = self._agent_choice_posterior(len(sample.choices))
101
+ combined = self._fuse_field_and_hypotheses(field_arr, agent_probs)
102
+ return (
103
+ combined.tolist(),
104
+ float(entropy),
105
+ {
106
+ "field_scores": field_scores,
107
+ "agent_probs": agent_probs.tolist(),
108
+ "perception_tension": ing.get("perception", {}).get("tension"),
109
+ "free_energy": ing.get("perception", {}).get("free_energy"),
110
+ },
111
+ )
112
+
113
+ def _agent_choice_posterior(self, n_choices: int) -> np.ndarray:
114
+ hs = self.controller.belief_state.hypotheses
115
+ if len(hs) != n_choices:
116
+ return np.ones(n_choices, dtype=np.float64) / max(n_choices, 1)
117
+ p = np.array([float(h.probability) for h in hs], dtype=np.float64)
118
+ s = p.sum()
119
+ if s <= 0:
120
+ return np.ones(n_choices, dtype=np.float64) / n_choices
121
+ return p / s
122
+
123
+ def _fuse_field_and_hypotheses(self, field: np.ndarray, agent_probs: np.ndarray) -> np.ndarray:
124
+ if field.shape != agent_probs.shape:
125
+ return field
126
+ zf = (field - field.mean()) / (field.std() + 1e-8)
127
+ a = np.log(agent_probs + 1e-12)
128
+ za = (a - a.mean()) / (a.std() + 1e-8)
129
+ return zf + self.belief_blend * za
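
The fusion step is a z-scored blend of field scores and log beliefs; a self-contained sketch with toy numbers (`fuse` is an illustrative copy of the method body):

```python
# Illustrative copy of the z-scored fusion in _fuse_field_and_hypotheses.
import numpy as np

def fuse(field, agent_probs, belief_blend=0.35):
    zf = (field - field.mean()) / (field.std() + 1e-8)  # z-scored field scores
    a = np.log(agent_probs + 1e-12)                     # log posterior
    za = (a - a.mean()) / (a.std() + 1e-8)              # z-scored log beliefs
    return zf + belief_blend * za

field = np.array([0.2, 0.5, 0.1])   # semantic field score per choice
agent = np.array([0.2, 0.6, 0.2])   # controller posterior per choice
print(fuse(field, agent))           # ~[-0.64, 1.87, -1.23]; choice 1 wins
```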
tensegrity/v2/__init__.py DELETED
@@ -1,30 +0,0 @@
1
- """
2
- Tensegrity v2: Unified Energy Architecture.
3
-
4
- The v2 architecture replaces v1's flat inference with:
5
- - FHRR-RNS encoding (compositional hypervectors with semantic grounding)
6
- - NGC predictive coding (hierarchical prediction errors, replaces flat POMDP)
7
- - Unified energy landscape (perception + memory + causal in one functional)
8
- - CausalArenaV2 (energy-based model competition, replaces log-likelihood arena)
9
- - Sentence-level semantic scoring via sentence-transformers
10
-
11
- Public API:
12
- UnifiedField — The main cognitive engine (FHRR → NGC → Hopfield → Causal)
13
- V2ScoringBridge — Bridge to benchmark harness (scores choices by semantic similarity + NGC energy)
14
- NGCLogitsProcessor — LogitsProcessor for LLM generation grafting
15
- CausalArenaV2 — Energy-based causal model competition
16
- FHRREncoder — Compositional hypervector encoder (semantic=True by default)
17
- SemanticFHRRCodebook — FHRR codebook grounded in sentence-transformer embeddings
18
- PredictiveCodingCircuit — Hierarchical NGC circuit
19
- """
20
-
21
- __version__ = "0.2.0"
22
-
23
- from tensegrity.v2.field import UnifiedField, HopfieldMemoryBank, EnergyDecomposition
24
- from tensegrity.v2.ngc import PredictiveCodingCircuit, LayerState
25
- from tensegrity.v2.fhrr import (
26
- FHRREncoder, FHRRCodebook, SemanticFHRRCodebook,
27
- bind, bundle, unbind, permute,
28
- )
29
- from tensegrity.v2.causal_energy import CausalArenaV2, CausalEnergyTerm
30
- from tensegrity.v2.graft import V2ScoringBridge, NGCLogitsProcessor
 
tests/test_async_graft.py ADDED
@@ -0,0 +1,130 @@
 
1
+ """
2
+ Tests for asynchronous logit grafting and NGC warm-start settling.
3
+ """
4
+
5
+ import os
6
+ import sys
7
+ import time
8
+
9
+ import numpy as np
10
+
11
+ ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
12
+ if ROOT not in sys.path:
13
+ sys.path.insert(0, ROOT)
14
+
15
+
16
+ def _load_module(name: str, relpath: str):
17
+ import importlib.util
18
+
19
+ path = os.path.join(ROOT, relpath)
20
+ spec = importlib.util.spec_from_file_location(name, path)
21
+ mod = importlib.util.module_from_spec(spec)
22
+ assert spec.loader is not None
23
+ spec.loader.exec_module(mod)
24
+ return mod
25
+
26
+
27
+ def test_ngc_warm_start_fewer_steps():
28
+ mod = _load_module("tensegrity_engine_ngc_test", os.path.join("tensegrity", "engine", "ngc.py"))
29
+ PredictiveCodingCircuit = mod.PredictiveCodingCircuit
30
+
31
+ ngc = PredictiveCodingCircuit(
32
+ layer_sizes=[16, 8, 4],
33
+ settle_steps=30,
34
+ settle_steps_warm=4,
35
+ obs_change_threshold=1e-6,
36
+ adaptive_precision=False,
37
+ )
38
+ pattern = np.random.RandomState(0).randn(16)
39
+ full = ngc.settle(pattern)
40
+ assert full["settle_steps"] == 30
41
+ warm = ngc.settle(pattern)
42
+ assert warm["settle_steps"] == 4
43
+
44
+
45
+ def test_async_beliefs_processor_matches_sync():
46
+ try:
47
+ import torch
48
+ except ImportError:
49
+ return
50
+
51
+ mod = _load_module("logit_bias_test", os.path.join("tensegrity", "graft", "logit_bias.py"))
52
+ TensegrityLogitsProcessor = mod.TensegrityLogitsProcessor
53
+
54
+ hypothesis_tokens = {"up": {10, 11}, "down": {20, 21}}
55
+ posteriors = {"up": 0.88, "down": 0.12}
56
+
57
+ def belief_fn():
58
+ return posteriors
59
+
60
+ sync = TensegrityLogitsProcessor(
61
+ hypothesis_tokens=hypothesis_tokens,
62
+ belief_fn=belief_fn,
63
+ vocab_size=128,
64
+ entropy_gate=0.95,
65
+ min_confidence=0.2,
66
+ async_beliefs=False,
67
+ )
68
+ async_p = TensegrityLogitsProcessor(
69
+ hypothesis_tokens=hypothesis_tokens,
70
+ belief_fn=belief_fn,
71
+ vocab_size=128,
72
+ entropy_gate=0.95,
73
+ min_confidence=0.2,
74
+ async_beliefs=True,
75
+ belief_poll_s=0.002,
76
+ )
77
+ time.sleep(0.05)
78
+ fake_ids = torch.zeros(1, 3, dtype=torch.long)
79
+ scores = torch.randn(1, 128)
80
+ out_sync = sync(fake_ids, scores.clone())
81
+ out_async = async_p(fake_ids, scores.clone())
82
+ async_p.close()
83
+ if async_p.state.bias_emitted and sync.state.bias_emitted:
84
+ assert torch.allclose(out_sync, out_async, atol=1e-5)
85
+
86
+
87
+ def test_associative_access_decay():
88
+ mod = _load_module("associative_test", os.path.join("tensegrity", "memory", "associative.py"))
89
+ AssociativeMemory = mod.AssociativeMemory
90
+
91
+ mem = AssociativeMemory(
92
+ pattern_dim=8,
93
+ beta=1.0,
94
+ decay_every_n_retrieves=1,
95
+ access_decay=0.5,
96
+ max_patterns=100,
97
+ )
98
+ mem.store(np.array([1.0, 0, 0, 0, 0, 0, 0, 0]))
99
+ mem.retrieve(np.array([1.0, 0, 0, 0, 0, 0, 0, 0]))
100
+ assert mem._access_counts[0] >= 1.0
101
+ mem._decay_access_counts()
102
+ assert mem._access_counts[0] < 1.0
103
+
104
+
105
+ def test_build_scm_from_proposal():
106
+ try:
107
+ import networkx # noqa: F401
108
+ except ImportError:
109
+ return
110
+ from tensegrity.broca.schemas import ProposedSCM, CausalEdge
111
+ from tensegrity.causal.from_proposal import build_scm_from_proposal
112
+
113
+ p = ProposedSCM(
114
+ name="test_model",
115
+ description="x drives y",
116
+ edges=[
117
+ CausalEdge(source="x", target="y", mechanism="causes"),
118
+ CausalEdge(source="y", target="z", mechanism="enables"),
119
+ ],
120
+ )
121
+ scm = build_scm_from_proposal(p)
122
+ assert "x" in scm.variables and "z" in scm.variables
123
+
124
+
125
+ if __name__ == "__main__":
126
+ test_ngc_warm_start_fewer_steps()
127
+ test_async_beliefs_processor_matches_sync()
128
+ test_associative_access_decay()
129
+ test_build_scm_from_proposal()
130
+ print("ok")
tests/{test_v2.py → test_engine.py} RENAMED
@@ -1,5 +1,5 @@
1
  """
2
- Test Tensegrity v2: unified energy landscape.
3
  """
4
 
5
  import sys
@@ -14,7 +14,7 @@ def test_fhrr_encoding():
14
  print("TEST 1: FHRR-RNS Compositional Encoding")
15
  print("=" * 60)
16
 
17
- from tensegrity.v2.fhrr import FHRREncoder, bind, unbind, bundle
18
 
19
  enc = FHRREncoder(dim=2048)
20
 
@@ -61,12 +61,12 @@ def test_fhrr_encoding():
61
  print(f" βœ“ Same sequences more similar than different ones")
62
 
63
  # Test numeric vector encoding (modality-agnostic)
64
- v1 = enc.encode_numeric_vector(np.array([1.0, 2.0, 3.0]))
65
- v2 = enc.encode_numeric_vector(np.array([1.0, 2.0, 3.1]))
66
- v3 = enc.encode_numeric_vector(np.array([9.0, 8.0, 7.0]))
67
 
68
- print(f"\n sim([1,2,3], [1,2,3.1]) = {enc.similarity(v1, v2):.4f}")
69
- print(f" sim([1,2,3], [9,8,7]) = {enc.similarity(v1, v3):.4f}")
70
  print(f" βœ“ Numeric vectors: similar inputs β†’ similar encodings")
71
 
72
  return True
@@ -78,7 +78,7 @@ def test_predictive_coding():
78
  print("TEST 2: Hierarchical Predictive Coding (NGC)")
79
  print("=" * 60)
80
 
81
- from tensegrity.v2.ngc import PredictiveCodingCircuit
82
 
83
  # 3-layer hierarchy: 64 β†’ 32 β†’ 8
84
  ngc = PredictiveCodingCircuit(
@@ -133,7 +133,6 @@ def test_predictive_coding():
133
 
134
  # THE KEY TEST: the system now PREDICTS its input
135
  predicted = ngc.predict_observation()
136
- actual = pattern_a # Last odd epoch was pattern_b, so next should predict pattern_a-ish
137
  residual = np.linalg.norm(predicted)
138
  print(f"\n Prediction norm: {residual:.4f} (>0 means the system has learned to predict)")
139
  assert residual > 0.01, "System should generate non-trivial predictions"
@@ -149,7 +148,7 @@ def test_unified_field():
149
  print("TEST 3: Unified Energy Landscape")
150
  print("=" * 60)
151
 
152
- from tensegrity.v2.field import UnifiedField
153
 
154
  field = UnifiedField(
155
  obs_dim=128,
@@ -216,8 +215,8 @@ def main():
216
  ]
217
 
218
  print("\n" + "█" * 60)
219
- print(" TENSEGRITY v2: Unified Energy Architecture")
220
- print(" FHRR-RNS × Predictive Coding × Hopfield Memory")
221
  print("█" * 60)
222
 
223
  results = []
 
1
  """
2
+ Tests for the unified cognitive engine: FHRR, NGC, and UnifiedField.
3
  """
4
 
5
  import sys
 
14
  print("TEST 1: FHRR-RNS Compositional Encoding")
15
  print("=" * 60)
16
 
17
+ from tensegrity.engine.fhrr import FHRREncoder, bind, unbind, bundle
18
 
19
  enc = FHRREncoder(dim=2048)
20
 
 
61
  print(f" βœ“ Same sequences more similar than different ones")
62
 
63
  # Test numeric vector encoding (modality-agnostic)
64
+ v_base = enc.encode_numeric_vector(np.array([1.0, 2.0, 3.0]))
65
+ v_near = enc.encode_numeric_vector(np.array([1.0, 2.0, 3.1]))
66
+ v_far = enc.encode_numeric_vector(np.array([9.0, 8.0, 7.0]))
67
 
68
+ print(f"\n sim([1,2,3], [1,2,3.1]) = {enc.similarity(v_base, v_near):.4f}")
69
+ print(f" sim([1,2,3], [9,8,7]) = {enc.similarity(v_base, v_far):.4f}")
70
  print(f" βœ“ Numeric vectors: similar inputs β†’ similar encodings")
71
 
72
  return True
 
78
  print("TEST 2: Hierarchical Predictive Coding (NGC)")
79
  print("=" * 60)
80
 
81
+ from tensegrity.engine.ngc import PredictiveCodingCircuit
82
 
83
  # 3-layer hierarchy: 64 → 32 → 8
84
  ngc = PredictiveCodingCircuit(
 
133
 
134
  # THE KEY TEST: the system now PREDICTS its input
135
  predicted = ngc.predict_observation()
 
136
  residual = np.linalg.norm(predicted)
137
  print(f"\n Prediction norm: {residual:.4f} (>0 means the system has learned to predict)")
138
  assert residual > 0.01, "System should generate non-trivial predictions"
 
148
  print("TEST 3: Unified Energy Landscape")
149
  print("=" * 60)
150
 
151
+ from tensegrity.engine.unified_field import UnifiedField
152
 
153
  field = UnifiedField(
154
  obs_dim=128,
 
215
  ]
216
 
217
  print("\n" + "█" * 60)
218
+ print(" Tensegrity engine: unified energy architecture")
219
+ print(" FHRR-RNS × Predictive Coding × Hopfield memory")
220
  print("█" * 60)
221
 
222
  results = []
tests/test_needle.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- Needle-in-Lies Test: Can v2's NGC detect the true statement among contradictions?
3
 
4
  The test:
5
  - One true statement ("The key is under the oak table")
@@ -28,9 +28,9 @@ sys.path.insert(0, '/app')
28
  import numpy as np
29
  np.random.seed(42)
30
 
31
- from tensegrity.v2.fhrr import FHRREncoder, bind, bundle, unbind
32
- from tensegrity.v2.ngc import PredictiveCodingCircuit
33
- from tensegrity.v2.field import UnifiedField, HopfieldMemoryBank
34
 
35
 
36
  def make_needle_scenario(n_lies: int = 13):
 
1
  """
2
+ Needle-in-Lies Test: Can NGC detect the true statement among contradictions?
3
 
4
  The test:
5
  - One true statement ("The key is under the oak table")
 
28
  import numpy as np
29
  np.random.seed(42)
30
 
31
+ from tensegrity.engine.fhrr import FHRREncoder, bind, bundle, unbind
32
+ from tensegrity.engine.ngc import PredictiveCodingCircuit
33
+ from tensegrity.engine.unified_field import UnifiedField, HopfieldMemoryBank
34
 
35
 
36
  def make_needle_scenario(n_lies: int = 13):
tests/{test_v2_bench.py → test_scoring_bench.py} RENAMED
@@ -1,5 +1,5 @@
1
  """
2
- Test v2 scoring bridge against benchmarks.
3
  """
4
  import sys
5
  sys.path.insert(0, '/app')
@@ -7,16 +7,16 @@ import numpy as np
7
  np.random.seed(42)
8
 
9
 
10
- def test_v2_scoring():
11
- """Test v2 NGC-based scoring on benchmark samples."""
12
  print("=" * 60)
13
- print("TEST: v2 NGC Scoring vs v1 Baseline on Sample Tasks")
14
  print("=" * 60)
15
 
16
- from tensegrity.v2.graft import V2ScoringBridge
17
  from tensegrity.bench.tasks import load_task_samples
18
 
19
- bridge = V2ScoringBridge(obs_dim=128, hidden_dims=[64, 16])
20
 
21
  tasks = ["copa", "sciq", "arc_challenge"]
22
 
@@ -41,18 +41,18 @@ def test_v2_scoring():
41
  acc = correct / max(total, 1)
42
  print(f"\n {task_name}: {correct}/{total} = {acc:.1%}")
43
 
44
- print(f"\n βœ“ v2 scoring bridge functional")
45
  return True
46
 
47
 
48
- def test_causal_energy():
49
- """Test the causal energy term."""
50
  print("\n" + "=" * 60)
51
- print("TEST: Causal Energy Arena v2")
52
  print("=" * 60)
53
 
54
  from tensegrity.causal.scm import StructuralCausalModel
55
- from tensegrity.v2.causal_energy import CausalArenaV2
56
 
57
  # Two competing models
58
  m_correct = StructuralCausalModel("correct")
@@ -68,7 +68,7 @@ def test_causal_energy():
68
  m_correct.update_from_data(data)
69
  m_wrong.update_from_data(data)
70
 
71
- arena = CausalArenaV2(precision=1.0, beta=2.0)
72
  arena.register(m_correct)
73
  arena.register(m_wrong)
74
 
@@ -89,18 +89,18 @@ def test_causal_energy():
89
  print(f" Last energies: {last_result['energies']}")
90
  print(f" Last posteriors: {last_result['posteriors']}")
91
 
92
- print(f" βœ“ Causal energy arena functional")
93
  return True
94
 
95
 
96
  if __name__ == "__main__":
97
  tests = [
98
- ("v2 Scoring", test_v2_scoring),
99
- ("Causal Energy", test_causal_energy),
100
  ]
101
 
102
  print("\n" + "β–ˆ" * 60)
103
  print("\n" + "█" * 60)
103
- print(" v2 Integration Tests")
104
  print("█" * 60)
106
  for name, fn in tests:
 
1
  """
2
+ Integration tests: ScoringBridge on benchmark samples and energy causal arena.
3
  """
4
  import sys
5
  sys.path.insert(0, '/app')
 
7
  np.random.seed(42)
8
 
9
 
10
+ def test_scoring_bridge_on_tasks():
11
+ """ScoringBridge on a small slice of benchmark tasks."""
12
  print("=" * 60)
13
+ print("TEST: semantic field scoring on sample tasks")
14
  print("=" * 60)
15
 
16
+ from tensegrity.engine.scoring import ScoringBridge
17
  from tensegrity.bench.tasks import load_task_samples
18
 
19
+ bridge = ScoringBridge(obs_dim=128, hidden_dims=[64, 16])
20
 
21
  tasks = ["copa", "sciq", "arc_challenge"]
22
 
 
41
  acc = correct / max(total, 1)
42
  print(f"\n {task_name}: {correct}/{total} = {acc:.1%}")
43
 
44
+ print(f"\n βœ“ ScoringBridge functional")
45
  return True
46
 
47
 
48
+ def test_causal_energy_arena():
49
+ """Energy-based causal model competition."""
50
  print("\n" + "=" * 60)
51
+ print("TEST: energy-based causal arena")
52
  print("=" * 60)
53
 
54
  from tensegrity.causal.scm import StructuralCausalModel
55
+ from tensegrity.engine.causal_energy import EnergyCausalArena
56
 
57
  # Two competing models
58
  m_correct = StructuralCausalModel("correct")
 
68
  m_correct.update_from_data(data)
69
  m_wrong.update_from_data(data)
70
 
71
+ arena = EnergyCausalArena(precision=1.0, beta=2.0)
72
  arena.register(m_correct)
73
  arena.register(m_wrong)
74
 
 
89
  print(f" Last energies: {last_result['energies']}")
90
  print(f" Last posteriors: {last_result['posteriors']}")
91
 
92
+ print(f" βœ“ Energy causal arena functional")
93
  return True
94
 
95
 
96
  if __name__ == "__main__":
97
  tests = [
98
+ ("Scoring bridge", test_scoring_bridge_on_tasks),
99
+ ("Causal energy", test_causal_energy_arena),
100
  ]
101
 
102
  print("\n" + "β–ˆ" * 60)
103
+ print(" Scoring + causal energy integration")
104
  print("β–ˆ" * 60)
105
 
106
  for name, fn in tests:
uv.lock ADDED
The diff for this file is too large to render. See raw diff