fatty-belly committed on
Commit
f7a0ff3
·
verified ·
1 Parent(s): 7ab6de8

Reduce max_chunk_len from 128k to 32k

Browse files
pipelines/vqa_extract_optimized_pipeline.py CHANGED
@@ -42,7 +42,7 @@ class PDF_VQA_extract_optimized_pipeline(PipelineABC):
42
  self.vqa_extractor = ChunkedPromptedGenerator(
43
  llm_serving=self.llm_serving,
44
  system_prompt = self.vqa_extract_prompt.build_prompt(),
45
- max_chunk_len=128000,
46
  )
47
  self.llm_output_parser = LLMOutputParser(output_dir="./cache", intermediate_dir="intermediate")
48
  self.qa_merger = QA_Merger(output_dir="./cache", strict_title_match=False)
 
42
  self.vqa_extractor = ChunkedPromptedGenerator(
43
  llm_serving=self.llm_serving,
44
  system_prompt = self.vqa_extract_prompt.build_prompt(),
45
+ max_chunk_len=32000,
46
  )
47
  self.llm_output_parser = LLMOutputParser(output_dir="./cache", intermediate_dir="intermediate")
48
  self.qa_merger = QA_Merger(output_dir="./cache", strict_title_match=False)