diff --git a/.semversioner/next-release/patch-20240808225534741702.json b/.semversioner/next-release/patch-20240808225534741702.json
new file mode 100644
index 0000000000..3e6755c5c5
--- /dev/null
+++ b/.semversioner/next-release/patch-20240808225534741702.json
@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "Fix file dumps using json for non ASCII chars"
+}
diff --git a/graphrag/index/cache/json_pipeline_cache.py b/graphrag/index/cache/json_pipeline_cache.py
index b88a38990c..b9e85889ad 100644
--- a/graphrag/index/cache/json_pipeline_cache.py
+++ b/graphrag/index/cache/json_pipeline_cache.py
@@ -44,7 +44,9 @@ async def set(self, key: str, value: Any, debug_data: dict | None = None) -> Non
         if value is None:
             return
         data = {"result": value, **(debug_data or {})}
-        await self._storage.set(key, json.dumps(data), encoding=self._encoding)
+        await self._storage.set(
+            key, json.dumps(data, ensure_ascii=False), encoding=self._encoding
+        )
 
     async def has(self, key: str) -> bool:
         """Has method definition."""
diff --git a/graphrag/index/graph/extractors/summarize/description_summary_extractor.py b/graphrag/index/graph/extractors/summarize/description_summary_extractor.py
index 76d77202d3..011090654c 100644
--- a/graphrag/index/graph/extractors/summarize/description_summary_extractor.py
+++ b/graphrag/index/graph/extractors/summarize/description_summary_extractor.py
@@ -127,7 +127,9 @@ async def _summarize_descriptions_with_llm(
             name="summarize",
             variables={
                 self._entity_name_key: json.dumps(items),
-                self._input_descriptions_key: json.dumps(sorted(descriptions)),
+                self._input_descriptions_key: json.dumps(
+                    sorted(descriptions), ensure_ascii=False
+                ),
             },
             model_parameters={"max_tokens": self._max_summary_length},
         )
diff --git a/graphrag/index/reporting/blob_workflow_callbacks.py b/graphrag/index/reporting/blob_workflow_callbacks.py
index 28f0b6d991..ea80daef68 100644
--- a/graphrag/index/reporting/blob_workflow_callbacks.py
+++ b/graphrag/index/reporting/blob_workflow_callbacks.py
@@ -78,7 +78,7 @@ def _write_log(self, log: dict[str, Any]):
         blob_client = self._blob_service_client.get_blob_client(
             self._container_name, self._blob_name
         )
-        blob_client.append_block(json.dumps(log) + "\n")
+        blob_client.append_block(json.dumps(log, ensure_ascii=False) + "\n")
 
         # update the blob's block count
         self._num_blocks += 1
diff --git a/graphrag/index/reporting/file_workflow_callbacks.py b/graphrag/index/reporting/file_workflow_callbacks.py
index e659c4f644..5f43e7934d 100644
--- a/graphrag/index/reporting/file_workflow_callbacks.py
+++ b/graphrag/index/reporting/file_workflow_callbacks.py
@@ -34,13 +34,16 @@ def on_error(
     ):
         """Handle when an error occurs."""
         self._out_stream.write(
-            json.dumps({
-                "type": "error",
-                "data": message,
-                "stack": stack,
-                "source": str(cause),
-                "details": details,
-            })
+            json.dumps(
+                {
+                    "type": "error",
+                    "data": message,
+                    "stack": stack,
+                    "source": str(cause),
+                    "details": details,
+                },
+                ensure_ascii=False,
+            )
             + "\n"
         )
         message = f"{message} details={details}"
@@ -49,14 +52,21 @@
     def on_warning(self, message: str, details: dict | None = None):
         """Handle when a warning occurs."""
         self._out_stream.write(
-            json.dumps({"type": "warning", "data": message, "details": details}) + "\n"
+            json.dumps(
+                {"type": "warning", "data": message, "details": details},
+                ensure_ascii=False,
+            )
+            + "\n"
        )
         _print_warning(message)
 
     def on_log(self, message: str, details: dict | None = None):
         """Handle when a log message is produced."""
         self._out_stream.write(
-            json.dumps({"type": "log", "data": message, "details": details}) + "\n"
+            json.dumps(
+                {"type": "log", "data": message, "details": details}, ensure_ascii=False
+            )
+            + "\n"
         )
         message = f"{message} details={details}"
 
diff --git a/graphrag/index/run.py b/graphrag/index/run.py
index 94a519de87..b179f12c39 100644
--- a/graphrag/index/run.py
+++ b/graphrag/index/run.py
@@ -234,7 +234,9 @@ async def run_pipeline(
     )
 
     async def dump_stats() -> None:
-        await storage.set("stats.json", json.dumps(asdict(stats), indent=4))
+        await storage.set(
+            "stats.json", json.dumps(asdict(stats), indent=4, ensure_ascii=False)
+        )
 
     async def load_table_from_storage(name: str) -> pd.DataFrame:
         if not await storage.has(name):
diff --git a/graphrag/index/text_splitting/text_splitting.py b/graphrag/index/text_splitting/text_splitting.py
index 0badc8977c..c65515da5b 100644
--- a/graphrag/index/text_splitting/text_splitting.py
+++ b/graphrag/index/text_splitting/text_splitting.py
@@ -224,7 +224,7 @@ def _append_to_result(self, chunk_list: list[str], new_chunk: list[str]):
         """Append the current chunk to the result."""
         if new_chunk and len(new_chunk) > 0:
             if self._type == TextListSplitterType.JSON:
-                chunk_list.append(json.dumps(new_chunk))
+                chunk_list.append(json.dumps(new_chunk, ensure_ascii=False))
             else:
                 chunk_list.append(self._output_delimiter.join(new_chunk))
 
diff --git a/graphrag/index/verbs/graph/report/strategies/graph_intelligence/run_graph_intelligence.py b/graphrag/index/verbs/graph/report/strategies/graph_intelligence/run_graph_intelligence.py
index 2f47fa8858..d9a5235b8f 100644
--- a/graphrag/index/verbs/graph/report/strategies/graph_intelligence/run_graph_intelligence.py
+++ b/graphrag/index/verbs/graph/report/strategies/graph_intelligence/run_graph_intelligence.py
@@ -82,7 +82,7 @@ async def _run_extractor(
             rank_explanation=report.get("rating_explanation", ""),
             summary=report.get("summary", ""),
             findings=report.get("findings", []),
-            full_content_json=json.dumps(report, indent=4),
+            full_content_json=json.dumps(report, indent=4, ensure_ascii=False),
         )
     except Exception as e:
         log.exception("Error processing community: %s", community)
diff --git a/graphrag/index/verbs/snapshot_rows.py b/graphrag/index/verbs/snapshot_rows.py
index 99aae70a04..6c6c16653d 100644
--- a/graphrag/index/verbs/snapshot_rows.py
+++ b/graphrag/index/verbs/snapshot_rows.py
@@ -49,9 +49,11 @@ def get_row_name(row: Any, row_idx: Any):
             if fmt.format == "json":
                 await storage.set(
                     f"{row_name}.{extension}",
-                    json.dumps(row[column])
-                    if column is not None
-                    else json.dumps(row.to_dict()),
+                    (
+                        json.dumps(row[column], ensure_ascii=False)
+                        if column is not None
+                        else json.dumps(row.to_dict(), ensure_ascii=False)
+                    ),
                 )
             elif fmt.format == "text":
                 if column is None:
@@ -65,9 +67,11 @@ def get_row_name(row: Any, row_idx: Any):
 def _parse_formats(formats: list[str | dict[str, Any]]) -> list[FormatSpecifier]:
     """Parse the formats into a list of FormatSpecifiers."""
     return [
-        FormatSpecifier(**fmt)
-        if isinstance(fmt, dict)
-        else FormatSpecifier(format=fmt, extension=_get_format_extension(fmt))
+        (
+            FormatSpecifier(**fmt)
+            if isinstance(fmt, dict)
+            else FormatSpecifier(format=fmt, extension=_get_format_extension(fmt))
+        )
         for fmt in formats
     ]
 