refactor: replace console with CLIInterface
These changes replace the Console class from the rich library with a custom CLIInterface for displaying messages. This makes the code more modular and allows the output format to be adjusted in one place in the future.
runner committed Dec 25, 2024
1 parent 51eef7f commit 10f3967
Showing 4 changed files with 21 additions and 27 deletions.
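
The new aicmt/cli_interface.py module is imported throughout but is not itself part of this diff. As orientation only, here is a minimal sketch of what CLIInterface might look like, inferred from the call sites below: the methods are invoked at class level, errors were previously styled red, warnings yellow, and info printed plain. Everything here (the shared _console attribute, the exact styling, the "Error:" prefix) is an assumption, not the committed implementation.

from rich.console import Console


class CLIInterface:
    """Hypothetical sketch only -- the real module is not shown in this commit."""

    _console = Console()  # assumed shared console; the actual module may differ

    @classmethod
    def display_info(cls, message: str) -> None:
        # Plain informational output, replacing bare console.print(...) calls.
        cls._console.print(message)

    @classmethod
    def display_warning(cls, message: str) -> None:
        # Yellow styling, matching the [yellow]...[/yellow] markup it replaces.
        cls._console.print(f"[yellow]{message}[/yellow]")

    @classmethod
    def display_error(cls, message: str) -> None:
        # Red styling with an "Error:" prefix, inferred from the cli.py and
        # ai_analyzer.py call sites, which drop that prefix from the message.
        cls._console.print(f"[red]Error: {message}[/red]")

Centralizing the markup this way is what lets the call sites in the diff below pass plain strings.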
20 changes: 9 additions & 11 deletions aicmt/ai_analyzer.py
@@ -2,9 +2,7 @@
 import json
 from openai import OpenAI, OpenAIError
 from .config import load_config
-from rich.console import Console
-
-console = Console()
+from .cli_interface import CLIInterface


 class AIAnalyzer:
@@ -36,13 +34,13 @@ def _client(self):
     def analyze_changes(self, changes: list) -> List[Dict]:
         """Analyze changes and suggest commit groupings"""
         if not changes:
-            console.print("No changes to analyze, returning empty list")
+            CLIInterface.display_info("No changes to analyze, returning empty list")
             return []

         try:
             self._client()
             if not self.client:
-                console.print("[red]Error: Failed to initialize the OpenAI client[/red]")
+                CLIInterface.display_error("Failed to initialize the OpenAI client")
                 return []

             def process_batch(batch_changes):
@@ -77,7 +75,7 @@ def process_batch(batch_changes):
             if "maximum context length" not in str(e).lower():
                 raise e

-            console.print("[yellow]Changes exceed token limit, switching to batch processing...[/yellow]")
+            CLIInterface.display_warning("Changes exceed token limit, switching to batch processing...")

            # If failed, switch to smart batch processing
            all_results = []
@@ -116,7 +114,7 @@ def process_batch(batch_changes):
                except OpenAIError as batch_e:
                    if "maximum context length" in str(batch_e).lower():
                        batch_size = max(1, batch_size // 2)
-                        console.print(f"[yellow]Reducing batch size to {batch_size} and retrying...[/yellow]")
+                        CLIInterface.display_warning(f"Reducing batch size to {batch_size} and retrying...")
                        continue
                    raise batch_e

@@ -192,20 +190,20 @@ def _generate_system_prompt(self) -> str:
         # Get analysis prompt from config
         prompt = self.CONFIG.get("analysis_prompt", "")
         if not prompt:
-            console.print("[yellow]Warning: Analysis prompt not found, using default value[/yellow]")
+            CLIInterface.display_warning("Analysis prompt not found, using default value")

         # Add commit number to prompt
         if self.CONFIG.get("num_commits"):
-            console.print(f"[yellow] Set commit num: {self.CONFIG['num_commits']} [/yellow]")
+            CLIInterface.display_warning(f" Set commit num: {self.CONFIG['num_commits']} ")
             prompt += f"\nImportant: You must group the changes into exactly {self.CONFIG['num_commits']} commits."

         # Validate prompt format
         if not isinstance(prompt, str):
-            console.print("Invalid prompt type: %s", type(prompt))
+            CLIInterface.display_error(f"Invalid prompt type: {type(prompt)}")
             raise ValueError("Prompt must be a string type")

         if len(prompt.strip()) < 10:
-            console.print("[yellow]Warning: Prompt is too short, may affect analysis quality[/yellow]")
+            CLIInterface.display_warning("Prompt is too short, may affect analysis quality")

         return prompt
2 changes: 1 addition & 1 deletion aicmt/cli.py
@@ -77,5 +77,5 @@ def cli():
         assistant.run()
     except Exception as e:
         # Other runtime errors
-        console.print(f"[bold red]Error:[/bold red] {str(e)}")
+        CLIInterface.display_error(str(e))
         sys.exit(1)
16 changes: 7 additions & 9 deletions aicmt/config.py
@@ -1,10 +1,8 @@
 import configparser
 from pathlib import Path
 from typing import Dict, Any
-from rich.console import Console
 from .cli_args import parse_args
-
-console = Console()
+from .cli_interface import CLIInterface

 # Default configuration settings for OpenAI API integration and prompt templates
 _DEFAULT_CONFIG = {
@@ -103,17 +101,17 @@ def _parse_config_file(config_path: Path) -> Dict[str, Any]:
                result[key] = "\n".join(processed_lines)

    except configparser.Error as e:
-        console.print(
-            f"[yellow]Warning: Failed to parse config file {config_path}[/yellow]\n"
+        CLIInterface.display_warning(
+            f"Warning: Failed to parse config file {config_path}\n"
            f"[blue]Error message: {str(e)}[/blue]\n"
            "[yellow]Please check the following:[/yellow]\n"
            "1. Ensure the config file format is correct\n"
            "2. Check if all configuration items have correct section tags (e.g., [openai], [prompts])\n"
            "3. Ensure the file is saved with UTF-8 encoding"
        )
    except Exception as e:
-        console.print(
-            f"[yellow]Warning: Unexpected error occurred while reading config file {config_path}[/yellow]\n"
+        CLIInterface.display_warning(
+            f"Warning: Unexpected error occurred while reading config file {config_path}\n"
            f"[blue]Error message: {str(e)}[/blue]\n"
            "[yellow]Suggested actions:[/yellow]\n"
            "1. Check file permissions\n"
@@ -177,7 +175,7 @@ def load_config() -> Dict[str, Any]:
    try:
        validate_config(config)
    except ValueError as e:
-        console.print(f"[red]Configuration Error:[/red] {str(e)}")
+        CLIInterface.display_error(f"Configuration Error: {str(e)}")
        raise

    return config
@@ -238,4 +236,4 @@ def validate_config(config: Dict[str, Any]):
    prompt = config.get("analysis_prompt")
    if prompt:
        if len(prompt.strip()) < 10:
-            console.print(f"[yellow]Warning: analysis prompt is too short " f"({len(prompt.strip())} characters), " "this may affect analysis quality[/yellow]")
+            CLIInterface.display_warning(f"Warning: analysis prompt is too short " f"({len(prompt.strip())} characters), " "this may affect analysis quality")
10 changes: 4 additions & 6 deletions aicmt/git_operations.py
@@ -2,11 +2,9 @@
 from typing import List, NamedTuple, Optional, Tuple, Union, Any
 import git
 from git import Repo
-from rich.console import Console
 from pathlib import Path
 from enum import Enum
-
-console = Console()
+from .cli_interface import CLIInterface


 class Change(NamedTuple):
@@ -52,7 +50,7 @@ def safe_file_operation(file_path: Union[str, Path]) -> Any:
    except UnicodeDecodeError:
        return FileStatus.NEW_BINARY, BINARY_MESSAGE
    except IOError as e:
-        console.print(f"[red]Error reading file {file_path}: {str(e)}[/red]")
+        CLIInterface.display_error(f"Error reading file {file_path}: {str(e)}")
        raise


@@ -133,7 +131,7 @@ def get_unstaged_changes(self) -> List[Change]:
                    insertions, deletions = (0, 0) if diff.startswith("[") else self._calculate_diff_stats(diff)
                    changes.append(Change(file=item.a_path, status=file_status, diff=diff, insertions=insertions, deletions=deletions))
                except Exception as e:
-                    console.print(f"[yellow]Warning: Could not process {item.a_path}: {str(e)}[/yellow]")
+                    CLIInterface.display_warning(f"Warning: Could not process {item.a_path}: {str(e)}")

        # Handle untracked files separately
        for file_path in self.repo.untracked_files:
@@ -143,7 +141,7 @@ def get_unstaged_changes(self) -> List[Change]:
                insertions = len(diff.splitlines()) if not diff.startswith("[") else 0
                changes.append(Change(file=file_path, status=file_status, diff=diff, insertions=insertions, deletions=0))
            except Exception as e:
-                console.print(f"[yellow]Warning: Could not process {file_path}: {str(e)}[/yellow]")
+                CLIInterface.display_warning(f"Warning: Could not process {file_path}: {str(e)}")

        return changes
