Initial commit: AI Shell v0.1.0

- AI-powered shell command generator using DeepSeek V3
- Support for natural language to shell command conversion
- Secure configuration management with .env files
- Package structure with uv tool installation support
- Chinese and English language support
- Configuration validation and error handling
This commit is contained in:
2025-07-12 22:06:15 +08:00
commit 644071850a
21 changed files with 3252 additions and 0 deletions

14
ai_shell/__init__.py Normal file
View File

@@ -0,0 +1,14 @@
"""
AI Shell - AI-powered shell command generator using DeepSeek V3
A command-line tool that generates shell commands from natural language descriptions.
"""
__version__ = "0.1.0"
__author__ = "AI Shell Team"
__email__ = "ai-shell@example.com"
__description__ = "AI-powered shell command generator using DeepSeek V3"
from .main import main
__all__ = ["main"]

74
ai_shell/agent.py Normal file
View File

@@ -0,0 +1,74 @@
"""
AI Agent module for shell command generation
"""
from textwrap import dedent
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from .config import get_model, setup_environment
from .models import Answer
# System prompt for the AI agent
SYSTEM_PROMPT = dedent(
"""\
You are a professional developer specializing in shell commands.
Your task is to generate the correct shell commands based on the
user's request.
IMPORTANT: ALWAYS USE THE SAME LANGUAGE AS THE USER PROMPT IN
YOUR RESPONSE.
Process:
1. Think Aloud: Use the `think` function to explain your reasoning.
Justify why you chose a particular command, considering efficiency,
safety, and best practices.
2. Provide the Final Command: Use the `answer` function to present
the final shell command concisely.
"""
)
def create_agent() -> Agent:
    """Create and configure the AI agent.

    Returns:
        Agent: A pydantic_ai agent bound to the configured OpenAI-compatible
        model, with the `think` and `answer` tools registered.
    """
    # Push the configured key/base URL into the standard OpenAI env vars
    # before the model client is constructed; raises ValueError when the
    # API key is not configured.
    setup_environment()
    # Create OpenAI compatible model; the model name comes from config
    # (AI_SHELL_MODEL or its default).
    model = OpenAIModel(get_model())
    # Create agent
    agent = Agent(
        model=model,
        system_prompt=SYSTEM_PROMPT,
        output_type=Answer,
    )
    # Register tools.
    # NOTE(review): pydantic_ai typically forwards tool docstrings to the
    # model as tool descriptions, so these docstrings are runtime behavior —
    # do not reword them casually.
    @agent.tool_plain
    def think(s: str) -> None:
        """Communicate your thought process to the user.
        Args:
            s (str): A description of your reasoning or decision-making process.
        """
        print(f"(AI Thinking): {s}\n")

    @agent.tool_plain
    def answer(success: bool, cmd: str | None, failure: str | None) -> Answer:
        """Provide the final shell command or explain why it couldn't be generated.
        Args:
            success (bool): Indicates whether a shell command was successfully generated.
            cmd (str | None): The generated shell command if `success` is True.
            It must be a single-line command. If `success` is False, this should be None.
            failure (str | None): If `success` is False, provide a reason why the command
            could not be generated. If `success` is True, this should be None.
        Returns:
            Answer: A structured response that will be processed for the user.
        """
        return Answer(success, cmd, failure)

    return agent

77
ai_shell/config.py Normal file
View File

@@ -0,0 +1,77 @@
"""
Configuration module for AI Shell
"""
import os
from pathlib import Path
# python-dotenv is an optional dependency: when it is missing, load_dotenv
# is set to None and .env files are silently ignored (see load_env_file).
try:
    from dotenv import load_dotenv
except ImportError:
    load_dotenv = None
# Load .env file if it exists
def load_env_file() -> None:
    """Load environment variables from the first .env file that exists."""
    if load_dotenv is None:
        # python-dotenv is not installed; .env files are ignored.
        return
    # Candidate locations, highest priority first: current working
    # directory, the package checkout root, then the user config dir.
    candidates = (
        Path.cwd() / ".env",
        Path(__file__).parent.parent / ".env",
        Path.home() / ".ai-shell" / ".env",
    )
    env_file = next((p for p in candidates if p.exists()), None)
    if env_file is not None:
        load_dotenv(env_file)
# Load .env file on import, so the os.getenv() calls in the getter
# functions below already see any values it defines.
load_env_file()
# Default API configuration (fallback values).
# DEFAULT_API_KEY is a placeholder sentinel, not a usable key:
# get_api_key() refuses to return it.
DEFAULT_API_KEY = "your_api_key_here"
DEFAULT_BASE_URL = "https://api.openai.com/v1/"
DEFAULT_MODEL = "gpt-3.5-turbo"


def get_api_key() -> str:
    """Return the configured API key.

    Returns:
        str: The value of the AI_SHELL_API_KEY environment variable.

    Raises:
        ValueError: If the key is unset, empty, or still the placeholder.
    """
    api_key = os.getenv("AI_SHELL_API_KEY", DEFAULT_API_KEY)
    # Reject empty strings as well as the placeholder: AI_SHELL_API_KEY=""
    # would otherwise be returned as a "valid" key and fail much later with
    # an opaque HTTP auth error.
    if not api_key or api_key == DEFAULT_API_KEY:
        raise ValueError(
            "API key not configured. Please set AI_SHELL_API_KEY in .env file or environment variable."
        )
    return api_key
def get_base_url() -> str:
    """Return the API base URL (AI_SHELL_BASE_URL, or the default when unset)."""
    configured = os.getenv("AI_SHELL_BASE_URL")
    return DEFAULT_BASE_URL if configured is None else configured
def get_model() -> str:
    """Return the model name (AI_SHELL_MODEL, or the default when unset)."""
    configured = os.getenv("AI_SHELL_MODEL")
    return DEFAULT_MODEL if configured is None else configured
def get_timeout() -> int:
    """Return the request timeout in seconds (AI_SHELL_TIMEOUT, default 30)."""
    raw = os.getenv("AI_SHELL_TIMEOUT", "30")
    # Raises ValueError on a non-numeric value, same as the direct int() call.
    return int(raw)
def get_max_retries() -> int:
    """Return the maximum retry count (AI_SHELL_MAX_RETRIES, default 3)."""
    raw = os.getenv("AI_SHELL_MAX_RETRIES", "3")
    return int(raw)
def setup_environment() -> None:
    """Export the configured key/URL as the standard OpenAI env variables.

    Raises ValueError (from get_api_key) when no API key is configured;
    in that case neither variable is written.
    """
    # NOTE(review): presumably the OpenAI-compatible client reads these
    # standard variables — confirm against the model construction in agent.py.
    os.environ.update(
        OPENAI_API_KEY=get_api_key(),
        OPENAI_BASE_URL=get_base_url(),
    )
def validate_config() -> bool:
    """Return True when every required setting resolves without error."""
    try:
        # get_api_key raises ValueError when unconfigured; the other two
        # always succeed but are checked for symmetry.
        for getter in (get_api_key, get_base_url, get_model):
            getter()
    except ValueError:
        return False
    return True

140
ai_shell/main.py Normal file
View File

@@ -0,0 +1,140 @@
"""
Main entry point for AI Shell
"""
import os
import sys
import argparse
from typing import List, Optional
from .agent import create_agent
from . import __version__
def execute_command(command: str) -> int:
    """Execute a shell command in the system shell.

    Args:
        command (str): The shell command line to run.

    Returns:
        int: The raw exit status from os.system() (0 on success). Previously
        the status was discarded; returning it is backward-compatible since
        existing callers ignore the return value.

    NOTE(review): the command originates from AI output and is gated only by
    user confirmation; os.system passes it to the shell verbatim.
    """
    return os.system(command)
def get_user_confirmation(command: str) -> bool:
    """Show the generated command and ask the user whether to run it."""
    print(f"(AI Answer): {command}")
    reply = input("Execute? [Y/n]: ").strip().lower()
    # Pressing Enter with no input counts as consent.
    return reply in ("", "y", "yes")
def process_prompt(prompt: str) -> None:
    """Generate a shell command for `prompt` and run it after confirmation.

    Exits with status 1 on empty prompts, generation failure, or any error
    raised while talking to the model.
    """
    if not prompt.strip():
        print("Error: No prompt provided")
        sys.exit(1)

    # Create AI agent
    agent = create_agent()
    try:
        ans = agent.run_sync(prompt).output
        if not (ans.success and ans.cmd is not None):
            # Generation failed: surface the model's explanation and bail.
            # sys.exit raises SystemExit (a BaseException), so it is not
            # swallowed by the except Exception below.
            print(f"(AI Answer): {ans.failure}")
            print("Command generation failed")
            sys.exit(1)
        if get_user_confirmation(ans.cmd):
            execute_command(ans.cmd)
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)
def create_parser() -> argparse.ArgumentParser:
    """Build the command line argument parser for the `ai` tool."""
    usage_examples = (
        "Examples:\n"
        " ai list files\n"
        " ai \"show disk usage\"\n"
        " ai 显示当前目录\n"
    )
    parser = argparse.ArgumentParser(
        prog="ai",
        description="AI-powered shell command generator using DeepSeek V3",
        epilog=usage_examples,
        # Raw formatter keeps the example newlines intact in --help output.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Zero or more words; main() joins them into a single prompt string.
    parser.add_argument(
        "prompt",
        nargs="*",
        help="Natural language description of the command you want",
    )
    parser.add_argument(
        "--version",
        action="version",
        version=f"ai-shell {__version__}",
    )
    parser.add_argument(
        "--config",
        action="store_true",
        help="Show current configuration",
    )
    return parser
def show_config() -> None:
    """Show current configuration.

    Prints the resolved model/URL/key/timeout settings, an overall validity
    status, and a reference of the supported environment variables.
    """
    # Lazy import of the config helpers (mirrors the lazy validate_config
    # import in main()).
    from .config import get_api_key, get_base_url, get_model, get_timeout, get_max_retries, validate_config
    print("AI Shell Configuration:")
    print(f" Model: {get_model()}")
    print(f" Base URL: {get_base_url()}")
    try:
        api_key = get_api_key()
        # Mask the key for display.
        # NOTE(review): for keys shorter than 12 characters the prefix and
        # suffix overlap and may reveal the entire key — confirm acceptable.
        print(f" API Key: {api_key[:8]}...{api_key[-4:]}")
    except ValueError as e:
        # get_api_key raises when the key is unset/placeholder.
        print(f" API Key: ❌ {e}")
    print(f" Timeout: {get_timeout()}s")
    print(f" Max Retries: {get_max_retries()}")
    print(f"\nConfiguration Status: {'✅ Valid' if validate_config() else '❌ Invalid'}")
    print("\nConfiguration Sources (in priority order):")
    print(" 1. Environment variables")
    print(" 2. .env file in current directory")
    print(" 3. .env file in package directory")
    print(" 4. ~/.ai-shell/.env file")
    print(" 5. Default values")
    print("\nEnvironment Variables:")
    print(" AI_SHELL_API_KEY - API key")
    print(" AI_SHELL_BASE_URL - API base URL")
    print(" AI_SHELL_MODEL - Model name")
    print(" AI_SHELL_TIMEOUT - Request timeout (seconds)")
    print(" AI_SHELL_MAX_RETRIES - Maximum retry attempts")
def main() -> None:
    """CLI entry point: parse arguments, validate config, run the prompt."""
    args = create_parser().parse_args()

    # --config short-circuits everything else.
    if args.config:
        show_config()
        return

    # No prompt words given: show usage and fail.
    if not args.prompt:
        parser = create_parser()
        parser.print_help()
        sys.exit(1)

    # Validate configuration before processing
    from .config import validate_config
    if not validate_config():
        print("❌ Configuration error: API key not configured.")
        print("Please set AI_SHELL_API_KEY in .env file or environment variable.")
        print("Run 'ai --config' to see current configuration.")
        sys.exit(1)

    # Join all prompt arguments into a single string
    process_prompt(" ".join(args.prompt))
if __name__ == "__main__":
main()

13
ai_shell/models.py Normal file
View File

@@ -0,0 +1,13 @@
"""
Data models for AI Shell
"""
from dataclasses import dataclass
from typing import Optional
@dataclass
class Answer:
    """Response model for AI-generated shell commands.

    Per the `answer` tool contract in agent.py, exactly one of `cmd` /
    `failure` is expected to be populated depending on `success`
    (not enforced here).
    """
    # True when a shell command was generated.
    success: bool
    # The generated single-line shell command; None on failure.
    cmd: Optional[str]
    # Human-readable reason generation failed; None on success.
    failure: Optional[str]