Initial commit with Python .gitignore

commit 90719b8416
2025-10-16 12:17:34 +08:00
19 changed files with 3387 additions and 0 deletions

scientific_surfing/__init__.py Normal file
View File

@@ -0,0 +1,11 @@
"""
Scientific Surfing - A Python package for surfing internet scientifically.
This package provides tools and utilities for surfing internet scientifically,
including clash rss subscription support, custom routing rules, and others.
"""
__version__ = "0.1.0"
__author__ = "Scientific Surfing Team"
__email__ = "team@scientific-surfing.com"

scientific_surfing/__main__.py Normal file
View File

@@ -0,0 +1,8 @@
"""
Entry point for python -m scientific_surfing
"""
from scientific_surfing.cli import main
if __name__ == '__main__':
main()

scientific_surfing/cli.py Normal file
View File

@@ -0,0 +1,177 @@
"""
Command-line interface for scientific-surfing package.
"""
import argparse
import sys
from scientific_surfing.subscription_manager import SubscriptionManager
def create_parser() -> argparse.ArgumentParser:
"""Create the argument parser."""
parser = argparse.ArgumentParser(
description="Scientific Surfing - CLI for managing clash RSS subscriptions"
)
subparsers = parser.add_subparsers(dest='command', help='Available commands')
# Subscription commands
subscription_parser = subparsers.add_parser('subscription', help='Manage subscriptions')
subscription_subparsers = subscription_parser.add_subparsers(dest='subcommand', help='Subscription operations')
# Add subscription command
add_parser = subscription_subparsers.add_parser('add', help='Add a new subscription')
add_parser.add_argument('name', help='Custom name for the subscription')
add_parser.add_argument('url', help='Clash RSS subscription URL')
# Refresh subscription command
refresh_parser = subscription_subparsers.add_parser('refresh', help='Refresh a subscription')
refresh_parser.add_argument('name', help='Name of the subscription to refresh')
# Delete subscription command (rm)
delete_parser = subscription_subparsers.add_parser('rm', help='Delete a subscription')
delete_parser.add_argument('name', help='Name of the subscription to delete')
# Rename subscription command
rename_parser = subscription_subparsers.add_parser('rename', help='Rename a subscription')
rename_parser.add_argument('name', help='Current name of the subscription')
rename_parser.add_argument('new_name', help='New name for the subscription')
# Activate subscription command
activate_parser = subscription_subparsers.add_parser('activate', help='Activate a subscription')
activate_parser.add_argument('name', help='Name of the subscription to activate')
# List subscriptions command
list_parser = subscription_subparsers.add_parser('list', help='List all subscriptions')
# Storage info command
storage_parser = subscription_subparsers.add_parser('storage', help='Show storage information')
# Core config commands
core_config_parser = subparsers.add_parser('core-config', help='Manage core configuration')
core_config_subparsers = core_config_parser.add_subparsers(dest='core_config_command', help='Configuration operations')
# Import config
import_parser = core_config_subparsers.add_parser('import', help='Import configuration from file')
import_parser.add_argument('source', help='Path to configuration file to import')
# Export config
export_parser = core_config_subparsers.add_parser('export', help='Export configuration to file')
export_parser.add_argument('destination', help='Path to save configuration file')
# Edit config
edit_parser = core_config_subparsers.add_parser('edit', help='Edit configuration with system editor')
# Reset config
reset_parser = core_config_subparsers.add_parser('reset', help='Reset configuration to default values')
# Show config
show_parser = core_config_subparsers.add_parser('show', help='Show current configuration')
# Apply config
apply_parser = core_config_subparsers.add_parser('apply', help='Apply active subscription to generate final config')
# Upgrade mihomo binary
upgrade_parser = core_config_subparsers.add_parser('upgrade', help='Download and upgrade mihomo binary from GitHub releases')
upgrade_parser.add_argument('--version', help='Specific version to download (e.g., v1.18.5). If not specified, downloads latest')
upgrade_parser.add_argument('--force', action='store_true', help='Force download even if binary already exists')
# Core commands
core_parser = subparsers.add_parser('core', help='Manage scientific-surfing core components')
core_subparsers = core_parser.add_subparsers(dest='core_command', help='Core operations')
# Update core command
update_parser = core_subparsers.add_parser('update', help='Update scientific-surfing core components')
update_parser.add_argument('--version', help='Specific version to download (e.g., v1.18.5). If not specified, downloads latest')
update_parser.add_argument('--force', action='store_true', help='Force update even if binary already exists')
return parser
def main() -> None:
"""Main CLI entry point."""
parser = create_parser()
args = parser.parse_args()
if not args.command:
parser.print_help()
return
try:
if args.command == 'subscription':
if not hasattr(args, 'subcommand') or not args.subcommand:
parser.parse_args(['subscription', '--help'])
return
manager = SubscriptionManager()
if args.subcommand == 'add':
manager.add_subscription(args.name, args.url)
elif args.subcommand == 'refresh':
manager.refresh_subscription(args.name)
elif args.subcommand == 'rm':
manager.delete_subscription(args.name)
elif args.subcommand == 'rename':
manager.rename_subscription(args.name, args.new_name)
elif args.subcommand == 'activate':
manager.activate_subscription(args.name)
elif args.subcommand == 'list':
manager.list_subscriptions()
elif args.subcommand == 'storage':
manager.show_storage_info()
else:
parser.parse_args(['subscription', '--help'])
elif args.command == 'core-config':
if not hasattr(args, 'core_config_command') or not args.core_config_command:
parser.parse_args(['core-config', '--help'])
return
from scientific_surfing.corecfg_manager import CoreConfigManager
from scientific_surfing.core_manager import CoreManager
core_config_manager = CoreConfigManager()
core_manager = CoreManager(core_config_manager)
if args.core_config_command == 'import':
core_config_manager.import_config(args.source)
elif args.core_config_command == 'export':
core_config_manager.export_config(args.destination)
elif args.core_config_command == 'edit':
core_config_manager.edit_config()
elif args.core_config_command == 'reset':
core_config_manager.reset_config()
elif args.core_config_command == 'show':
core_config_manager.show_config()
elif args.core_config_command == 'apply':
core_config_manager.apply()
elif args.core_config_command == 'upgrade':
core_manager.update(version=args.version, force=args.force)
else:
parser.parse_args(['core-config', '--help'])
elif args.command == 'core':
if not hasattr(args, 'core_command') or not args.core_command:
parser.parse_args(['core', '--help'])
return
from scientific_surfing.corecfg_manager import CoreConfigManager
from scientific_surfing.core_manager import CoreManager
core_config_manager = CoreConfigManager()
core_manager = CoreManager(core_config_manager)
if args.core_command == 'update':
core_manager.update(version=args.version, force=args.force)
else:
parser.parse_args(['core', '--help'])
else:
parser.print_help()
except KeyboardInterrupt:
print("\n❌ Operation cancelled by user")
sys.exit(1)
except Exception as e:
print(f"❌ Error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
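
A quick way to see the command surface this parser defines is to feed sample argv lists to create_parser(). A minimal sketch, with no command actually executed; the subscription name and URL are invented examples:

# Sketch: exercising the argument parser from cli.py without running any command.
# The subscription name 'office' and the URL are hypothetical examples.
from scientific_surfing.cli import create_parser

parser = create_parser()

args = parser.parse_args(['subscription', 'add', 'office', 'https://example.com/clash.yaml'])
assert (args.command, args.subcommand) == ('subscription', 'add')
assert (args.name, args.url) == ('office', 'https://example.com/clash.yaml')

args = parser.parse_args(['core', 'update', '--version', 'v1.18.5', '--force'])
assert (args.command, args.core_command) == ('core', 'update')
assert args.version == 'v1.18.5' and args.force is True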

scientific_surfing/core_manager.py Normal file
View File

@@ -0,0 +1,435 @@
"""
User configuration manager for scientific-surfing.
Handles user preferences with import, export, and edit operations.
"""
import os
import platform
import gzip
import zipfile
import shutil
import subprocess
import signal
from typing import Optional, Dict, Any
import requests
from pathlib import Path
from scientific_surfing.corecfg_manager import CoreConfigManager
class CoreManager:
"""Manages user configuration with import, export, and edit operations."""
def __init__(self, core_config_manager: CoreConfigManager):
self.core_config_manager = core_config_manager
    def update(self, version: Optional[str] = None, force: bool = False) -> bool:
        """
        Download and update the mihomo binary from GitHub releases.

        Args:
            version: Specific version to download (e.g., 'v1.18.5'). If None, downloads latest.
            force: Force download even if binary already exists.

        Returns:
            bool: True if update successful, False otherwise.
        """
        try:
            # Determine current OS and architecture
            system = platform.system().lower()
            machine = platform.machine().lower()

            # Map platform to mihomo binary naming (base name without extension)
            platform_map = {
                'windows': {
                    'amd64': 'mihomo-windows-amd64',
                    '386': 'mihomo-windows-386',
                    'arm64': 'mihomo-windows-arm64',
                    'arm': 'mihomo-windows-arm32v7'
                },
                'linux': {
                    'amd64': 'mihomo-linux-amd64',
                    '386': 'mihomo-linux-386',
                    'arm64': 'mihomo-linux-arm64',
                    'arm': 'mihomo-linux-armv7'
                },
                'darwin': {
                    'amd64': 'mihomo-darwin-amd64',
                    'arm64': 'mihomo-darwin-arm64'
                }
            }

            # Normalize architecture names
            arch_map = {
                'x86_64': 'amd64',
                'amd64': 'amd64',
                'i386': '386',
                'i686': '386',
                'arm64': 'arm64',
                'aarch64': 'arm64',
                'armv7l': 'arm',
                'arm': 'arm'
            }

            if system not in platform_map:
                print(f"❌ Unsupported operating system: {system}")
                return False

            normalized_arch = arch_map.get(machine, machine)
            if normalized_arch not in platform_map[system]:
                print(f"❌ Unsupported architecture: {machine} ({normalized_arch})")
                return False

            binary_name = platform_map[system][normalized_arch]

            # Set up directories
            binary_dir = self.core_config_manager.storage.config_dir / "bin"
            binary_dir.mkdir(parents=True, exist_ok=True)
            binary_path = binary_dir / ("mihomo.exe" if system == "windows" else "mihomo")

            # Check if binary already exists
            if binary_path.exists() and not force:
                print(f" Binary already exists at: {binary_path}")
                print(" Use --force to overwrite")
                return True

            # Get release info
            if version:
                # Specific version
                release_url = f"https://api.github.com/repos/MetaCubeX/mihomo/releases/tags/{version}"
            else:
                # Latest release
                release_url = "https://api.github.com/repos/MetaCubeX/mihomo/releases/latest"

            print(f"[INFO] Fetching release info from: {release_url}")
            headers = {
                'Accept': 'application/vnd.github.v3+json',
                'User-Agent': 'scientific-surfing/1.0'
            }
            response = requests.get(release_url, headers=headers, timeout=30)
            response.raise_for_status()
            release_data = response.json()
            release_version = release_data['tag_name']
            print(f"[INFO] Found release: {release_version}")

            # Find the correct asset
            assets = release_data.get('assets', [])
            target_asset = None

            # Determine file extension based on system
            file_extension = '.zip' if system == 'windows' else '.gz'
            expected_filename = f"{binary_name}-{release_version}{file_extension}"

            # Look for an exact match first
            for asset in assets:
                if asset['name'] == expected_filename:
                    target_asset = asset
                    break

            # Fall back to prefix matching if no exact match is found
            if not target_asset:
                binary_name_prefix = f"{binary_name}-{release_version}"
                for asset in assets:
                    if asset['name'].startswith(binary_name_prefix) and (asset['name'].endswith('.gz') or asset['name'].endswith('.zip')):
                        target_asset = asset
                        break

            if not target_asset:
                print(f"[ERROR] Binary not found for {system}/{normalized_arch}: {expected_filename}")
                print("Available binaries:")
                for asset in assets:
                    if 'mihomo' in asset['name'] and (asset['name'].endswith('.gz') or asset['name'].endswith('.zip')):
                        print(f" - {asset['name']}")
                return False

            # Download the compressed file
            download_url = target_asset['browser_download_url']
            compressed_filename = target_asset['name']
            print(f"[DOWNLOAD] Downloading: {compressed_filename}")
            print(f" Size: {target_asset['size']:,} bytes")
            download_response = requests.get(download_url, stream=True, timeout=60)
            download_response.raise_for_status()

            # Download to a temporary file
            temp_compressed_path = binary_path.with_suffix(f".tmp{file_extension}")
            temp_extracted_path = binary_path.with_suffix('.tmp')
            with open(temp_compressed_path, 'wb') as f:
                for chunk in download_response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

            # Verify download
            if temp_compressed_path.stat().st_size != target_asset['size']:
                temp_compressed_path.unlink()
                print("[ERROR] Download verification failed - size mismatch")
                return False

            # Extract the binary
            try:
                if file_extension == '.gz':
                    # Extract .gz file
                    with gzip.open(temp_compressed_path, 'rb') as f_in:
                        with open(temp_extracted_path, 'wb') as f_out:
                            shutil.copyfileobj(f_in, f_out)
                elif file_extension == '.zip':
                    # Extract .zip file
                    with zipfile.ZipFile(temp_compressed_path, 'r') as zip_ref:
                        # Find the executable file in the zip
                        file_info = zip_ref.filelist[0]
                        with zip_ref.open(file_info.filename) as f_in:
                            with open(temp_extracted_path, 'wb') as f_out:
                                shutil.copyfileobj(f_in, f_out)
                else:
                    raise ValueError(f"Unsupported file format: {file_extension}")
            except Exception as e:
                temp_compressed_path.unlink()
                if temp_extracted_path.exists():
                    temp_extracted_path.unlink()
                print(f"[ERROR] Failed to extract binary: {e}")
                return False

            # Clean up compressed file
            temp_compressed_path.unlink()

            # Make executable on Unix-like systems
            if system != 'windows':
                os.chmod(temp_extracted_path, 0o755)

            # Move to final location
            if binary_path.exists():
                backup_path = binary_path.with_suffix('.backup')
                binary_path.rename(backup_path)
                print(f"[INFO] Backed up existing binary to: {backup_path}")
            temp_extracted_path.rename(binary_path)

            print(f"[SUCCESS] Successfully updated mihomo {release_version}")
            print(f" Location: {binary_path}")
            print(f" Size: {binary_path.stat().st_size:,} bytes")
            return True
        except requests.exceptions.RequestException as e:
            print(f"[ERROR] Network error: {e}")
            return False
        except Exception as e:
            print(f"[ERROR] Upgrade failed: {e}")
            return False
    def daemon(self, config_path: Optional[str] = None) -> bool:
        """
        Run the mihomo executable as a daemon with the generated configuration.

        Args:
            config_path: Path to the configuration file. If None, uses generated_config.yaml.

        Returns:
            bool: True if daemon started successfully, False otherwise.
        """
        try:
            # Determine binary path
            system = platform.system().lower()
            binary_dir = self.core_config_manager.storage.config_dir / "bin"
            binary_path = binary_dir / ("mihomo.exe" if system == "windows" else "mihomo")
            if not binary_path.exists():
                print(f"❌ Mihomo binary not found at: {binary_path}")
                print(" Run 'core update' to download the binary first.")
                return False

            # Determine config path
            if config_path is None:
                config_file = self.core_config_manager.storage.config_dir / "generated_config.yaml"
            else:
                config_file = Path(config_path)
            if not config_file.exists():
                print(f"❌ Configuration file not found: {config_file}")
                print(" Run 'core-config apply' to generate the configuration first.")
                return False

            print("[INFO] Starting mihomo daemon...")
            print(f" Binary: {binary_path}")
            print(f" Config: {config_file}")

            # Prepare command
            cmd = [
                str(binary_path),
                "-f", str(config_file),
                "-d", str(self.core_config_manager.storage.config_dir)
            ]

            # Start the process
            if system == "windows":
                # Windows: CREATE_NEW_PROCESS_GROUP detaches the child from this console's Ctrl+C handling
                creation_flags = subprocess.CREATE_NEW_PROCESS_GROUP if hasattr(subprocess, 'CREATE_NEW_PROCESS_GROUP') else 0
                process = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    creationflags=creation_flags,
                    cwd=str(self.core_config_manager.storage.config_dir)
                )
            else:
                # Unix-like systems: start in a new session so the daemon outlives this process
                process = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    preexec_fn=os.setsid if hasattr(os, 'setsid') else None,
                    cwd=str(self.core_config_manager.storage.config_dir)
                )

            # Check whether the process started successfully: if it is still
            # alive after a short grace period, assume the start succeeded
            try:
                return_code = process.wait(timeout=2)
                stdout, stderr = process.communicate()
                print(f"❌ Failed to start daemon (exit code: {return_code})")
                if stderr:
                    print(f" Error: {stderr.decode().strip()}")
                return False
            except subprocess.TimeoutExpired:
                # Process is still running, which is good
                pass

            # Save PID for later management
            pid_file = self.core_config_manager.storage.config_dir / "mihomo.pid"
            with open(pid_file, 'w') as f:
                f.write(str(process.pid))
            print(f"✅ Daemon started successfully (PID: {process.pid})")
            print(f" PID file: {pid_file}")
            return True
        except Exception as e:
            print(f"❌ Failed to start daemon: {e}")
            return False
    def stop_daemon(self) -> bool:
        """
        Stop the running mihomo daemon.

        Returns:
            bool: True if daemon stopped successfully, False otherwise.
        """
        try:
            pid_file = self.core_config_manager.storage.config_dir / "mihomo.pid"
            if not pid_file.exists():
                print("❌ No daemon appears to be running (PID file not found)")
                return False
            with open(pid_file, 'r') as f:
                pid = int(f.read().strip())

            system = platform.system().lower()
            try:
                if system == "windows":
                    # Windows: use taskkill
                    subprocess.run(["taskkill", "/F", "/PID", str(pid)],
                                   check=True, capture_output=True, text=True)
                else:
                    # Unix-like systems: ask politely first
                    os.kill(pid, signal.SIGTERM)
                    # Give the process a moment to exit, then check whether it is still running
                    time.sleep(1)
                    try:
                        os.kill(pid, 0)  # Signal 0 just checks if the process exists
                        # Process still exists, escalate to SIGKILL
                        os.kill(pid, signal.SIGKILL)
                    except ProcessLookupError:
                        # Process already terminated
                        pass
                pid_file.unlink()
                print(f"✅ Daemon stopped successfully (PID: {pid})")
                return True
            except (ProcessLookupError, subprocess.CalledProcessError):
                # Process not found, clean up PID file
                pid_file.unlink()
                print(" Daemon was not running, cleaned up PID file")
                return True
        except Exception as e:
            print(f"❌ Failed to stop daemon: {e}")
            return False
    def daemon_status(self) -> Dict[str, Any]:
        """
        Get the status of the mihomo daemon.

        Returns:
            Dict containing daemon status information.
        """
        status = {
            "running": False,
            "pid": None,
            "binary_path": None,
            "config_path": None,
            "error": None
        }
        try:
            pid_file = self.core_config_manager.storage.config_dir / "mihomo.pid"
            if not pid_file.exists():
                status["error"] = "PID file not found"
                return status
            with open(pid_file, 'r') as f:
                pid = int(f.read().strip())

            # Check if process is running
            system = platform.system().lower()
            try:
                if system == "windows":
                    # Windows: use tasklist
                    result = subprocess.run(["tasklist", "/FI", f"PID eq {pid}"],
                                            capture_output=True, text=True)
                    if str(pid) in result.stdout:
                        status["running"] = True
                        status["pid"] = pid
                    else:
                        status["error"] = "Process not found"
                        pid_file.unlink()  # Clean up stale PID file
                else:
                    # Unix-like systems: use kill with signal 0
                    os.kill(pid, 0)  # Signal 0 just checks if the process exists
                    status["running"] = True
                    status["pid"] = pid
            except (ProcessLookupError, subprocess.CalledProcessError):
                status["error"] = "Process not found"
                pid_file.unlink()  # Clean up stale PID file
        except Exception as e:
            status["error"] = str(e)

        # Add binary and config paths
        system = platform.system().lower()
        binary_path = self.core_config_manager.storage.config_dir / "bin" / ("mihomo.exe" if system == "windows" else "mihomo")
        config_path = self.core_config_manager.storage.config_dir / "generated_config.yaml"
        status["binary_path"] = str(binary_path) if binary_path.exists() else None
        status["config_path"] = str(config_path) if config_path.exists() else None
        return status
def deep_merge(dict1, dict2):
    """Recursively merge dict2 into dict1; dict2 values win on scalar conflicts."""
    for k, v in dict2.items():
        if k in dict1 and isinstance(dict1[k], dict) and isinstance(v, dict):
            dict1[k] = deep_merge(dict1[k], v)
        elif k in dict1 and isinstance(dict1[k], list) and isinstance(v, list):
            dict1[k].extend(v)  # Example: extend lists. Adjust logic for other list merging needs.
        else:
            dict1[k] = v
    return dict1
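
A plausible lifecycle for the pieces above, as a minimal sketch. It assumes the package is installed, the host can reach GitHub, and that 'core-config apply' has already produced generated_config.yaml; otherwise the methods print a hint and return False:

# Sketch: one plausible CoreManager lifecycle (download, start, inspect, stop).
from scientific_surfing.corecfg_manager import CoreConfigManager
from scientific_surfing.core_manager import CoreManager

manager = CoreManager(CoreConfigManager())
if manager.update():                    # fetch the mihomo binary if needed
    if manager.daemon():                # start mihomo with the generated config
        print(manager.daemon_status())  # {'running': True, 'pid': ..., ...}
        manager.stop_daemon()           # SIGTERM, then SIGKILL if still alive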

scientific_surfing/corecfg_manager.py Normal file
View File

@@ -0,0 +1,370 @@
"""
User configuration manager for scientific-surfing.
Handles user preferences with import, export, and edit operations.
"""
import os
import shutil
import subprocess
import sys
from pathlib import Path
import yaml
from scientific_surfing.models import Config
from scientific_surfing.storage import StorageManager
class CoreConfigManager:
"""Manages user configuration with import, export, and edit operations."""
def __init__(self):
self.storage = StorageManager()
self.config_file = self.storage.config_dir / "core-config.yaml"
self.default_config_path = Path(__file__).parent / "templates" / "default-core-config.yaml"
    def _ensure_config_exists(self) -> bool:
        """Ensure core-config.yaml exists; create it from the default template if not."""
        if not self.config_file.exists():
            if self.default_config_path.exists():
                self.storage.config_dir.mkdir(parents=True, exist_ok=True)
                shutil.copy2(self.default_config_path, self.config_file)
                print(f"✅ Created default config at: {self.config_file}")
                return True
            else:
                print("❌ Default config template not found")
                return False
        return True
    def load_config(self) -> dict:
        """Load configuration from the YAML file."""
        if not self.config_file.exists():
            return {}
        try:
            with open(self.config_file, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f)
            if isinstance(data, dict):
                return data
            return {}
        except (yaml.YAMLError, IOError) as e:
            print(f"Warning: Failed to load config: {e}")
            return {}

    def save_config(self, config: dict) -> bool:
        """Save configuration to the YAML file."""
        try:
            with open(self.config_file, 'w', encoding='utf-8') as f:
                # config is already a plain dict, ready for YAML serialization
                yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
            return True
        except (yaml.YAMLError, IOError, ValueError) as e:
            print(f"Error: Failed to save config: {e}")
            return False
    def import_config(self, source_path: str) -> bool:
        """Import configuration from a YAML file."""
        source = Path(source_path)
        if not source.exists():
            print(f"❌ Source file not found: {source_path}")
            return False
        try:
            with open(source, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f)
            if not isinstance(data, dict):
                print("❌ Invalid YAML format")
                return False
            # Validate with the Pydantic model (unknown keys are ignored by default)
            Config(**data)
            # Save the plain dict as the user config
            self.save_config(data)
            print(f"✅ Imported configuration from: {source_path}")
            return True
        except yaml.YAMLError as e:
            print(f"❌ Invalid YAML: {e}")
            return False
        except Exception as e:
            print(f"❌ Failed to import: {e}")
            return False

    def export_config(self, destination_path: str) -> bool:
        """Export the current configuration to a YAML file."""
        destination = Path(destination_path)
        try:
            config = self.load_config()
            # Ensure the destination directory exists
            destination.parent.mkdir(parents=True, exist_ok=True)
            # Export as YAML (load_config already returns a plain dict)
            with open(destination, 'w', encoding='utf-8') as f:
                yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
            print(f"✅ Exported configuration to: {destination_path}")
            return True
        except Exception as e:
            print(f"❌ Failed to export: {e}")
            return False
    def edit_config(self) -> bool:
        """Edit the configuration using the system default editor."""
        if not self._ensure_config_exists():
            return False

        # Get the system editor
        editor = os.environ.get('EDITOR') or os.environ.get('VISUAL')
        if not editor:
            # Try common editors
            for cmd in ['code', 'subl', 'atom', 'vim', 'nano', 'notepad']:
                if shutil.which(cmd):
                    editor = cmd
                    break
        if not editor:
            print("❌ No editor found. Please set the EDITOR or VISUAL environment variable")
            return False

        try:
            # Back up the current config
            backup_path = self.config_file.with_suffix('.yaml.backup')
            if self.config_file.exists():
                shutil.copy2(self.config_file, backup_path)

            # Open the editor
            subprocess.run([editor, str(self.config_file)], check=True)

            # Validate the edited config by parsing it
            try:
                yaml.safe_load(self.config_file.read_text(encoding='utf-8'))
                print("✅ Configuration edited successfully")
                return True
            except yaml.YAMLError as e:
                # Restore the backup if validation fails
                if backup_path.exists():
                    shutil.copy2(backup_path, self.config_file)
                print(f"❌ Invalid configuration: {e}")
                print("🔄 Restored previous configuration")
                return False
        except subprocess.CalledProcessError:
            print("❌ Editor command failed")
            return False
        except Exception as e:
            print(f"❌ Failed to edit configuration: {e}")
            return False
    def reset_config(self) -> bool:
        """Reset configuration to default values."""
        if self.default_config_path.exists():
            shutil.copy2(self.default_config_path, self.config_file)
            print("✅ Configuration reset to default values")
            return True
        else:
            print("❌ Default config template not found")
            return False

    def show_config(self) -> None:
        """Display the current configuration."""
        config = self.load_config()
        print("⚙️ Current Configuration:")
        print(f" Auto-refresh: {config.get('auto_refresh', False)}")
        print(f" Refresh interval: {config.get('refresh_interval_hours', 24)} hours")
        print(f" User-Agent: {config.get('default_user_agent', 'scientific-surfing/0.1.0')}")
        print(f" Timeout: {config.get('timeout_seconds', 30)} seconds")

    def get_config(self) -> Config:
        """Get the current configuration as a validated model."""
        return Config(**self.load_config())

    def update_config(self, **kwargs) -> bool:
        """Update specific configuration values."""
        config = self.load_config()
        for key, value in kwargs.items():
            if key in Config.__fields__:
                config[key] = value
            else:
                print(f"⚠️ Unknown configuration key: {key}")
                return False
        return self.save_config(config)
    def _execute_hook(self, hook_path: Path, config_file_path: Path) -> bool:
        """Execute a hook script with the generated config file path."""
        if not hook_path.exists():
            return False
        try:
            # Determine the interpreter based on file extension and platform
            if hook_path.suffix.lower() == '.py':
                cmd = [sys.executable, str(hook_path), str(config_file_path)]
            elif hook_path.suffix.lower() == '.js':
                cmd = ['node', str(hook_path), str(config_file_path)]
            elif hook_path.suffix.lower() == '.nu':
                cmd = ['nu', str(hook_path), str(config_file_path)]
            else:
                if os.name != 'nt':
                    # On Unix-like systems, execute directly
                    cmd = [str(hook_path), str(config_file_path)]
                    # Make sure the script is executable
                    os.chmod(hook_path, 0o755)
                else:
                    # On Windows, try to execute directly (batch files, etc.)
                    cmd = [str(hook_path), str(config_file_path)]

            print(f"🔧 Executing hook: {hook_path.name}")
            result = subprocess.run(
                cmd,
                cwd=hook_path.parent,
                capture_output=True,
                text=True,
                timeout=30
            )
            if result.returncode == 0:
                print(f"✅ Hook executed successfully: {hook_path.name}")
                if result.stdout.strip():
                    print(f" Output: {result.stdout.strip()}")
                return True
            else:
                print(f"❌ Hook failed: {hook_path.name}")
                if result.stderr.strip():
                    print(f" Error: {result.stderr.strip()}")
                return False
        except subprocess.TimeoutExpired:
            print(f"⏰ Hook timed out: {hook_path.name}")
            return False
        except Exception as e:
            print(f"❌ Failed to execute hook {hook_path.name}: {e}")
            return False

    def _execute_hooks(self, config_file_path: Path) -> None:
        """Execute all hooks in the hooks directory after config generation."""
        hooks_dir = self.storage.config_dir / "hooks"
        if not hooks_dir.exists():
            return
        # Look for core_config_generated.* files
        hook_files = list(hooks_dir.glob("core_config_generated.*"))
        if not hook_files:
            return
        print(f"🔧 Found {len(hook_files)} hook(s) to execute")
        # Sort hooks for a consistent execution order
        hook_files.sort()
        for hook_file in hook_files:
            self._execute_hook(hook_file, config_file_path)
    def apply(self) -> bool:
        """Apply the active subscription to generate the final config file."""
        from scientific_surfing.subscription_manager import SubscriptionManager

        # Load the current configuration
        config = self.load_config()

        # Load subscriptions to get the active subscription
        subscription_manager = SubscriptionManager()
        active_subscription = subscription_manager.subscriptions_data.get_active_subscription()
        if not active_subscription:
            print("❌ No active subscription found")
            return False
        if not active_subscription.file_path or not Path(active_subscription.file_path).exists():
            print("❌ Active subscription file not found. Please refresh the subscription first.")
            return False

        try:
            # Load the subscription content
            with open(active_subscription.file_path, 'r', encoding='utf-8') as f:
                subscription_content = f.read()

            # Parse the subscription YAML
            subscription_data = yaml.safe_load(subscription_content)
            if not isinstance(subscription_data, dict):
                subscription_data = {}

            # Create the final config by merging the user config over the subscription
            final_config = deep_merge(subscription_data, config)

            external_ui = final_config.get("external-ui")
            if external_ui:
                final_config["external-ui"] = os.path.join(self.storage.config_dir, external_ui)

            # Define essential defaults that should be present in any Clash config
            essential_defaults = {
                'port': 7890,
                'socks-port': 7891,
                'mixed-port': 7890,
                'allow-lan': False,
                'mode': 'rule',
                'log-level': 'info',
                'external-controller': '127.0.0.1:9090',
                'ipv6': True,
            }

            # Add missing essential keys
            for key, default_value in essential_defaults.items():
                if key not in final_config:
                    final_config[key] = default_value

            # Ensure a basic DNS configuration exists if not provided by the subscription
            if 'dns' not in final_config:
                final_config['dns'] = {
                    'enable': True,
                    'listen': '0.0.0.0:53',
                    'enhanced-mode': 'fake-ip',
                    'fake-ip-range': '198.18.0.1/16',
                    'nameserver': [
                        'https://doh.pub/dns-query',
                        'https://dns.alidns.com/dns-query'
                    ],
                    'fallback': [
                        'https://1.1.1.1/dns-query',
                        'https://8.8.8.8/dns-query'
                    ]
                }

            # Generate the final config file
            generated_path = self.storage.config_dir / "generated_config.yaml"
            with open(generated_path, 'w', encoding='utf-8') as f:
                yaml.dump(final_config, f, default_flow_style=False, allow_unicode=True)
            print(f"✅ Generated final configuration: {generated_path}")
            print(f" Active subscription: {active_subscription.name}")
            print(f" Source file: {active_subscription.file_path}")

            # Execute hooks after successful config generation
            self._execute_hooks(generated_path)
            return True
        except yaml.YAMLError as e:
            print(f"❌ Invalid YAML in subscription: {e}")
            return False
        except Exception as e:
            print(f"❌ Failed to apply configuration: {e}")
            return False
def deep_merge(dict1, dict2):
    """Recursively merge dict2 into dict1; dict2 values win on scalar conflicts."""
    for k, v in dict2.items():
        if k in dict1 and isinstance(dict1[k], dict) and isinstance(v, dict):
            dict1[k] = deep_merge(dict1[k], v)
        elif k in dict1 and isinstance(dict1[k], list) and isinstance(v, list):
            dict1[k].extend(v)  # Example: extend lists. Adjust logic for other list merging needs.
        else:
            dict1[k] = v
    return dict1
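
The merge order in apply() matters: the user's core-config overlay is passed as dict2, so its scalars win, nested dicts merge recursively, and lists concatenate. A small sketch with invented data:

# Sketch: deep_merge semantics with invented data.
from scientific_surfing.corecfg_manager import deep_merge

subscription = {'mixed-port': 7890, 'dns': {'enable': True, 'nameserver': ['https://doh.pub/dns-query']}}
overlay = {'mixed-port': 7893, 'dns': {'nameserver': ['https://dns.alidns.com/dns-query']}}

merged = deep_merge(subscription, overlay)
assert merged['mixed-port'] == 7893               # overlay scalar wins
assert merged['dns']['enable'] is True            # untouched nested key survives
assert len(merged['dns']['nameserver']) == 2      # lists are extended, not replaced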

scientific_surfing/models.py Normal file
View File

@@ -0,0 +1,107 @@
"""
Pydantic models for scientific-surfing data structures.
"""
from datetime import datetime
from typing import Dict, List, Optional
from pydantic import BaseModel, Field, validator
class Subscription(BaseModel):
"""Model for a single subscription."""
name: str = Field(..., description="Name of the subscription")
url: str = Field(..., description="Clash RSS subscription URL")
status: str = Field(default="inactive", description="Status: active or inactive")
last_refresh: Optional[datetime] = Field(default=None, description="Last refresh timestamp")
file_path: Optional[str] = Field(default=None, description="Path to downloaded file")
file_size: Optional[int] = Field(default=None, description="Size of downloaded file in bytes")
status_code: Optional[int] = Field(default=None, description="HTTP status code of last refresh")
content_hash: Optional[int] = Field(default=None, description="Hash of downloaded content")
last_error: Optional[str] = Field(default=None, description="Last error message if any")
@validator('status')
def validate_status(cls, v):
if v not in ['active', 'inactive']:
raise ValueError('Status must be either "active" or "inactive"')
return v
class Config:
json_encoders = {
datetime: lambda v: v.isoformat() if v else None
}
class Config(BaseModel):
"""Model for application configuration."""
auto_refresh: bool = Field(default=False, description="Auto-refresh subscriptions")
refresh_interval_hours: int = Field(default=24, description="Refresh interval in hours")
default_user_agent: str = Field(
default="scientific-surfing/0.1.0",
description="Default User-Agent for HTTP requests"
)
timeout_seconds: int = Field(default=30, description="HTTP request timeout in seconds")
@validator('refresh_interval_hours')
def validate_refresh_interval(cls, v):
if v < 1:
raise ValueError('Refresh interval must be at least 1 hour')
return v
@validator('timeout_seconds')
def validate_timeout(cls, v):
if v < 1:
raise ValueError('Timeout must be at least 1 second')
return v
class SubscriptionsData(BaseModel):
"""Model for the entire subscriptions collection."""
subscriptions: Dict[str, Subscription] = Field(default_factory=dict)
def get_active_subscription(self) -> Optional[Subscription]:
"""Get the currently active subscription."""
for subscription in self.subscriptions.values():
if subscription.status == 'active':
return subscription
return None
def set_active(self, name: str) -> bool:
"""Set a subscription as active and deactivate others."""
if name not in self.subscriptions:
return False
for sub_name, subscription in self.subscriptions.items():
subscription.status = 'active' if sub_name == name else 'inactive'
return True
def add_subscription(self, name: str, url: str) -> Subscription:
"""Add a new subscription."""
subscription = Subscription(name=name, url=url)
# If this is the first subscription, set it as active
if not self.subscriptions:
subscription.status = 'active'
self.subscriptions[name] = subscription
return subscription
def remove_subscription(self, name: str) -> bool:
"""Remove a subscription."""
if name not in self.subscriptions:
return False
del self.subscriptions[name]
return True
def rename_subscription(self, old_name: str, new_name: str) -> bool:
"""Rename a subscription."""
if old_name not in self.subscriptions or new_name in self.subscriptions:
return False
subscription = self.subscriptions.pop(old_name)
subscription.name = new_name
self.subscriptions[new_name] = subscription
return True
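
The activation invariant (exactly one active subscription, with the first one added becoming active) is easy to check in isolation; a minimal sketch with made-up names and URLs:

# Sketch: the single-active invariant of SubscriptionsData, with invented data.
from scientific_surfing.models import SubscriptionsData

data = SubscriptionsData()
data.add_subscription('home', 'https://example.com/a.yaml')  # first one auto-activates
data.add_subscription('work', 'https://example.com/b.yaml')  # added as inactive
assert data.get_active_subscription().name == 'home'

data.set_active('work')                                      # flips every other status
assert data.get_active_subscription().name == 'work'
assert data.subscriptions['home'].status == 'inactive'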

scientific_surfing/storage.py Normal file
View File

@@ -0,0 +1,114 @@
"""
Cross-platform data storage for scientific-surfing.
Handles configuration and subscription data storage using YAML format.
"""
import os
import platform
import yaml
from pathlib import Path
from typing import Optional, Dict
from scientific_surfing.models import SubscriptionsData
class StorageManager:
"""Manages cross-platform data storage for subscriptions and configuration."""
def __init__(self):
self.config_dir = self._get_config_dir()
self.config_file = self.config_dir / "config.yaml"
self.subscriptions_file = self.config_dir / "subscriptions.yaml"
self._ensure_config_dir()
def _get_config_dir(self) -> Path:
"""Get the appropriate configuration directory for the current platform."""
system = platform.system().lower()
if system == "windows":
# Windows: %APPDATA%/scientific_surfing
app_data = os.environ.get("APPDATA")
if app_data:
return Path(app_data) / "scientific_surfing"
else:
return Path.home() / "AppData" / "Roaming" / "scientific_surfing"
elif system == "darwin":
# macOS: ~/Library/Application Support/scientific_surfing
return Path.home() / "Library" / "Application Support" / "scientific_surfing"
else:
# Linux and other Unix-like systems: ~/.config/scientific_surfing
xdg_config_home = os.environ.get("XDG_CONFIG_HOME")
if xdg_config_home:
return Path(xdg_config_home) / "scientific_surfing"
else:
return Path.home() / ".config" / "scientific_surfing"
def _ensure_config_dir(self) -> None:
"""Ensure the configuration directory exists."""
self.config_dir.mkdir(parents=True, exist_ok=True)
def load_subscriptions(self) -> SubscriptionsData:
"""Load subscriptions from YAML file."""
if not self.subscriptions_file.exists():
return SubscriptionsData()
try:
with open(self.subscriptions_file, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
if isinstance(data, dict):
return SubscriptionsData(**data)
return SubscriptionsData()
except (yaml.YAMLError, IOError) as e:
print(f"Warning: Failed to load subscriptions: {e}")
return SubscriptionsData()
def save_subscriptions(self, subscriptions: SubscriptionsData) -> bool:
"""Save subscriptions to YAML file."""
try:
with open(self.subscriptions_file, 'w', encoding='utf-8') as f:
# Convert Pydantic model to dict for YAML serialization
data = subscriptions.dict()
yaml.dump(data, f, default_flow_style=False, allow_unicode=True)
return True
except (yaml.YAMLError, IOError, ValueError) as e:
print(f"Error: Failed to save subscriptions: {e}")
return False
def load_config(self) -> dict:
"""Load configuration from YAML file."""
if not self.config_file.exists():
return {}
try:
with open(self.config_file, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
if isinstance(data, dict):
return data
return {}
except (yaml.YAMLError, IOError) as e:
print(f"Warning: Failed to load config: {e}")
return {}
def save_config(self, config: dict) -> bool:
"""Save configuration to YAML file."""
try:
with open(self.config_file, 'w', encoding='utf-8') as f:
# Convert Pydantic model to dict for YAML serialization
data = config
yaml.dump(data, f, default_flow_style=False, allow_unicode=True)
return True
except (yaml.YAMLError, IOError, ValueError) as e:
print(f"Error: Failed to save config: {e}")
return False
def get_storage_info(self) -> Dict[str, str]:
"""Get information about the storage location."""
return {
'config_dir': str(self.config_dir),
'config_file': str(self.config_file),
'subscriptions_file': str(self.subscriptions_file),
'platform': platform.system(),
'exists': str(self.config_dir.exists())
}
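
The directory resolution above can be observed directly; note that instantiating the manager creates the config directory as a side effect. A sketch:

# Sketch: where StorageManager puts its data on this machine.
from scientific_surfing.storage import StorageManager

info = StorageManager().get_storage_info()
print(info['platform'])     # e.g. 'Linux'
print(info['config_dir'])   # ~/.config/scientific_surfing on Linux,
                            # %APPDATA%\scientific_surfing on Windows,
                            # ~/Library/Application Support/scientific_surfing on macOS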

scientific_surfing/subscription_manager.py Normal file
View File

@@ -0,0 +1,161 @@
"""
Subscription management module for scientific-surfing.
Handles subscription operations with persistent storage.
"""
import os
import requests
from datetime import datetime
from pathlib import Path
from typing import Optional
from scientific_surfing.storage import StorageManager
from scientific_surfing.models import Subscription, SubscriptionsData, Config
class SubscriptionManager:
"""Manages clash RSS subscriptions with persistent storage."""
storage: StorageManager = None
def __init__(self):
self.storage = StorageManager()
self.subscriptions_data = self.storage.load_subscriptions()
self.config = self.storage.load_config()
# Create subscriptions directory for storing downloaded files
self.subscriptions_dir = self.storage.config_dir / "subscriptions"
self.subscriptions_dir.mkdir(exist_ok=True)
    def add_subscription(self, name: str, url: str) -> None:
        """Add a new subscription."""
        self.subscriptions_data.add_subscription(name, url)
        if self.storage.save_subscriptions(self.subscriptions_data):
            self.refresh_subscription(name)
            print(f"✅ Added subscription: {name} -> {url}")
        else:
            print("❌ Failed to save subscription")
    def refresh_subscription(self, name: str) -> None:
        """Refresh a subscription by downloading it from its URL."""
        if name not in self.subscriptions_data.subscriptions:
            print(f"❌ Subscription '{name}' not found")
            return
        subscription = self.subscriptions_data.subscriptions[name]
        url = subscription.url
        print(f"🔄 Refreshing subscription: {name}")
        try:
            # Download the subscription content
            headers = {
                'User-Agent': self.config.default_user_agent
            }
            timeout = self.config.timeout_seconds
            response = requests.get(url, headers=headers, timeout=timeout)
            response.raise_for_status()

            # File path without timestamp
            file_path = self.subscriptions_dir / f"{name}.yml"

            # Handle an existing file by renaming it with its creation date
            if file_path.exists():
                # Get the creation time of the existing file
                stat = file_path.stat()
                try:
                    # Try st_birthtime first (macOS/BSD)
                    creation_time = datetime.fromtimestamp(stat.st_birthtime)
                except AttributeError:
                    # Fall back to st_ctime (Linux/Windows)
                    creation_time = datetime.fromtimestamp(stat.st_ctime)
                backup_name = f"{name}_{creation_time.strftime('%Y%m%d_%H%M%S')}.yml"
                backup_path = self.subscriptions_dir / backup_name
                # Rename the existing file
                file_path.rename(backup_path)
                print(f" 🔄 Backed up existing file to: {backup_name}")

            # Save the newly downloaded content
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(response.text)

            # Update subscription metadata
            subscription.last_refresh = datetime.now()
            subscription.file_path = str(file_path)
            subscription.file_size = file_path.stat().st_size
            subscription.status_code = response.status_code
            subscription.content_hash = hash(response.text)
            subscription.last_error = None
            if self.storage.save_subscriptions(self.subscriptions_data):
                print(f"✅ Subscription '{name}' refreshed successfully")
                print(f" 📁 Saved to: {file_path}")
                print(f" 📊 Size: {subscription.file_size} bytes")
            else:
                print("❌ Failed to save subscription metadata")
        except requests.exceptions.RequestException as e:
            print(f"❌ Failed to download subscription: {e}")
            subscription.last_error = str(e)
            self.storage.save_subscriptions(self.subscriptions_data)
        except IOError as e:
            print(f"❌ Failed to save file: {e}")
            subscription.last_error = str(e)
            self.storage.save_subscriptions(self.subscriptions_data)
    def delete_subscription(self, name: str) -> None:
        """Delete a subscription."""
        if self.subscriptions_data.remove_subscription(name):
            if self.storage.save_subscriptions(self.subscriptions_data):
                print(f"🗑️ Deleted subscription: {name}")
            else:
                print("❌ Failed to delete subscription")
        else:
            print(f"❌ Subscription '{name}' not found")

    def rename_subscription(self, old_name: str, new_name: str) -> None:
        """Rename a subscription."""
        if self.subscriptions_data.rename_subscription(old_name, new_name):
            if self.storage.save_subscriptions(self.subscriptions_data):
                print(f"✅ Renamed subscription: {old_name} -> {new_name}")
            else:
                print("❌ Failed to rename subscription")
        else:
            print(f"❌ Failed to rename subscription: '{old_name}' not found or '{new_name}' already exists")

    def activate_subscription(self, name: str) -> None:
        """Activate a subscription."""
        if self.subscriptions_data.set_active(name):
            if self.storage.save_subscriptions(self.subscriptions_data):
                print(f"✅ Activated subscription: {name}")
            else:
                print("❌ Failed to activate subscription")
        else:
            print(f"❌ Subscription '{name}' not found")
    def list_subscriptions(self) -> None:
        """List all subscriptions."""
        if not self.subscriptions_data.subscriptions:
            print("No subscriptions found")
            return
        print("📋 Subscriptions:")
        for name, subscription in self.subscriptions_data.subscriptions.items():
            # Mark the active subscription so it stands out in the list
            active_marker = "▶" if subscription.status == 'active' else " "
            last_refresh_str = ""
            if subscription.last_refresh:
                last_refresh_str = f" (last: {subscription.last_refresh.strftime('%Y-%m-%d %H:%M:%S')})"
            print(f" {active_marker} {name}: {subscription.url} ({subscription.status}){last_refresh_str}")

    def show_storage_info(self) -> None:
        """Show storage information."""
        info = self.storage.get_storage_info()
        print("📁 Storage Information:")
        print(f" Platform: {info['platform']}")
        print(f" Config Directory: {info['config_dir']}")
        print(f" Config File: {info['config_file']}")
        print(f" Subscriptions File: {info['subscriptions_file']}")
        print(f" Directory Exists: {info['exists']}")

scientific_surfing/templates/default-core-config.yaml Normal file
View File

@@ -0,0 +1,359 @@
#unified-delay: true
# port: 7890 # HTTP(S) proxy server port
# socks-port: 7891 # SOCKS5 proxy port
mixed-port: 7890 # Mixed port for HTTP(S) and SOCKS proxies
# redir-port: 7892 # Transparent proxy port, for Linux and macOS
# Transparent proxy server port for Linux (TProxy TCP and TProxy UDP)
# tproxy-port: 7893
allow-lan: false # Allow LAN connections
bind-address: "*" # Bind IP address; only effective when allow-lan is true. '*' means all addresses
authentication: # Username/password authentication for the HTTP and SOCKS inbounds
  - "username:password"
skip-auth-prefixes: # IP ranges that skip authentication
  - 127.0.0.1/8
  - ::1/128
lan-allowed-ips: # IP ranges allowed to connect; only effective when allow-lan is true. Defaults to 0.0.0.0/0 and ::/0
  - 0.0.0.0/0
  - ::/0
lan-disallowed-ips: # IP ranges forbidden to connect; the blacklist takes precedence over the whitelist. Empty by default
  - 192.168.0.3/32
# find-process-mode has 3 values: always, strict, off
# - always: enabled, force-match all processes
# - strict: default, let mihomo decide whether to enable it
# - off: do not match processes; recommended on routers
find-process-mode: strict
mode: rule
# Custom geodata URLs
geox-url:
  geoip: "https://fastly.jsdelivr.net/gh/MetaCubeX/meta-rules-dat@release/geoip.dat"
  geosite: "https://fastly.jsdelivr.net/gh/MetaCubeX/meta-rules-dat@release/geosite.dat"
  mmdb: "https://fastly.jsdelivr.net/gh/MetaCubeX/meta-rules-dat@release/geoip.metadb"
geo-auto-update: false # Whether to update geodata automatically
geo-update-interval: 24 # Update interval, in hours
# Matcher implementation used by GeoSite, available implementations:
# - succinct (default, same as rule-set)
# - mph (from V2Ray, also `hybrid` in Xray)
# geosite-matcher: succinct
log-level: debug # Log level: silent/error/warning/info/debug
ipv6: true # Master IPv6 switch; when off, all IPv6 connections are blocked and AAAA DNS queries are suppressed
tls:
  certificate: string # Certificate in PEM format, or a path to the certificate
  private-key: string # Matching private key in PEM format, or a path to the key
  # The two options below configure mTLS; if client-auth-type is "verify-if-given" or "require-and-verify", client-auth-cert must not be empty
  # client-auth-type: "" # One of: "", "request", "require-any", "verify-if-given", "require-and-verify"
  # client-auth-cert: string # Certificate in PEM format, or a path to the certificate
  # If set, ECH is enabled (a keypair can be generated with `mihomo generate ech-keypair <plaintext domain>`)
  # ech-key: |
  #   -----BEGIN ECH KEYS-----
  #   ACATwY30o/RKgD6hgeQxwrSiApLaCgU+HKh7B6SUrAHaDwBD/g0APwAAIAAgHjzK
  #   madSJjYQIf9o1N5GXjkW4DEEeb17qMxHdwMdNnwADAABAAEAAQACAAEAAwAIdGVz
  #   dC5jb20AAA==
  #   -----END ECH KEYS-----
  custom-certifactes:
    - |
      -----BEGIN CERTIFICATE-----
      format/pem...
      -----END CERTIFICATE-----
external-controller: 0.0.0.0:9097 # RESTful API listening address
external-controller-tls: 0.0.0.0:9443 # RESTful API HTTPS listening address; requires the tls section to be configured
# secret: "123456" # `Authorization: Bearer ${secret}`
secret: scientific_surfing-secret-dC5jb20AAA
# RESTful API CORS header configuration
external-controller-cors:
  # allow-origins: # permissive alternative to the explicit list below
  #   - "*"
  allow-private-network: true
  allow-origins:
    - tauri://localhost
    - http://tauri.localhost
    - https://yacd.metacubex.one
    - https://metacubex.github.io
    - https://board.zash.run.place
# RESTful API Unix socket listening address (also usable on Windows builds newer than 17063, i.e. version 1803/RS4 or later)
# !!! Note: API access through the Unix socket does not verify the secret; if you enable it, ensure its security yourself
# Test with: curl -v --unix-socket "mihomo.sock" http://localhost/
external-controller-unix: mihomo.sock
# RESTful API Windows named-pipe listening address
# !!! Note: API access through the Windows named pipe does not verify the secret; if you enable it, ensure its security yourself
# external-controller-pipe: \\.\pipe\mihomo
# external-controller-pipe: \\.\pipe\verge-mihomo
# tcp-concurrent: true # Attempt TCP connections to all resolved IPs concurrently and use the fastest handshake
# Web UI directory; visit it via http://{{external-controller}}/ui
external-ui: /path/to/ui/folder/
external-ui-name: xd
# Currently supports downloading zip and tgz archives
external-ui-url: "https://github.com/MetaCubeX/metacubexd/archive/refs/heads/gh-pages.zip"
# Serve DoH on the RESTful API port
# This URL does not verify the secret; if you enable it, ensure its security yourself
external-doh-server: /dns-query
# interface-name: en0 # Outbound network interface
# Global TLS fingerprint; lower priority than client-fingerprint inside a proxy
# Options: "chrome", "firefox", "safari", "ios", "random", "none"
# uTLS currently supports the TLS transport in TCP/gRPC/WS/HTTP for VLESS/VMess and Trojan
global-client-fingerprint: chrome
# TCP keep-alive interval
# disable-keep-alive: false # Currently forced to true on Android
# keep-alive-idle: 15
# keep-alive-interval: 15
# routing-mark: 6666 # Configure fwmark, Linux only
experimental:
  # Disable quic-go GSO support. This may result in reduced performance on Linux.
  # This is not recommended for most users.
  # Only users encountering issues with quic-go's internal implementation should enable this,
  # and they should disable it as soon as the issue is resolved.
  # This field will be removed when quic-go fixes all their issues in GSO.
  # This is equivalent to the environment variable QUIC_GO_DISABLE_GSO=1.
  #quic-go-disable-gso: true
# Similar to /etc/hosts; only a single IP is supported per entry
hosts:
  # '*.mihomo.dev': 127.0.0.1
  # '.dev': 127.0.0.1
  # 'alpha.mihomo.dev': '::1'
  # test.com: [1.1.1.1, 2.2.2.2]
  # home.lan: lan # lan is a special value that resolves to the addresses of all local interfaces
  # baidu.com: google.com # Only one alias may be configured
profile: # Persist select choices
  store-selected: false
  # Persist fake-ip mappings
  store-fake-ip: true
# Tun configuration
tun:
  enable: true
  device: Mihomo
  stack: mixed # gvisor/mixed
  dns-hijack:
    - any:53 # DNS to hijack
  auto-detect-interface: true # Auto-detect the outbound interface
  auto-route: true # Manage the routing table
  mtu: 1500 # Maximum transmission unit
  # gso: false # Enable generic segmentation offload, Linux only
  # gso-max-size: 65536 # Maximum size of a generic-segmentation-offload packet
  auto-redirect: false # Automatically configure iptables to redirect TCP connections. Linux only. auto-route with auto-redirect now works as expected on routers without intervention.
  strict-route: false # Route all connections through the tun to prevent leaks, but other devices will no longer be able to reach this one
  # disable-icmp-forwarding: true # Disable ICMP forwarding to prevent ICMP loopback issues in some cases; ping will no longer show real latency
  # route-address-set: # Add destination IP CIDR rules from the named rule-sets to the firewall; unmatched traffic bypasses routing. Linux only, requires nftables, and `auto-route` and `auto-redirect` must be enabled.
  #   - ruleset-1
  #   - ruleset-2
  # route-exclude-address-set: # Add destination IP CIDR rules from the named rule-sets to the firewall; matched traffic bypasses routing. Linux only, requires nftables, and `auto-route` and `auto-redirect` must be enabled.
  #   - ruleset-3
  #   - ruleset-4
  # route-address: # Use custom routes instead of the default route when auto-route is enabled
  #   - 0.0.0.0/1
  #   - 128.0.0.0/1
  #   - "::/1"
  #   - "8000::/1"
  # inet4-route-address: # Use custom IPv4 routes instead of the default route when auto-route is enabled (legacy syntax)
  #   - 0.0.0.0/1
  #   - 128.0.0.0/1
  # inet6-route-address: # Use custom IPv6 routes instead of the default route when auto-route is enabled (legacy syntax)
  #   - "::/1"
  #   - "8000::/1"
  # endpoint-independent-nat: false # Enable endpoint-independent NAT
  # include-interface: # Limit the interfaces that are routed. Unrestricted by default; conflicts with `exclude-interface`
  #   - "lan0"
  # exclude-interface: # Exclude interfaces from routing; conflicts with `include-interface`
  #   - "lan1"
  # include-uid: # UID rules are only supported on Linux and require auto-route
  #   - 0
  # include-uid-range: # Limit the range of users that are routed
  #   - 1000:9999
  # exclude-uid: # Users excluded from routing
  #   - 1000
  # exclude-uid-range: # User ranges excluded from routing
  #   - 1000:9999
  # Android user and app rules are only supported on Android
  # and require auto-route
  # include-android-user: # Limit the Android users that are routed
  #   - 0
  #   - 10
  # include-package: # Limit the Android package names that are routed
  #   - com.android.chrome
  # exclude-package: # Exclude Android package names from routing
  #   - com.android.captiveportallogin
# Domain sniffing, optional
sniffer:
  enable: false
  ## Force-sniff traffic identified as redir-host
  ## e.g. Tun, Redir, and TProxy inbounds with DNS in redir-host mode all qualify
  # force-dns-mapping: false
  ## Force-sniff all traffic for which no domain was obtained
  # parse-pure-ip: false
  # Whether to use the sniffed result as the actual destination, default true
  # Global setting; lower priority than the per-protocol sniffer.sniff settings
  override-destination: false
  sniff: # If ports are not configured, TLS and QUIC sniff port 443 by default
    QUIC:
      # ports: [ 443 ]
    TLS:
      # ports: [443, 8443]
    # Sniffs port 80 by default
    HTTP: # Ports to sniff
      ports: [80, 8080-8880]
      # Can override sniffer.override-destination
      override-destination: true
  force-domain:
    - +.v2ex.com
  # skip-src-address: # Skip sniffing for these source IPs
  #   - 192.168.0.3/32
  # skip-dst-address: # Skip sniffing for these destination IPs
  #   - 192.168.0.3/32
  ## Skip these sniffed results
  # skip-domain:
  #   - Mijia Cloud
  # Protocols to sniff
  # Deprecated; has no effect if sniffer.sniff is configured
  sniffing:
    - tls
    - http
  # Force-sniff these domains
  # Only sniff the whitelisted ports, defaults to 443 and 80
  # Deprecated; has no effect if sniffer.sniff is configured
  port-whitelist:
    - "80"
    - "443"
  # - 8000-9999
tunnels: # One-line config
  - tcp/udp,127.0.0.1:6553,114.114.114.114:53,proxy
  - tcp,127.0.0.1:6666,rds.mysql.com:3306,vpn
  # Full YAML config
  - network: [tcp, udp]
    address: 127.0.0.1:7777
    target: target.com
    proxy: proxy
# DNS configuration
dns:
  cache-algorithm: arc
  enable: false # When disabled, the system DNS is used
  prefer-h3: false # Whether DoH should attempt HTTP/3 (tried concurrently)
  listen: 0.0.0.0:53 # Listen address for the DNS server
  # ipv6: false # When false, AAAA queries return an empty result
  # ipv6-timeout: 300 # In ms; during internal dual-stack concurrency, how long to wait for AAAA when querying upstream, default 100ms
  # DNS servers used to resolve the domain names in nameserver, fallback, and other DNS server settings
  # Plain IP addresses only; encrypted DNS may be used
  default-nameserver:
    - 114.114.114.114
    - 8.8.8.8
    - tls://1.12.12.12:853
    - tls://223.5.5.5:853
    - system # Append DNS servers from the system configuration. If not found, it prints an error log and skips.
  enhanced-mode: fake-ip # or redir-host
  fake-ip-range: 198.18.0.1/16 # fake-ip pool
  # Domains that should not use fake-ip
  fake-ip-filter:
    - '*.lan'
    - localhost.ptlogin2.qq.com
    # fakeip-filter refers to the rule-providers entry named fakeip-filter,
    # whose behavior must be domain/classical; with classical, only domain-type rules take effect
    - rule-set:fakeip-filter
    # fakeip-filter refers to the geosite category named fakeip-filter (make sure the category exists)
    - geosite:fakeip-filter
  # Matching mode for fake-ip-filter; default is blacklist, i.e. no fake-ip is returned on a match
  # Can be set to whitelist, i.e. a fake-ip is returned only on a match
  fake-ip-filter-mode: blacklist
  # use-hosts: true # Look up hosts
  # Whether connections to the DNS servers in nameserver, nameserver-policy, and fallback follow the rules
  # If false (the default), these DNS servers connect directly unless specified otherwise
  # If true, connections are matched against the rules (proxy or direct); explicitly specified values still take precedence
  # Can only be enabled when proxy-server-nameserver is non-empty; strongly discouraged together with prefer-h3
  # Also, if any DNS server in these three settings is given as a domain name, it is resolved via default-nameserver, so make sure default-nameserver is configured correctly
  respect-rules: false
  # Main DNS configuration
  # Supports UDP, TCP, DoT, DoH, DoQ
  # This is the primary DNS setting and affects all direct connections; make sure to use DNS servers that resolve mainland-China domains accurately
  nameserver:
    - 114.114.114.114 # default value
    - 8.8.8.8 # default value
    - tls://223.5.5.5:853 # DNS over TLS
    - https://doh.pub/dns-query # DNS over HTTPS
    - https://dns.alidns.com/dns-query#h3=true # Force HTTP/3; independent of prefer-h3, this forces DoH over HTTP/3 for this server and fails if unsupported
    - https://mozilla.cloudflare-dns.com/dns-query#DNS&h3=true # Specify a proxy group and use HTTP/3
    - dhcp://en0 # DNS from DHCP
    - quic://dns.adguard.com:784 # DNS over QUIC
  # - '8.8.8.8#RULES' # Same effect as respect-rules, but only for this server
  # - '8.8.8.8#en0' # Compatibility syntax to specify the outbound interface for DNS
  # When fallback is configured, the IPs returned by nameserver are checked for being in CN (optional setting)
  # If not CN, the results from the fallback DNS servers are used instead
  # Make sure the fallback servers can be queried reliably
  # fallback:
  #   - tcp://1.1.1.1
  #   - 'tcp://1.1.1.1#ProxyGroupName' # Query this DNS through a proxy; ProxyGroupName is a proxy group or node name. Proxying takes precedence over the outbound-interface setting; if the group or node is not found, the outbound interface is used
  # DNS servers dedicated to resolving proxy-node domain names (optional); if unset, nameserver-policy, nameserver, and fallback apply
  # proxy-server-nameserver:
  #   - https://dns.google/dns-query
  #   - tls://one.one.one.one
  # DNS servers dedicated to resolving domains for direct outbound connections (optional); if unset, nameserver-policy, nameserver, and fallback apply
  # direct-nameserver:
  #   - system://
  # direct-nameserver-follow-policy: false # Whether direct-nameserver follows nameserver-policy; defaults to false and only takes effect when direct-nameserver is non-empty
  # Conditions for using fallback
  # fallback-filter:
  #   geoip: true # Whether to use geoip
  #   geoip-code: CN # When a nameserver result geolocates to CN, the fallback DNS results are not used
  #   # Force fallback for these geosite categories, taking precedence over the IP check; see the geosite library for the categories
  #   geosite:
  #     - gfw
  #   # If an IP does not match the ipcidr list, the nameserver results are used
  #   ipcidr:
  #     - 240.0.0.0/4
  #   domain:
  #     - '+.google.com'
  #     - '+.facebook.com'
  #     - '+.youtube.com'
  # Choose which DNS servers are used for which domains
  nameserver-policy:
    # 'www.baidu.com': '114.114.114.114'
    # '+.internal.crop.com': '10.0.0.1'
    "geosite:cn,private,apple":
      - https://doh.pub/dns-query
      - https://dns.alidns.com/dns-query
    "geosite:category-ads-all": rcode://success
    "www.baidu.com,+.google.cn": [223.5.5.5, https://dns.alidns.com/dns-query]
    ## global and dns refer to the rule-providers entries named global and dns,
    ## whose behavior must be domain/classical; with classical, only domain-type rules take effect
    # "rule-set:global,dns": 8.8.8.8

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env node
/**
 * Test hook script for core config generation (Node.js version).
 * This script will be executed after the configuration is generated.
 */
const fs = require('fs');
const path = require('path');

function main() {
    if (process.argv.length < 3) {
        console.error('Error: No config file path provided');
        process.exit(1);
    }
    const configPath = process.argv[2];
    if (!fs.existsSync(configPath)) {
        console.error(`Error: Config file not found: ${configPath}`);
        process.exit(1);
    }
    const stats = fs.statSync(configPath);
    console.log(`Node.js hook executed successfully! Config file: ${configPath}`);
    console.log(`File size: ${stats.size} bytes`);
    // You can add custom processing here
    process.exit(0);
}

if (require.main === module) {
    main();
}

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python3
"""
Test hook script for core config generation.
This script will be executed after the configuration is generated.
"""
import sys
from pathlib import Path


def main():
    if len(sys.argv) < 2:
        print("Error: No config file path provided")
        sys.exit(1)
    config_path = Path(sys.argv[1])
    if not config_path.exists():
        print(f"Error: Config file not found: {config_path}")
        sys.exit(1)
    print(f"Hook executed successfully! Config file: {config_path}")
    print(f"File size: {config_path.stat().st_size} bytes")
    # You can add custom processing here
    # For example, copy the file to another location, modify it, etc.
    sys.exit(0)


if __name__ == "__main__":
    main()
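
For either of these scripts to fire, _execute_hooks only requires that they live in the hooks directory under the config dir and match core_config_generated.*. A sketch of installing the Python one; the source path here is hypothetical, only the destination name pattern matters to the glob:

# Sketch: installing the Python hook so _execute_hooks picks it up.
# 'core_config_generated.py' as a source path is hypothetical.
import shutil
from scientific_surfing.storage import StorageManager

hooks_dir = StorageManager().config_dir / "hooks"
hooks_dir.mkdir(parents=True, exist_ok=True)
shutil.copy2("core_config_generated.py", hooks_dir / "core_config_generated.py")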