# Hunyuan翻译API设计规范:RESTful接口封装部署教程

## 1. 前言:为什么需要API封装

在实际项目中,直接调用模型虽然简单,但存在很多问题:每次都要加载模型、代码重复、难以维护、缺乏标准化。通过RESTful API封装,我们可以:

- **统一调用方式**:所有服务通过HTTP接口调用
- **提高复用性**:一次部署,多处使用
- **便于扩展**:轻松添加负载均衡、监控、认证等功能
- **标准化输入输出**:定义清晰的请求/响应格式

本教程将带你从零开始,将HY-MT1.5-1.8B翻译模型封装成生产可用的RESTful API服务。

## 2. 环境准备与依赖安装

### 2.1 系统要求

确保你的系统满足以下要求:

- Python 3.8+
- CUDA 11.7+(GPU推理)
- 至少8GB RAM(16GB推荐)
- 至少10GB磁盘空间

### 2.2 安装必要依赖

创建新的项目目录并安装依赖:

```bash
# 创建项目目录
mkdir hunyuan-translator-api
cd hunyuan-translator-api

# 创建虚拟环境
python -m venv venv
source venv/bin/activate  # Linux/Mac
# venv\Scripts\activate   # Windows

# 安装核心依赖
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
pip install transformers accelerate sentencepiece fastapi uvicorn python-multipart pydantic
```

### 2.3 下载模型文件

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# 下载模型(首次运行会自动下载)
model_name = "tencent/HY-MT1.5-1.8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.bfloat16
)

# 测试模型加载是否成功
print("模型加载成功")
```

## 3. RESTful API设计规范

### 3.1 API端点设计

设计良好的API端点应该清晰、一致、易于理解:

| 端点 | 方法 | 描述 |
| --- | --- | --- |
| /health | GET | 服务健康检查 |
| /translate | POST | 执行翻译任务 |
| /languages | GET | 获取支持的语言列表 |
| /batch-translate | POST | 批量翻译接口 |

### 3.2 请求/响应格式规范

翻译请求示例:

```json
{
  "text": "Hello, world!",
  "source_lang": "en",
  "target_lang": "zh",
  "parameters": {
    "max_length": 512,
    "temperature": 0.7
  }
}
```

成功响应示例:

```json
{
  "status": "success",
  "data": {
    "translated_text": "你好,世界!",
    "source_lang": "en",
    "target_lang": "zh",
    "processing_time": 0.45
  }
}
```

错误响应示例:

```json
{
  "status": "error",
  "error_code": "INVALID_LANGUAGE",
  "message": "不支持的语言代码: xx",
  "details": "请检查语言代码是否正确"
}
```

## 4.
完整API服务实现

### 4.1 创建FastAPI应用

创建main.py文件:

```python
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Optional
import time
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

app = FastAPI(
    title="Hunyuan翻译API服务",
    description="基于HY-MT1.5-1.8B的RESTful翻译API",
    version="1.0.0"
)

# 全局变量存储模型和分词器
model = None
tokenizer = None

class TranslationRequest(BaseModel):
    text: str
    source_lang: str = "auto"
    target_lang: str
    max_length: int = 512
    temperature: float = 0.7

class BatchTranslationRequest(BaseModel):
    texts: List[str]
    source_lang: str = "auto"
    target_lang: str
    max_length: int = 512
    temperature: float = 0.7

class TranslationResponse(BaseModel):
    status: str
    data: dict

@app.on_event("startup")
async def load_model():
    """启动时加载模型"""
    global model, tokenizer
    try:
        model_name = "tencent/HY-MT1.5-1.8B"
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            torch_dtype=torch.bfloat16
        )
        print("模型加载成功")
    except Exception as e:
        print(f"模型加载失败: {e}")
        raise e

@app.get("/health")
async def health_check():
    """健康检查端点"""
    return {
        "status": "healthy",
        "model_loaded": model is not None,
        "timestamp": time.time()
    }

@app.get("/languages")
async def get_supported_languages():
    """获取支持的语言列表"""
    languages = [
        "中文", "English", "Français", "Português", "Español", "日本語",
        "Türkçe", "Русский", "العربية", "한국어", "ภาษาไทย", "Italiano",
        "Deutsch", "Tiếng Việt", "Bahasa Melayu", "Bahasa Indonesia",
        "Filipino", "हिन्दी", "繁体中文", "Polski", "Čeština", "Nederlands",
        "ខ្មែរ", "မြန်မာ", "فارسی", "ગુજરાતી", "اردو", "తెలుగు", "मराठी",
        "עברית", "বাংলা", "தமிழ்", "Українська", "བོད་སྐད", "Қазақша",
        "Монгол хэл", "ئۇيغۇرچە", "粵語"
    ]
    return {"languages": languages}

@app.post("/translate", response_model=TranslationResponse)
async def translate_text(request: TranslationRequest):
    """单文本翻译接口"""
    if model is None or tokenizer is None:
        raise HTTPException(status_code=503, detail="模型未加载完成")
    try:
        start_time = time.time()

        # 构建翻译提示
        prompt = f"Translate the following {'text' if request.source_lang == 'auto' else request.source_lang} " \
                 f"to {request.target_lang}, without additional explanation:\n\n{request.text}"
        messages = [{"role": "user", "content": prompt}]

        # 编码输入
        tokenized = tokenizer.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=False,
            return_tensors="pt"
        )

        # 生成翻译
        with torch.no_grad():
            outputs = model.generate(
                tokenized.to(model.device),
                max_new_tokens=request.max_length,
                temperature=request.temperature,
                do_sample=True
            )

        # 解码结果
        result = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # 提取翻译结果(去除提示部分)
        translated_text = result.split("\n\n")[-1].strip()

        processing_time = time.time() - start_time

        return {
            "status": "success",
            "data": {
                "translated_text": translated_text,
                "source_lang": request.source_lang,
                "target_lang": request.target_lang,
                "processing_time": round(processing_time, 3)
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"翻译失败: {str(e)}")

@app.post("/batch-translate")
async def batch_translate(request: BatchTranslationRequest):
    """批量翻译接口"""
    results = []
    for text in request.texts:
        try:
            # 重用单文本翻译逻辑
            translation_request = TranslationRequest(
                text=text,
                source_lang=request.source_lang,
                target_lang=request.target_lang,
                max_length=request.max_length,
                temperature=request.temperature
            )
            result = await translate_text(translation_request)
            results.append(result.dict())
        except Exception as e:
            results.append({
                "status": "error",
                "error": str(e),
                "text": text
            })
    return {"results": results}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
```

### 4.2 创建配置文件

创建config.py文件:

```python
import os

class Config:
    # 服务器配置
    HOST = os.getenv("HOST", "0.0.0.0")
    PORT = int(os.getenv("PORT", 8000))

    # 模型配置
    MODEL_NAME = "tencent/HY-MT1.5-1.8B"
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    MAX_LENGTH = 512
    DEFAULT_TEMPERATURE = 0.7

    # API配置
    MAX_BATCH_SIZE = 10
    REQUEST_TIMEOUT = 30

    # 日志配置
    LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")

config = Config()
```

## 5.
部署与运行

### 5.1 直接运行API服务

```bash
# 启动API服务
uvicorn main:app --host 0.0.0.0 --port 8000 --reload

# 或者使用生产模式
uvicorn main:app --host 0.0.0.0 --port 8000 --workers 4
```

### 5.2 使用Docker部署

创建Dockerfile:

```dockerfile
FROM python:3.9-slim

WORKDIR /app

# 安装系统依赖
RUN apt-get update && apt-get install -y \
    git \
    && rm -rf /var/lib/apt/lists/*

# 复制项目文件
COPY requirements.txt .
COPY main.py .
COPY config.py .

# 安装Python依赖
RUN pip install --no-cache-dir -r requirements.txt

# 暴露端口
EXPOSE 8000

# 启动命令
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "4"]
```

创建docker-compose.yml:

```yaml
version: "3.8"
services:
  hunyuan-translator:
    build: .
    ports:
      - "8000:8000"
    environment:
      - LOG_LEVEL=INFO
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    volumes:
      - ./models:/app/models
    restart: unless-stopped
```

构建和运行:

```bash
# 构建镜像
docker build -t hunyuan-translator .

# 运行容器
docker run -d -p 8000:8000 --gpus all hunyuan-translator

# 或者使用docker-compose
docker-compose up -d
```

## 6. API测试与使用

### 6.1 使用curl测试API

```bash
# 健康检查
curl http://localhost:8000/health

# 获取支持的语言
curl http://localhost:8000/languages

# 单文本翻译
curl -X POST http://localhost:8000/translate \
  -H "Content-Type: application/json" \
  -d '{
    "text": "Hello, how are you?",
    "source_lang": "en",
    "target_lang": "zh",
    "parameters": {
      "max_length": 512,
      "temperature": 0.7
    }
  }'

# 批量翻译
curl -X POST http://localhost:8000/batch-translate \
  -H "Content-Type: application/json" \
  -d '{
    "texts": ["Hello", "Good morning", "How are you?"],
    "source_lang": "en",
    "target_lang": "zh"
  }'
```

### 6.2 使用Python客户端

创建client.py:

```python
import requests
import json

class HunyuanTranslatorClient:
    def __init__(self, base_url="http://localhost:8000"):
        self.base_url = base_url

    def health_check(self):
        """检查服务状态"""
        response = requests.get(f"{self.base_url}/health")
        return response.json()

    def get_languages(self):
        """获取支持的语言列表"""
        response = requests.get(f"{self.base_url}/languages")
        return response.json()

    def translate(self, text, target_lang, source_lang="auto", max_length=512, temperature=0.7):
        """单文本翻译"""
        payload = {
            "text": text,
            "source_lang": source_lang,
            "target_lang": target_lang,
            "max_length": max_length,
            "temperature": temperature
        }
        response = requests.post(
            f"{self.base_url}/translate",
            json=payload,
            timeout=30
        )
        if response.status_code == 200:
            return response.json()
        else:
            raise Exception(f"翻译失败: {response.text}")

    def batch_translate(self, texts, target_lang, source_lang="auto"):
        """批量翻译"""
        payload = {
            "texts": texts,
            "source_lang": source_lang,
            "target_lang": target_lang
        }
        response = requests.post(
            f"{self.base_url}/batch-translate",
            json=payload,
            timeout=60
        )
        if response.status_code == 200:
            return response.json()
        else:
            raise Exception(f"批量翻译失败: {response.text}")

# 使用示例
if __name__ == "__main__":
    client = HunyuanTranslatorClient()

    # 检查服务状态
    print("服务状态:", client.health_check())

    # 获取支持的语言
    languages = client.get_languages()
    print("支持的语言:", languages["languages"][:5])

    # 单文本翻译
    result = client.translate("Hello, world!", "zh")
    print("翻译结果:", result["data"]["translated_text"])

    # 批量翻译
    batch_result = client.batch_translate(
        ["Good morning", "How are you?", "Thank you"],
        "zh"
    )
    print("批量翻译结果:", batch_result)
```

## 7. 性能优化与最佳实践

### 7.1 启用模型缓存

在main.py的启动事件中添加模型缓存:

```python
@app.on_event("startup")
async def load_model():
    global model, tokenizer
    try:
        # 使用缓存目录
        cache_dir = "./models"
        model_name = "tencent/HY-MT1.5-1.8B"
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            cache_dir=cache_dir
        )
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            torch_dtype=torch.bfloat16,
            cache_dir=cache_dir
        )
        print("模型加载成功")
    except Exception as e:
        print(f"模型加载失败: {e}")
        raise e
```

### 7.2 添加请求限流

安装依赖:

```bash
pip install slowapi
```

在main.py中添加限流中间件:

```python
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address
from slowapi.errors import RateLimitExceeded

# 添加限流器
limiter = Limiter(key_func=get_remote_address)
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

# 在路由上添加限流
@app.post("/translate")
@limiter.limit("10/minute")  # 每分钟10次
async def translate_text(request: TranslationRequest):
    # ... 原有代码
```

### 7.3 添加日志记录

创建logger.py:

```python
import logging
import sys

def setup_logger():
    logger = logging.getLogger("hunyuan-translator")
    logger.setLevel(logging.INFO)

    # 控制台处理器
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.INFO)

    # 文件处理器
    file_handler = logging.FileHandler("app.log")
    file_handler.setLevel(logging.INFO)

    # 格式化
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    console_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)

    logger.addHandler(console_handler)
    logger.addHandler(file_handler)

    return logger

logger = setup_logger()
```

在main.py中导入并使用:

```python
from logger import logger

# 在适当的地方添加日志
logger.info("模型加载成功")
logger.warning("请求频率过高")
logger.error("翻译失败", exc_info=True)
```

## 8. 总结

通过本教程,我们完成了HY-MT1.5-1.8B翻译模型的RESTful API封装,实现了:

- **标准化API设计**:遵循RESTful规范,设计清晰的端点和数据格式
- **完整的功能实现**:单文本翻译、批量翻译、语言列表查询等功能
- **生产级部署**:支持Docker部署,包含健康检查、日志记录等生产特性
- **性能优化**:添加模型缓存、请求限流等优化措施
- **完整的测试方案**:提供curl和Python客户端测试示例

这个API服务现在已经可以投入生产环境使用。你可以轻松地将其集成到你的应用程序、网站或服务中,享受高质量的机器翻译能力。

### 获取更多AI镜像

想探索更多AI镜像和应用场景,访问 CSDN星图镜像广场,提供丰富的预置镜像,覆盖大模型推理、图像生成、视频生成、模型微调等多个领域,支持一键部署。