from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama
from dotenv import load_dotenv
import uvicorn
import io
import requests
import asyncio
import tempfile
import time


load_dotenv()


app = FastAPI()
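
# Q2_K-quantized GGUF model variants to serve; each entry points to a file
# hosted on the Hugging Face Hub.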
model_configs = [
    {"repo_id": "Ffftdtd5dtft/gpt2-xl-Q2_K-GGUF", "filename": "gpt2-xl-q2_k.gguf", "name": "GPT-2 XL"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Instruct-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-instruct-q2_k.gguf", "name": "Meta Llama 3.1-8B Instruct"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-9b-it-Q2_K-GGUF", "filename": "gemma-2-9b-it-q2_k.gguf", "name": "Gemma 2-9B IT"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-27b-Q2_K-GGUF", "filename": "gemma-2-27b-q2_k.gguf", "name": "Gemma 2-27B"},
    {"repo_id": "Ffftdtd5dtft/Phi-3-mini-128k-instruct-Q2_K-GGUF", "filename": "phi-3-mini-128k-instruct-q2_k.gguf", "name": "Phi-3 Mini 128K Instruct"},
    {"repo_id": "Ffftdtd5dtft/Meta-Llama-3.1-8B-Q2_K-GGUF", "filename": "meta-llama-3.1-8b-q2_k.gguf", "name": "Meta Llama 3.1-8B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-7b-instruct-q2_k.gguf", "name": "Qwen2 7B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-3b-Q2_K-GGUF", "filename": "starcoder2-3b-q2_k.gguf", "name": "Starcoder2 3B"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-1.5B-Instruct-Q2_K-GGUF", "filename": "qwen2-1.5b-instruct-q2_k.gguf", "name": "Qwen2 1.5B Instruct"},
    {"repo_id": "Ffftdtd5dtft/starcoder2-15b-Q2_K-GGUF", "filename": "starcoder2-15b-q2_k.gguf", "name": "Starcoder2 15B"},
    {"repo_id": "Ffftdtd5dtft/gemma-2-2b-it-Q2_K-GGUF", "filename": "gemma-2-2b-it-q2_k.gguf", "name": "Gemma 2-2B IT"},
    {"repo_id": "Ffftdtd5dtft/sarvam-2b-v0.5-Q2_K-GGUF", "filename": "sarvam-2b-v0.5-q2_k.gguf", "name": "Sarvam 2B v0.5"},
    {"repo_id": "Ffftdtd5dtft/WizardLM-13B-Uncensored-Q2_K-GGUF", "filename": "wizardlm-13b-uncensored-q2_k.gguf", "name": "WizardLM 13B Uncensored"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-Math-72B-Instruct-Q2_K-GGUF", "filename": "qwen2-math-72b-instruct-q2_k.gguf", "name": "Qwen2 Math 72B Instruct"},
    {"repo_id": "Ffftdtd5dtft/WizardLM-7B-Uncensored-Q2_K-GGUF", "filename": "wizardlm-7b-uncensored-q2_k.gguf", "name": "WizardLM 7B Uncensored"},
    {"repo_id": "Ffftdtd5dtft/Qwen2-Math-7B-Instruct-Q2_K-GGUF", "filename": "qwen2-math-7b-instruct-q2_k.gguf", "name": "Qwen2 Math 7B Instruct"}
]

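
# Downloads each configured GGUF into memory and loads it with llama.cpp at
# startup; loaded models are kept in self.models.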
class ModelManager:
    def __init__(self):
        self.models = []
        self.configs = {}

    async def download_model_to_memory(self, model_config):
        print(f"Downloading model: {model_config['name']}...")
        url = f"https://huggingface.co/{model_config['repo_id']}/resolve/main/{model_config['filename']}"
        # requests is blocking, so run the download off the event loop.
        response = await asyncio.get_event_loop().run_in_executor(None, lambda: requests.get(url))
        if response.status_code == 200:
            model_file = io.BytesIO(response.content)
            return model_file
        else:
            raise Exception(f"Error downloading the model: HTTP {response.status_code}")

    async def load_model(self, model_config):
        try:
            start_time = time.time()
            model_file = await self.download_model_to_memory(model_config)
            print(f"Loading model: {model_config['name']}...")

            def _load():
                # llama.cpp only loads GGUF files from disk, so spill the
                # in-memory download to a temporary file before loading.
                tmp = tempfile.NamedTemporaryFile(suffix=".gguf", delete=False)
                tmp.write(model_file.getbuffer())
                tmp.close()
                return Llama(model_path=tmp.name)

            # Loading is CPU- and IO-bound, so keep it off the event loop.
            model = await asyncio.get_event_loop().run_in_executor(None, _load)

            model_data = {
                'model': model,
                # llama.cpp exposes special tokens as ids rather than a
                # Hugging Face-style tokenizer object.
                'bos_token_id': model.token_bos(),
                'eos_token_id': model.token_eos()
            }

            self.models.append({"model_data": model_data, "name": model_config['name']})
            print(f"Model {model_config['name']} loaded in {time.time() - start_time:.2f} seconds.")
        except Exception as e:
            print(f"Error loading model {model_config['name']}: {e}")

    async def load_all_models(self):
        print("Starting model loading...")
        start_time = time.time()
        tasks = [self.load_model(config) for config in model_configs]
        await asyncio.gather(*tasks)
        end_time = time.time()
        print(f"All models loaded in {end_time - start_time:.2f} seconds.")

model_manager = ModelManager()


@app.on_event("startup")
async def startup_event():
    await model_manager.load_all_models()

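# Request schema for /chat; top_k, top_p and temperature are forwarded to the
# sampler for every model.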
class ChatRequest(BaseModel):
    message: str
    top_k: int = 50
    top_p: float = 0.95
    temperature: float = 0.7


# Upper bound on generated output; also used as the character chunk size when
# splitting long responses.
TOKEN_LIMIT = 1000

async def generate_chat_response(request, model_data):
    user_input = request.message.strip()
    try:
        llm = model_data['model_data']['model']

        # llama.cpp generation is blocking, so run it off the event loop.
        response = await asyncio.get_event_loop().run_in_executor(
            None,
            lambda: llm(
                user_input,
                max_tokens=TOKEN_LIMIT,
                top_k=request.top_k,
                top_p=request.top_p,
                temperature=request.temperature
            )
        )
        generated_text = response['choices'][0]['text']

        split_response = split_long_response(generated_text)
        return {"response": split_response, "literal": user_input, "model_name": model_data['name']}
    except Exception as e:
        print(f"Error generating the response: {e}")
        return {"response": "Error generating the response", "literal": user_input, "model_name": model_data['name']}

def split_long_response(response):
    """Split the response into smaller chunks if it exceeds the limit.

    The split is character-based, so TOKEN_LIMIT is only an approximation of
    the real token count.
    """
    parts = []
    while len(response) > TOKEN_LIMIT:
        part = response[:TOKEN_LIMIT]
        response = response[TOKEN_LIMIT:]
        parts.append(part.strip())
    if response:
        parts.append(response.strip())
    return '\n'.join(parts)


def remove_duplicates(text):
    """Remove duplicate lines from the text while preserving order."""
    lines = text.splitlines()
    unique_lines = list(dict.fromkeys(lines))
    return '\n'.join(unique_lines)

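# Collapse models that produced the same (de-duplicated) text into a single
# entry so the API does not return repeated answers.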
def remove_repetitive_responses(responses):
    unique_responses = []
    seen_responses = set()
    for response in responses:
        normalized_response = remove_duplicates(response['response'])
        if normalized_response not in seen_responses:
            seen_responses.add(normalized_response)
            response['response'] = normalized_response
            unique_responses.append(response)
    return unique_responses

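# Fan the prompt out to every loaded model and drop duplicate answers.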
@app.post("/chat")
async def chat(request: ChatRequest):
    results = []
    for model_data in model_manager.models:
        response = await generate_chat_response(request, model_data)
        results.append(response)
    unique_results = remove_repetitive_responses(results)
    return {"results": unique_results}

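
# Example request once the models have finished loading (assumes the server is
# running locally on the default port 8000 configured below):
#   curl -X POST http://localhost:8000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello", "top_k": 50, "top_p": 0.95, "temperature": 0.7}'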
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)