import httpx
from typing import Tuple, List, Dict, Any
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from app.api.v1.libraries.chromadbRAG import initialize_chromadb, process_retrieval_documents_from_chromadb, truncate_content
from app.api.v1.routers.projects import get_all_content
from app.api.v1.libraries.Vector import VectorDBQueryHandler
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever

MAX_TOKENS_FOR_PROMPT = 2500
MIN_TOKENS_FOR_PROMPT = 100
TOKENIZERS_PARALLELISM = "true"

# Load the .env file once and fail fast if it is missing or empty, before any
# configuration (or the embeddings client) is read from the environment.
if not load_dotenv():
    print("Could not load .env file or it is empty. Please check if it exists and is readable.")
    exit(1)

os.environ["TOKENIZERS_PARALLELISM"] = TOKENIZERS_PARALLELISM
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
CHAT_MODEL = os.getenv('CHAT_MODEL')

embeddings = OpenAIEmbeddings()


async def retrieve_from_rag_api(rag_url: str, rag_header: str, query: str) -> Dict:
    """
    Calls the RAG API with the provided URL and header for authentication.
    """
    headers = {'Authorization': rag_header}
    async with httpx.AsyncClient() as client:
        response = await client.post(rag_url, headers=headers, json={"q": query})
        response.raise_for_status()  # Surface HTTP errors instead of parsing an error body
        return response.json()
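
# Illustrative call (a sketch, not part of the app wiring): the endpoint URL
# and bearer token below are placeholders, and the {"q": ...} payload matches
# what retrieve_from_rag_api sends.
async def _example_rag_call() -> Dict:
    return await retrieve_from_rag_api(
        rag_url="https://rag.example.com/search",  # placeholder endpoint
        rag_header="Bearer <token>",               # placeholder credential
        query="How do refunds work?",
    )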

async def retrieve_from_vector_store(query: str, vector_details: Dict[str, Any]) -> Any:
    """
    Retrieve documents using the Vector Store.
    """
    handler = VectorDBQueryHandler(vector_details, query)
    results = await handler.vector_query()
    return results
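
# Illustrative call: `vector_details` mirrors the `content['vector']` dict
# checked in rag_retriever below. Only the 'type' key is known from this
# module; the value shown is a hypothetical placeholder.
async def _example_vector_query(search_query: str) -> Any:
    vector_details: Dict[str, Any] = {"type": "pinecone"}  # hypothetical store type
    return await retrieve_from_vector_store(search_query, vector_details)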


async def retrieve_from_chroma(account_id: str, project_id: str, query: str, dbmongo) -> Tuple[str, List, List, List]:
    """
    Retrieves documents using the Chroma local pipeline.
    """
    db = initialize_chromadb(account_id, project_id)
    retriever = db.as_retriever()
    response = retriever.get_relevant_documents(query)
    content, links, youtube, docs = process_retrieval_documents_from_chromadb(response, dbmongo)
    return content, links, youtube, docs

async def rag_retriever(project_id: str, project_data: Dict, search_query: str, dbmongo) -> Tuple[str, int, List, List, List]:
    """
    Main retriever function supporting Chroma, RAG, and Vector Store pipelines.
    """
    account_id = project_data.get('account_id', '')
    content_data = await get_all_content(project_id, dbmongo)
    retrieval_response = None

    for content in content_data:
        if 'rag_url' in content and 'rag_header' in content:
            # Use the external RAG pipeline if credentials are configured
            retrieval_response = await retrieve_from_rag_api(content['rag_url'], content['rag_header'], search_query)
            break  # Stop at the first matching source; adjust if several should be merged

        elif 'vector' in content and content['vector'].get('type') is not None:
            # Use Vector Store pipeline if available
            vector_details = content['vector']
            retrieval_response = await retrieve_from_vector_store(search_query, vector_details)
            break  # Adjust logic if multiple vectors are supported
        
    chromadb_content, links, youtube, docs = await retrieve_from_chroma(account_id, project_id, search_query, dbmongo)

    # Combine the external response with the local Chroma content; fall back to
    # Chroma alone when no external pipeline produced a response.
    if retrieval_response is not None:
        full_response = f"{retrieval_response} {chromadb_content}".strip()
    else:
        full_response = chromadb_content
    business_content, token_count = truncate_content(full_response, MAX_TOKENS_FOR_PROMPT)

    if token_count < MIN_TOKENS_FOR_PROMPT:
        return "Sorry, we don't have enough relevant information on this topic at the moment.", 0, [], [], []

    return business_content, token_count, links, youtube, docs
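
# Usage sketch: how a caller typically unpacks rag_retriever's five return
# values. The query string and Mongo handle below are placeholders for
# whatever the route layer supplies.
async def _example_retrieval(project_id: str, project_data: Dict, dbmongo) -> str:
    content, token_count, links, youtube, docs = await rag_retriever(
        project_id, project_data, "summarize our onboarding flow", dbmongo
    )
    # `content` is what gets interpolated into the chat prompt; links, youtube
    # and docs travel alongside it, presumably for rendering sources.
    return content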


# Not wired into the pipeline yet
async def parse_content_from_RAG_response(response: Dict) -> Tuple[str, List, List, List]: 
    """
    Parses the response from RAG API and returns the content, links, YouTube videos, and documents.
    """
    # Example of parsing the response; adjust according to actual API response
    content = response.get("content", "")
    links = response.get("links", [])
    youtube = response.get("youtube", [])
    docs = response.get("docs", [])
    return content, links, youtube, docs
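
# Shape the parser above assumes (an illustration, not a documented contract):
# a JSON object with optional "content", "links", "youtube" and "docs" keys.
_EXAMPLE_RAG_RESPONSE: Dict[str, Any] = {
    "content": "Refunds are processed within 5 business days.",
    "links": ["https://example.com/refund-policy"],
    "youtube": [],
    "docs": [],
}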

# This function should replace the relevant part of your `chatcall` function
async def handle_retrieval_qa(input_text, chat_history, document_content_description, db):
    """
    Runs a self-querying retrieval over the given vector store and returns the
    relevant documents, or an error dict if retrieval fails.
    """
    try:
        # Initialize LangChain's components (assumes the model and API key are set up)
        llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, model_name=CHAT_MODEL, temperature=0)

        # No metadata filters are defined yet, so the retriever relies solely
        # on the document content description.
        metadata_field_info: List[AttributeInfo] = []

        # Let the LLM translate the user's question into a structured
        # vector-store query, with an optional result limit.
        retriever = SelfQueryRetriever.from_llm(
            llm,
            db,
            document_content_description,
            metadata_field_info,
            verbose=True,
            enable_limit=True,
        )
        response = retriever.get_relevant_documents(input_text)
        return response
    
    except Exception as e:
        # Return the failure to the caller instead of raising
        return {"error": str(e)}