from openai import OpenAI
import os
import re
from datetime import datetime, timezone

client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))


def get_response_davinci003(system_message, user_message, response_message=""):
    """
    Get a completion from text-davinci-003 for the given system and user messages.

    This function can be generalized to accommodate different chatbot
    implementations; for now it uses OpenAI's API directly.
    """

    prompt_text = f"Content: {system_message['content']}\nQuery: {user_message['content']}\n"
    cost_per_token = 0.01  # must be numeric: log_api_call multiplies it by the token count

    # max_tokens increased a bit to accommodate the extra instruction
    response = client.completions.create(
        model="text-davinci-003",
        prompt=prompt_text,
        max_tokens=500,
    )
    # print(response)
    # Extract token count
    token_count = response.usage.total_tokens

    # Log the API call information
    #log_api_call("text-davinci-003", token_count, cost_per_token)

    response_text = response.choices[0].text.strip().split("\n")
    
    last_line = response_text[-1]
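    # If the model followed the prompt convention, the last line ends with a
    # parenthesized context tag, e.g. "... refunds take 5 days. (Billing)"
    # (the "(Billing)" tag here is purely illustrative).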
    context_match = re.search(r'\(([^)]+)\)$', last_line)

    if context_match:
        context = context_match.group(1)
        bot_response = "\n".join(response_text[:-1])  # Exclude the last line for the bot response
    else:
        context = "General"
        bot_response = "\n".join(response_text)

    # Remove prefixes like "Answer:", "System:", etc.
    bot_response = re.sub(r'^\w+:\s*', '', bot_response)
    #url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    #bot_response = re.sub(url_pattern, lambda m: f'<a href="{m.group(0)}">Learn more</a>', bot_response)
    
    return {"context": context, "bot_response": bot_response}

    """
    Get a response from GPT-3.5 using the provided system and user messages.
    
    Parameters:
    - system_message (dict): System instruction message for GPT.
    - user_message (dict): User's message/query.
    - max_tokens (int): Maximum tokens for GPT response.

    Returns:
    - dict: A dictionary containing 'context' and 'bot_response'.
    """

def get_response_gpt35(system_message, user_message, response_message="", max_tokens=500):

    # Construct the message list; both inputs must be chat-style message dicts
    if isinstance(system_message, dict) and isinstance(user_message, dict):
        messages_with_instruction = [system_message, user_message]
    else:
        print("Error: Either system_message or user_message is not a dictionary.")
        return {"context": "Error", "bot_response": "Invalid message format: expected dicts."}

    print("logging GPT3.5:")
    cost_per_token = 0.01  # must be numeric for log_api_call
    
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages_with_instruction,
            max_tokens=max_tokens,
        )
        # token_count = response.usage.total_tokens

        # Log the API call information
        # log_api_call("gpt-3.5-turbo", token_count, cost_per_token)

    except Exception as e:
        # Handle potential API exceptions or add more specific error catches
        print(e)
        return {"context": "Error", "bot_response": f"An error occurred: {str(e)}"}

    response_text = response.choices[0].message.content.strip()
    context_match = re.search(r'\(([^)]+)\)$', response_text)  # Extract content inside parentheses at the end

    if context_match:
        context = context_match.group(1)
        bot_response = response_text[:context_match.start()].strip()  # Remove context from the main response
    else:
        context = "General"
        bot_response = response_text

    return {"context": context, "bot_response": bot_response}

# Legacy GPT-3.5 caller, kept for reference
def get_response_gpt35_old(system_message, user_message, response_message="", max_tokens=500):
    """
    Get a response from GPT-3.5 using the provided system and user messages.
    
    Parameters:
    - system_message (dict): System instruction message for GPT.
    - user_message (dict): User's message/query.
    - max_tokens (int): Maximum tokens for GPT response.

    Returns:
    - dict: A dictionary containing 'context' and 'bot_response'.
    """
    print(system_message)
    print(user_message)
    messages_with_instruction = [system_message, user_message]
    print(messages_with_instruction)

    print("logging GPT3.5:")
    cost_per_token = 0.01  # must be numeric for log_api_call
    
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages_with_instruction,
            max_tokens=max_tokens,
        )
        # token_count = response.usage.total_tokens

        # Log the API call information
        # log_api_call("gpt-3.5-turbo", token_count, cost_per_token)

    except Exception as e:
        # Handle potential API exceptions or add more specific error catches
        print(str(e))
        return {"context": "Error", "bot_response": f"An error occurred: {str(e)}"}

    response_text = response.choices[0].message.content.strip()
    context_match = re.search(r'\(([^)]+)\)$', response_text)  # Extract content inside parentheses at the end

    if context_match:
        context = context_match.group(1)
        bot_response = response_text[:context_match.start()].strip()  # Remove context from the main response
    else:
        context = "General"
        bot_response = response_text

    return {"context": context, "bot_response": bot_response}


def get_clean_document_davinci(text):
    """
    This function can be generalized to accommodate different chatbot implementations.
    For now, it just uses OpenAI's API.
    """
    prompt_text = f" Please format this content properly -> Content: {text}\n"
    print("logging Davinci:")

    try:
        response = client.completions.create(model="text-davinci-003",
        prompt=prompt_text,
        max_tokens=1000 )  # increased a bit to accommodate the extra instruction)
        response_text = response.choices[0].text.strip()
        return response_text
    except Exception as e:
        print(f"Error in getting response from Davinci: {e}")
        return text
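
# Example (illustrative): get_clean_document_davinci("raw   scraped\ttext")
# returns the model-formatted text, or the original input if the API call fails.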
    

def log_api_call(model_name, token_count, cost_per_token):
    """
    Log the API call information to a local file.
    
    Parameters:
    - model_name (str): Name of the OpenAI model used.
    - token_count (int): Total tokens used in the API call.
    - cost_per_token (float): Cost per token.
    """
    
    # Calculate the total cost for the API call
    total_cost = token_count * cost_per_token
    
    # Prepare the log entry
    log_entry = f"{model_name} - {datetime.utcnow()} - {token_count} tokens - ${total_cost:.2f}\n"
    
    # Write the log entry to a local file
    with open("api_call_log.txt", "a") as file:
        file.write(log_entry)
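

# A minimal usage sketch (assumes OPENAI_API_KEY is set and makes a live API
# call; the example messages and token count below are illustrative only):
if __name__ == "__main__":
    system = {"role": "system", "content": "You are a support bot. End each reply with its topic in parentheses."}
    user = {"role": "user", "content": "How do I reset my password?"}

    result = get_response_gpt35(system, user)
    print(result["context"])
    print(result["bot_response"])

    # Manual log entry for the demo call (the token count here is made up)
    log_api_call("gpt-3.5-turbo", 120, 0.01)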
