Skip to content

gemini_model

arai_ai_agents.models.gemini_model

GeminiModel

Bases: ModelInterface

Gemini model implementation.

Attributes:

Name Type Description
model str

The name of the Gemini model to use.

Source code in arai_ai_agents/models/gemini_model.py
class GeminiModel(ModelInterface):
    """Gemini model implementation.

    Attributes:
        model: The configured genai.GenerativeModel used for all requests.
    """

    def __init__(self, api_key=None, model_name="gemini-exp-1206"):
        """Initialize the Gemini model.

        Args:
            api_key (str, optional): API key for the Gemini service. When
                omitted, the GOOGLE_GEMINI_API_KEY environment variable is used.
            model_name (str): The name of the Gemini model to use.

        Example:
            >>> gemini_model = GeminiModel()
        """
        if api_key:
            genai.configure(api_key=api_key)
        else:
            genai.configure(api_key=os.environ.get('GOOGLE_GEMINI_API_KEY'))
        self.model = genai.GenerativeModel(model_name)

    # -------------------------------------------------------------------
    # Generate a response for either a string or a message-list prompt.
    # -------------------------------------------------------------------
    def generate_response(self, prompt, **kwargs):
        """Generate a response to a given prompt using the Gemini API.

        Args:
            prompt (str | list[dict]): Either a plain prompt string or a
                list of role/parts message dictionaries.
            **kwargs: Additional keyword arguments (forwarded for string prompts).

        Returns:
            str: The generated response.

        Raises:
            TypeError: If prompt is neither a str nor a list.

        Example:
            >>> gemini_model = GeminiModel()
            >>> response = gemini_model.generate_response("What is the weather in Tokyo?")
        """
        if isinstance(prompt, str):
            return self.generate_response_from_string(prompt, **kwargs)
        # BUG FIX: isinstance() rejects parameterized generics, so the
        # original isinstance(prompt, list[dict]) raised TypeError for
        # every list prompt; a plain list check is the correct test.
        if isinstance(prompt, list):
            return self.generate_response_dictionary(prompt)
        raise TypeError(f"Unsupported prompt type: {type(prompt).__name__}")

    # -------------------------------------------------------------------
    # Helper to generate a response from a list of message dictionaries.
    # -------------------------------------------------------------------
    def generate_response_dictionary(self, prompt: list[dict]) -> str:
        """Generate a response to a given prompt using a list of dictionaries.

        Args:
            prompt (list[dict]): Messages in Gemini's role/parts format.

        Returns:
            str: The generated response, or an error message on failure.

        Example:
            >>> gemini_model = GeminiModel()
            >>> response = gemini_model.generate_response_dictionary([{"role": "user", "parts": "What is the weather in Tokyo?"}])
        """
        try:
            response = self.model.generate_content(prompt)
            return response.text.strip()
        except Exception as e:
            return f"Error generating response: {str(e)}"

    # -------------------------------------------------------------------
    # Helper to generate a response from a plain string prompt.
    # -------------------------------------------------------------------
    def generate_response_from_string(self, prompt, **kwargs):
        """Generate a response to a given prompt using a string.

        Args:
            prompt (str): The prompt to generate a response to.
            **kwargs: Optional "personality" and "communication_style"
                strings prepended to the conversation as a persona message.

        Returns:
            str: The generated response, or an error message on failure.

        Example:
            >>> gemini_model = GeminiModel()
            >>> response = gemini_model.generate_response_from_string("What is the weather in Tokyo?")
        """
        # BUG FIX: the original only assigned these names when the matching
        # kwarg was present, so passing one without the other raised
        # UnboundLocalError (masked by the except below as an error string).
        personality = kwargs.get("personality", "")
        communication_style = kwargs.get("communication_style", "")

        try:
            messages = []

            # Persona message goes first so it frames the actual prompt.
            if personality or communication_style:
                persona_prompt = f"{personality} {communication_style}"
                messages.append({
                    "role": "user",
                    "parts": [persona_prompt]
                })

            # The user's actual prompt.
            messages.append({
                "role": "user",
                "parts": [prompt]
            })

            response = self.model.generate_content(messages)
            return response.text.strip()

        except Exception as e:
            return f"Error generating response: {str(e)}"

    # -------------------------------------------------------------------
    # Helper to ask the model to repair a response that is not valid YAML.
    # -------------------------------------------------------------------
    def fix_response(self, prompt, response):
        """Fix a response that is not valid YAML.

        Args:
            prompt (str): The instruction describing how to fix the response.
            response (str): The malformed response to fix.

        Returns:
            str: The fixed response, or an error message on failure.

        Example:
            >>> gemini_model = GeminiModel()
            >>> response = gemini_model.fix_response("What is the weather in Tokyo?", "The weather in Tokyo is sunny.")
        """
        try:
            # Send the instruction and the faulty output as two user turns.
            messages = [
                {"role": "user", "parts": [prompt]},
                {"role": "user", "parts": [response]},
            ]

            result = self.model.generate_content(messages)
            return result.text.strip()

        except Exception as e:
            return f"Error generating response: {str(e)}"

    def generate_yaml_response(self):
        """Generate a YAML character sheet and write it to debug files.

        Writes the raw and stripped model output as plain text, and as
        YAML-serialized strings, to several files in the working directory.

        Returns:
            None

        Example:
            >>> gemini_model = GeminiModel()
            >>> gemini_model.generate_yaml_response()
        """
        messages = [{
            "role": "user",
            "parts": "Create me a YAML file with the following fields: name, personality, communication_style, topic, backstory, universe, hashtags, emojis. Start and end the file with ```yaml and ```"
        }]

        response = self.model.generate_content(messages)
        print(response)

        raw = response.text
        stripped = raw.strip()

        # Plain-text dumps of the model output (raw and stripped).
        with open("response.txt", "w", encoding="utf-8") as f:
            f.write(stripped)
        with open("response.yaml", "w", encoding="utf-8") as f:
            f.write(raw)
        with open("response_stripped.yaml", "w", encoding="utf-8") as f:
            f.write(stripped)

        # YAML-serialized dumps (the text is stored as one YAML string).
        with open("yaml_response.yaml", "w", encoding="utf-8") as f:
            yaml.dump(raw, f)
        with open("yaml_response_stripped.yaml", "w", encoding="utf-8") as f:
            yaml.dump(stripped, f)

__init__(api_key=None, model_name='gemini-exp-1206')

Initialize the Gemini model.

Parameters:

Name Type Description Default
api_key str

The API key to use for the Gemini model.

None
model_name str

The name of the Gemini model to use.

'gemini-exp-1206'
Example

gemini_model = GeminiModel()

Source code in arai_ai_agents/models/gemini_model.py
def __init__(self, api_key=None, model_name="gemini-exp-1206"):
    """Configure the Gemini client and create the generative model handle.

    Args:
        api_key (str): Explicit API key; when omitted, the
            GOOGLE_GEMINI_API_KEY environment variable is used instead.
        model_name (str): Identifier of the Gemini model to instantiate.

    Example:
        >>> gemini_model = GeminiModel()
    """
    # Fall back to the environment when no key is passed explicitly.
    key = api_key if api_key else os.environ.get('GOOGLE_GEMINI_API_KEY')
    genai.configure(api_key=key)
    self.model = genai.GenerativeModel(model_name)

fix_response(prompt, response)

Fix a response that is not valid YAML.

Parameters:

Name Type Description Default
prompt str

The prompt to generate a response to.

required
response str

The response to fix.

required

Returns:

Name Type Description
str

The fixed response.

Raises:

Type Description
Exception

If there's an error calling the API.

Example

gemini_model = GeminiModel()
response = gemini_model.fix_response("What is the weather in Tokyo?", "The weather in Tokyo is sunny.")

Source code in arai_ai_agents/models/gemini_model.py
def fix_response(self, prompt, response):
    """Ask the model to repair a response that is not valid YAML.

    Args:
        prompt (str): The instruction describing how to fix the response.
        response (str): The malformed response to repair.

    Returns:
        str: The repaired response text, or an error message on failure.

    Example:
        >>> gemini_model = GeminiModel()
        >>> response = gemini_model.fix_response("What is the weather in Tokyo?", "The weather in Tokyo is sunny.")
    """
    try:
        # Two user turns: the fix instruction, then the faulty output.
        conversation = [
            {"role": "user", "parts": [prompt]},
            {"role": "user", "parts": [response]},
        ]

        result = self.model.generate_content(conversation)
        return result.text.strip()

    except Exception as e:
        return f"Error generating response: {str(e)}"

generate_response(prompt, **kwargs)

Generate a response to a given prompt using the Gemini API.

Parameters:

Name Type Description Default
prompt str

The prompt to generate a response to.

required
**kwargs

Additional keyword arguments.

{}

Returns:

Name Type Description
str

The generated response.

Example

gemini_model = GeminiModel()
response = gemini_model.generate_response("What is the weather in Tokyo?")

Source code in arai_ai_agents/models/gemini_model.py
def generate_response(self, prompt, **kwargs):
    """Generate a response to a given prompt using the Gemini API.

    Dispatches on the prompt type: strings go through
    generate_response_from_string, lists of message dictionaries go
    through generate_response_dictionary.

    Args:
        prompt (str | list[dict]): The prompt to generate a response to.
        **kwargs: Additional keyword arguments (forwarded for string prompts).

    Returns:
        str: The generated response.

    Raises:
        TypeError: If prompt is neither a str nor a list.

    Example:
        >>> gemini_model = GeminiModel()
        >>> response = gemini_model.generate_response("What is the weather in Tokyo?")
    """
    if isinstance(prompt, str):
        return self.generate_response_from_string(prompt, **kwargs)
    # BUG FIX: isinstance(prompt, list[dict]) raises TypeError because
    # isinstance() cannot take a parameterized generic; check list instead.
    if isinstance(prompt, list):
        return self.generate_response_dictionary(prompt)
    raise TypeError(f"Unsupported prompt type: {type(prompt).__name__}")

generate_response_dictionary(prompt)

Generate a response to a given prompt using a list of dictionaries.

Parameters:

Name Type Description Default
prompt list[dict]

The prompt to generate a response to.

required

Returns:

Name Type Description
str str

The generated response.

Example

gemini_model = GeminiModel()
response = gemini_model.generate_response_dictionary([{"role": "user", "parts": "What is the weather in Tokyo?"}])

Source code in arai_ai_agents/models/gemini_model.py
def generate_response_dictionary(self, prompt: list[dict]) -> str:
    """Generate a response from a pre-built list of message dictionaries.

    Args:
        prompt (list[dict]): Messages in Gemini's role/parts format.

    Returns:
        str: The generated response, or an error message on failure.

    Example:
        >>> gemini_model = GeminiModel()
        >>> response = gemini_model.generate_response_dictionary([{"role": "user", "parts": "What is the weather in Tokyo?"}])
    """
    try:
        # Pass the message list straight through to the model.
        result = self.model.generate_content(prompt)
        return result.text.strip()
    except Exception as e:
        return f"Error generating response: {str(e)}"

generate_response_from_string(prompt, **kwargs)

Description

Generate a response to a given prompt using a string.

Parameters:

Name Type Description Default
prompt str

The prompt to generate a response to.

required
**kwargs

Additional keyword arguments.

{}

Returns:

Name Type Description
str

The generated response.

Example

gemini_model = GeminiModel()
response = gemini_model.generate_response_from_string("What is the weather in Tokyo?")

Source code in arai_ai_agents/models/gemini_model.py
def generate_response_from_string(self, prompt, **kwargs):
    """Generate a response to a string prompt, optionally with a persona.

    Args:
        prompt (str): The prompt to generate a response to.
        **kwargs: Optional "personality" and "communication_style" strings
            that are prepended to the conversation as a persona message.

    Returns:
        str: The generated response, or an error message on failure.

    Example:
        >>> gemini_model = GeminiModel()
        >>> response = gemini_model.generate_response_from_string("What is the weather in Tokyo?")
    """
    # BUG FIX: the original only assigned these names when the matching
    # kwarg was present, so passing one without the other raised
    # UnboundLocalError (masked by the except below as an error string).
    personality = kwargs.get("personality", "")
    communication_style = kwargs.get("communication_style", "")

    try:
        messages = []

        # Persona message goes first so it frames the actual prompt.
        if personality or communication_style:
            persona_prompt = f"{personality} {communication_style}"
            messages.append({
                "role": "user",
                "parts": [persona_prompt]
            })

        # The user's actual prompt.
        messages.append({
            "role": "user",
            "parts": [prompt]
        })

        response = self.model.generate_content(messages)
        return response.text.strip()

    except Exception as e:
        return f"Error generating response: {str(e)}"

generate_yaml_response()

Generate a YAML response.

Returns:

Type Description

None

Example

gemini_model = GeminiModel()
gemini_model.generate_yaml_response()

Source code in arai_ai_agents/models/gemini_model.py
def generate_yaml_response(self):
    """Generate a YAML character sheet and write it to debug files.

    Writes the raw and stripped model output as plain text, and as
    YAML-serialized strings, to several files in the working directory.

    Returns:
        None

    Example:
        >>> gemini_model = GeminiModel()
        >>> gemini_model.generate_yaml_response()
    """
    request = [{
        "role": "user",
        "parts": "Create me a YAML file with the following fields: name, personality, communication_style, topic, backstory, universe, hashtags, emojis. Start and end the file with ```yaml and ```"
    }]

    response = self.model.generate_content(request)
    print(response)

    raw = response.text
    stripped = raw.strip()

    # Plain-text dumps of the model output (raw and stripped).
    with open("response.txt", "w", encoding="utf-8") as f:
        f.write(stripped)
    with open("response.yaml", "w", encoding="utf-8") as f:
        f.write(raw)
    with open("response_stripped.yaml", "w", encoding="utf-8") as f:
        f.write(stripped)

    # YAML-serialized dumps (the text is stored as one YAML string).
    with open("yaml_response.yaml", "w", encoding="utf-8") as f:
        yaml.dump(raw, f)
    with open("yaml_response_stripped.yaml", "w", encoding="utf-8") as f:
        yaml.dump(stripped, f)