Added Llama and Google Gemini AI/LLM #158

Merged: 3 commits, Dec 14, 2024
Changes from all commits
53 changes: 53 additions & 0 deletions cogs/llm.py
@@ -0,0 +1,53 @@
import disnake
from disnake.ext import commands
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential
import google.generativeai as genai
from env import Api  # local module holding the API keys (not part of this diff)


class Llm(commands.Cog):
    """Slash commands for querying Llama (via GitHub Models) and Google Gemini.

    User-facing strings are Dutch ("Stel vragen aan ..." = "Ask questions to ...",
    "Ik ben aan het denken!..." = "I'm thinking!...").
    """

    def __init__(self, bot: commands.Bot):
        self.bot = bot
        print("LLM Cog loaded!")

    @commands.slash_command()
    async def ai(self, inter: disnake.ApplicationCommandInteraction):
        # Parent command; only the subcommands below are invokable.
        pass

    @ai.sub_command(description="Stel vragen aan Llama-3.3-70B-Instruct")
    async def llama(self, inter: disnake.ApplicationCommandInteraction, vraag: str):
        # Acknowledge immediately (ephemeral) so the interaction does not
        # time out while the model generates its answer.
        await inter.response.send_message("Ik ben aan het denken!...", ephemeral=True)

        # GitHub Models serves Llama through the Azure AI Inference endpoint,
        # authenticated with a GitHub token.
        client = ChatCompletionsClient(
            endpoint="https://models.inference.ai.azure.com",
            credential=AzureKeyCredential(Api.GH_TOKEN),
        )

        # Note: complete() is synchronous, so it briefly blocks the event loop.
        response = client.complete(
            messages=[
                SystemMessage(content="You are a helpful assistant."),
                UserMessage(content=vraag),
            ],
            temperature=1.0,
            top_p=1.0,
            max_tokens=1000,
            model="Llama-3.3-70B-Instruct",
        )

        # Truncate the answer so the message stays under Discord's 2000-character limit.
        antwoord = response.choices[0].message.content[:1800]
        await inter.channel.send(
            f"# Llama\n\n## Vraag:\n{vraag}\n## Antwoord:\n{antwoord}\n"
            f"-# Aangevraagd door: {inter.author.display_name}\n"
            "-# Deze tekst is gegenereerd door AI/LLM. Het kan fouten bevatten."
        )

    @ai.sub_command(description="Stel vragen aan Gemini!")
    async def gemini(self, inter: disnake.ApplicationCommandInteraction, vraag: str):
        genai.configure(api_key=Api.GEMINI_API_KEY)
        model = genai.GenerativeModel("gemini-1.5-flash")

        await inter.response.send_message("Ik ben aan het denken!...", ephemeral=True)
        # Note: generate_content() is synchronous, so it briefly blocks the event loop.
        response = model.generate_content(vraag)

        # Truncate the answer so the message stays under Discord's 2000-character limit.
        antwoord = response.text[:1800]
        await inter.channel.send(
            f"# Gemini\n\n## Vraag:\n{vraag}\n## Antwoord:\n{antwoord}\n"
            f"-# Aangevraagd door: {inter.author.display_name}\n"
            "-# Deze tekst is gegenereerd door AI/LLM. Het kan fouten bevatten."
        )


def setup(bot: commands.Bot):
    bot.add_cog(Llm(bot))
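The cog imports `Api` from a local `env` module that is not part of this diff. Below is a minimal sketch of what that module presumably looks like; only the attribute names `GH_TOKEN` and `GEMINI_API_KEY` come from the code above, and reading them from environment variables is an assumption.

```python
# env.py (hypothetical sketch; not part of this PR). Only the attribute
# names GH_TOKEN and GEMINI_API_KEY are known from cogs/llm.py.
import os

class Api:
    # GitHub token used to authenticate against the GitHub Models /
    # Azure AI Inference endpoint.
    GH_TOKEN = os.environ["GH_TOKEN"]
    # API key for Google Gemini.
    GEMINI_API_KEY = os.environ["GEMINI_API_KEY"]
```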
1 change: 1 addition & 0 deletions main.py
@@ -52,6 +52,7 @@ async def keep_sql_active():
bot.load_extension("cogs.showcase_remover")
bot.load_extension("cogs.anti_bot")
bot.load_extension("cogs.status")
bot.load_extension("cogs.llm")

# Running the bot and starting thread
if __name__ == '__main__':
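For context, `bot.load_extension("cogs.llm")` imports the `cogs/llm.py` module and calls its `setup(bot)` hook, which registers the cog with the bot. A rough manual equivalent, as a sketch assuming the `Llm` class from the diff above:

```python
# Roughly what bot.load_extension("cogs.llm") does (sketch):
from cogs.llm import Llm

bot.add_cog(Llm(bot))  # setup(bot) in cogs/llm.py performs this call
```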
4 changes: 3 additions & 1 deletion requirements.txt
@@ -7,4 +7,6 @@ numpy
xmltodict
audioop-lts
sentry_sdk
uptime_kuma_api
azure-ai-inference
google-generativeai
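The two new dependencies back the two subcommands: `azure-ai-inference` for the Llama call and `google-generativeai` for Gemini. A quick import smoke test after installing the updated requirements (a hypothetical helper, not part of the PR):

```python
# Verify the two new dependencies install and import cleanly.
from azure.ai.inference import ChatCompletionsClient  # azure-ai-inference
import google.generativeai as genai                   # google-generativeai

print("Both LLM client libraries import correctly.")
```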