Chat with Shrek! #2

Merged 9 commits on Dec 27, 2024

104 changes: 92 additions & 12 deletions poetry.lock

Some generated files are not rendered by default.

4 changes: 3 additions & 1 deletion pyproject.toml
@@ -2,7 +2,7 @@
name = "swamp-discord-bot"
version = "0.1.0"
description = "A Discord bot for managing a friend server"
-authors = ["Forest <>"]
+authors = ["Forest <[email protected]>"]
Contributor:

Out of curiosity, was this causing issues? I could imagine something might not like a missing email, but curious what it might be :)

Contributor (Author):

It caused an error message. Not sure if it's the poetry version or what. Everything works but the message is annoying.

readme = "README.md"

[tool.poetry.dependencies]
@@ -15,6 +15,8 @@ aiohttp = "^3.11.11"
black = "^24.10.0"
isort = "^5.13.2"
pylint = "^3.3.2"
+pytest = "^8.3.4"
+pytest-asyncio = "^0.25.0"

[build-system]
requires = ["poetry-core"]
Empty file added src/__init__.py
Empty file.
59 changes: 29 additions & 30 deletions src/bot.py
@@ -1,14 +1,19 @@
import asyncio
import os
import sys
import discord
import aiohttp
import asyncio
from datetime import datetime, timedelta

import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv

from src.llm import query_ai

sys.path.append(os.path.dirname(__file__))
# Configure logging to stdout
import logging


logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
@@ -18,7 +23,6 @@

# Constants
GENERAL_CHANNEL_ID = 670339732059979807
AI_ENDPOINT = "http://192.168.0.192:5000/v1/completions"
MORNING_INTERVAL = 23 # hours

# Load environment variables
@@ -30,28 +34,18 @@
intents.message_content = True
intents.members = True

bot = commands.Bot(command_prefix='!', intents=intents)

async def query_ai(prompt):
    """Query the AI endpoint with a prompt"""
    try:
        payload = {
            "prompt": prompt,
            "max_tokens": 200,
            "temperature": 0.6,
            "seed": 10
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(AI_ENDPOINT, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    return data.get('text', 'No response from AI')
                else:
                    logger.error(f"AI API error: {response.status}")
                    return "Sorry, I couldn't get a response from the AI right now."
    except Exception as e:
        logger.error(f"Error querying AI: {e}")
        return "Sorry, there was an error communicating with the AI."
class MrBot(commands.Bot):
    async def setup_hook(self):
        extensions = [
            "shrek_chat",
        ]
        for extension in extensions:
            await self.load_extension(extension)


bot = MrBot(command_prefix='!', intents=intents)
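
The "shrek_chat" extension loaded by setup_hook is not part of this diff. For context, a minimal discord.py 2.x extension along these lines would be loadable here; the cog name, listener, and prompt below are illustrative assumptions, not the actual file:

```python
# Hypothetical sketch of src/shrek_chat.py; the real extension is not shown
# in this PR, so the cog name and behavior are assumptions.
import discord
from discord.ext import commands

from src.llm import query_ai


class ShrekChat(commands.Cog):
    """Replies in character when the bot is mentioned (assumed behavior)."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message):
        # Ignore other bots (and our own messages)
        if message.author.bot:
            return
        if self.bot.user in message.mentions:
            reply = await query_ai(f"Reply as Shrek would: {message.content}")
            await message.channel.send(reply)


async def setup(bot: commands.Bot):
    # discord.py 2.x entry point invoked by bot.load_extension("shrek_chat")
    await bot.add_cog(ShrekChat(bot))
```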


async def check_ai_health():
    """Check if the AI endpoint is responsive"""
@@ -67,6 +61,7 @@ async def check_ai_health():
        logger.error(f"AI endpoint health check failed: {e}")
        return False


@tasks.loop(hours=MORNING_INTERVAL)
async def send_good_morning():
    """Send good morning message every 23 hours"""
@@ -77,6 +72,7 @@ async def send_good_morning():
    else:
        logger.error("Could not find general channel")


@send_good_morning.before_loop
async def before_good_morning():
    """Wait until the bot is ready before starting the good morning loop"""
@@ -88,28 +84,31 @@ async def before_good_morning():
        next_run = next_run + timedelta(days=1)
    await asyncio.sleep((next_run - now).seconds)


@bot.event
async def on_ready():
    logger.info(f'Bot {bot.user} has connected to Discord!')
    logger.info(f'Bot is in {len(bot.guilds)} guilds')


    # Start the good morning task
    send_good_morning.start()

    # Check AI endpoint health
    logger.info("Performing AI endpoint health check...")
    is_healthy = await check_ai_health()

    if is_healthy:
        # Start the good morning task only if AI is healthy
        send_good_morning.start()
Contributor:

Any reason to remove the existing bot features? (features is used loosely here, but nonetheless :P )

Contributor (Author):

Typo, needs to be fixed.

        logger.info("Bot startup complete - AI endpoint is healthy")
    else:
        logger.warning("Bot started but AI endpoint is not responding - some features may not work")
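
Per the reply above, dropping the health-gated startup was accidental. Assuming the original behavior is meant to stay, the intended on_ready would presumably look roughly like this sketch (an assumption, not part of the diff):

```python
# Sketch of the presumed intended on_ready, keeping the health-gated start
# from the original code; this is an assumption, not code from the PR.
@bot.event
async def on_ready():
    logger.info(f'Bot {bot.user} has connected to Discord!')
    logger.info(f'Bot is in {len(bot.guilds)} guilds')

    logger.info("Performing AI endpoint health check...")
    if await check_ai_health():
        # Start the good morning task only if the AI endpoint responds
        send_good_morning.start()
        logger.info("Bot startup complete - AI endpoint is healthy")
    else:
        logger.warning("Bot started but AI endpoint is not responding - some features may not work")
```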


# Shrek Chat Part
# Run the bot
if __name__ == '__main__':
    logger.info("Starting bot...")
    token = os.getenv('DISCORD_TOKEN')

    # Debug token presence (don't log the actual token!)
    if token:
        logger.info("Discord token found")
@@ -120,4 +119,4 @@ async def on_ready():
        sys.exit(1)

    logger.info("Attempting to start bot with token...")
    bot.run(token)
    bot.run(token)
63 changes: 63 additions & 0 deletions src/llm.py
@@ -0,0 +1,63 @@
import json
import logging

import aiohttp


AI_ENDPOINT = "http://192.168.0.192:5000/v1/completions"

logger = logging.getLogger("llm")

async def query_ai(prompt):
    """Query the AI endpoint with a prompt"""
    try:
        payload = {
            "prompt": prompt,
            "max_tokens": 200,
            "temperature": 0.6,
            "seed": 10
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(AI_ENDPOINT, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    return data.get('text', 'No response from AI')
                else:
                    logger.error(f"AI API error: {response.status}")
                    return "Sorry, I couldn't get a response from the AI right now."
    except Exception as e:
        logger.error(f"Error querying AI: {e}")
        return "Sorry, there was an error communicating with the AI."



# mainly used for testing. Should not be used in production unless the user is aware that openai or anthropic or other apis are used
async def query_open_router(prompt: str) -> str:
Contributor:

Shouldn't be too hard to make sure we don't run this in prod, since there won't be any API keys available where it's running 👍🏻

    OPENROUTER_API_KEY=""
    async with aiohttp.ClientSession() as session:
        async with session.post(
            url="https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",

            },
            data=json.dumps({
                "model": "meta-llama/llama-3.1-8b-instruct", # Optional
                "messages": [
                    {
                        "role": "user",
                        "content": prompt
                    }
                ]
            })) as response:
            data = await response.json()
            return data['choices'][0]['message']['content']

class OpenRouterLlm():
    async def query_ai(self, prompt:str)-> str:
        return await query_open_router(prompt)

class LocalLlm:
    async def query_ai(self, prompt: str):
        return await query_ai(prompt)
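
A minimal usage sketch for these wrappers, assuming the backend is chosen from an OPENROUTER_API_KEY environment variable (the diff itself hard-codes an empty key), could look like this:

```python
# Illustrative only: pick a backend at startup. Reading the key from the
# environment is an assumption; the PR hard-codes OPENROUTER_API_KEY = "".
import asyncio
import os

from src.llm import LocalLlm, OpenRouterLlm


async def main():
    llm = OpenRouterLlm() if os.getenv("OPENROUTER_API_KEY") else LocalLlm()
    print(await llm.query_ai("Say hello like Shrek"))


if __name__ == "__main__":
    asyncio.run(main())
```

This lines up with the reviewer's point above: without a key in the environment, the OpenRouter path would simply never be selected.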
