dev #2

Merged
WhatDidYouExpect merged 24 commits from dev into main 2025-07-22 00:26:21 +02:00
17 changed files with 717 additions and 354 deletions

2
.gitmodules vendored
View file

@ -1,3 +1,3 @@
[submodule "modules/volta"]
path = modules/volta
url = https://github.com/gooberinc/volta
url = https://forgejo.expect.ovh/gooberinc/volta
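Side note, not part of the diff: an existing clone would normally need to re-sync the submodule to pick up the new URL, e.g. (assuming a standard git setup):

git submodule sync --recursive
git submodule update --init --recursive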

View file

@ -5,4 +5,5 @@ Special thanks to [Charlie's Computers](https://github.com/PowerPCFan) for being
[Goober Central](https://github.com/whatdidyouexpect/goober-central)
![the goober](https://goober.whatdidyouexpect.eu/imgs/goobs/goobs.png)
[Another mirror](https://forgejo.expect.ovh/gooberinc/goober)
no promises that it'll be stable

View file

@ -8,9 +8,6 @@ by PowerPCFan
[Cog Manager](https://github.com/WhatDidYouExpect/goober/blob/main/cogs/cogmanager.py)
by expect
[TensorFlow integration](https://github.com/WhatDidYouExpect/goober/blob/main/cogs/tf.py)
by SuperSilly2 (requires Python 3.7 - 3.10, tensorflow-metal/tensorflow-gpu and tensorflow/tensorflow-macos)
[Web Scraper](https://raw.githubusercontent.com/WhatDidYouExpect/goober/refs/heads/main/cogs/webscraper.py)
by expect (requires goober version 0.11.7.2 or higher)

123
assets/cogs/lyrics.py Normal file
View file

@ -0,0 +1,123 @@
import discord
from discord.ext import commands
from discord import app_commands
import aiohttp
import re
class Lyrics(commands.Cog):
def __init__(self, bot):
self.bot = bot
@app_commands.command(name="lyrics", description="Get lyrics for a song")
@app_commands.describe(
artist="Name of the artist",
song="Title of the song",
language="Target language code (optional)"
)
@app_commands.choices(language=[
app_commands.Choice(name="Bulgarian", value="bg"),
app_commands.Choice(name="Czech", value="cs"),
app_commands.Choice(name="Danish", value="da"),
app_commands.Choice(name="German", value="de"),
app_commands.Choice(name="Greek", value="el"),
app_commands.Choice(name="English", value="en"),
app_commands.Choice(name="Spanish", value="es"),
app_commands.Choice(name="Estonian", value="et"),
app_commands.Choice(name="Finnish", value="fi"),
app_commands.Choice(name="French", value="fr"),
app_commands.Choice(name="Irish", value="ga"),
app_commands.Choice(name="Croatian", value="hr"),
app_commands.Choice(name="Hungarian", value="hu"),
app_commands.Choice(name="Italian", value="it"),
app_commands.Choice(name="Lithuanian", value="lt"),
app_commands.Choice(name="Latvian", value="lv"),
app_commands.Choice(name="Maltese", value="mt"),
app_commands.Choice(name="Dutch", value="nl"),
app_commands.Choice(name="Polish", value="pl"),
app_commands.Choice(name="Portuguese", value="pt"),
app_commands.Choice(name="Romanian", value="ro"),
app_commands.Choice(name="Slovak", value="sk"),
app_commands.Choice(name="Slovene", value="sl"),
app_commands.Choice(name="Swedish", value="sv"),
])
async def lyrics(self, interaction: discord.Interaction, artist: str = None, song: str = None, language: app_commands.Choice[str] = None):
await interaction.response.defer()
if not artist or not song:
member = interaction.guild.get_member(interaction.user.id)
if not member:
member = await interaction.guild.fetch_member(interaction.user.id)
act_artist, act_song = await self.get_artist_song_from_presence(member)
if act_artist and act_song:
artist = artist or act_artist
song = song or act_song
else:
await interaction.followup.send("No artist or song provided and couldn't find it from your current activity.")
return
lyrics = await self.fetch_lyrics(artist, song)
if not lyrics:
await interaction.followup.send(f"Could not find lyrics for **{artist} - {song}**")
return
if language:
translated = await self.translate_text(lyrics, language.value)
if translated:
lyrics = translated
if len(lyrics) > 1900:
lyrics = lyrics[:1900] + "\n\n[...lyrics truncated...]"
embed = discord.Embed(
title=f"{artist} - {song}",
description=lyrics,
color=discord.Color.blue()
)
embed.set_footer(text=f"Requested by {interaction.user}", icon_url=interaction.user.display_avatar.url)
await interaction.followup.send(embed=embed)
async def get_artist_song_from_presence(self, member: discord.Member):
for activity in member.activities:
if isinstance(activity, discord.Spotify):
return activity.artist, activity.title
return None, None
async def fetch_lyrics(self, artist, song):
artist_q = artist.replace(' ', '+').lower()
song_q = song.replace(' ', '+').lower()
url = f"https://lrclib.net/api/get?artist_name={artist_q}&track_name={song_q}"
print(url)
async with aiohttp.ClientSession() as session:
try:
async with session.get(url) as resp:
if resp.status != 200:
return None
data = await resp.json()
return data.get('plainLyrics')
except Exception:
return None
async def translate_text(self, text: str, target_lang: str) -> str | None:
translate_url = "https://translate.googleapis.com/translate_a/single"
params = {
"client": "gtx",
"sl": "auto",
"tl": target_lang,
"dt": "t",
"q": text
}
async with aiohttp.ClientSession() as session:
try:
async with session.get(translate_url, params=params) as resp:
if resp.status != 200:
return None
result = await resp.json()
translated_chunks = [item[0] for item in result[0] if item[0]]
return ''.join(translated_chunks)
except Exception:
return None
async def setup(bot):
await bot.add_cog(Lyrics(bot))
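Side note, not part of the diff: the cog's lrclib.net lookup can be sanity-checked outside Discord with a minimal standalone sketch like the one below (the helper name and the artist/song values are illustrative only, not part of the PR):

import asyncio
import aiohttp

async def fetch_plain_lyrics(artist: str, song: str) -> str | None:
    # Same endpoint and field the Lyrics cog reads; lrclib returns JSON with a plainLyrics key.
    params = {"artist_name": artist.lower(), "track_name": song.lower()}
    async with aiohttp.ClientSession() as session:
        async with session.get("https://lrclib.net/api/get", params=params) as resp:
            if resp.status != 200:
                return None
            data = await resp.json()
            return data.get("plainLyrics")

if __name__ == "__main__":
    print(asyncio.run(fetch_plain_lyrics("Weezer", "Buddy Holly")) or "No lyrics found")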

View file

@ -1,155 +0,0 @@
import discord
from discord.ext import commands
import os
import numpy as np
import json
import pickle
import functools
import re
import time
import asyncio
ready = True
MODEL_MATCH_STRING = r"[0-9]{2}_[0-9]{2}_[0-9]{4}-[0-9]{2}_[0-9]{2}"
try:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.backend import clear_session
if tf.config.list_physical_devices('GPU'):
print("Using GPU acceleration")
elif tf.config.list_physical_devices('Metal'):
print("Using Metal for macOS acceleration")
except ImportError:
print("ERROR: Failed to import TensorFlow. Ensure you have the correct dependencies:")
print("tensorflow>=2.15.0")
print("For macOS (Apple Silicon): tensorflow-metal")
ready = False
class TFCallback(keras.callbacks.Callback):
def __init__(self, bot, progress_embed: discord.Embed, message):
self.embed = progress_embed
self.bot = bot
self.message = message
self.times = [time.time()]
async def send_message(self, message: str, description: str, **kwargs):
if "epoch" in kwargs:
self.times.append(time.time())
avg_epoch_time = np.mean(np.diff(self.times))
description = f"ETA: {round(avg_epoch_time)}s"
self.embed.add_field(name=f"<t:{round(time.time())}:t> - {message}", value=description, inline=False)
await self.message.edit(embed=self.embed)
def on_train_end(self, logs=None):
self.bot.loop.create_task(self.send_message("Training stopped", "Training has been stopped."))
def on_epoch_begin(self, epoch, logs=None):
self.bot.loop.create_task(self.send_message(f"Starting epoch {epoch}", "This might take a while", epoch=True))
def on_epoch_end(self, epoch, logs=None):
self.bot.loop.create_task(self.send_message(f"Epoch {epoch} ended", f"Accuracy: {round(logs.get('accuracy', 0.0), 4)}"))
class Ai:
def __init__(self):
model_path = settings.get("model_path")
if model_path:
self.__load_model(model_path)
self.is_loaded = model_path is not None
self.batch_size = 64
def generate_model_name(self):
return time.strftime('%d_%m_%Y-%H_%M', time.localtime())
def __load_model(self, model_path):
clear_session()
self.model = load_model(os.path.join(model_path, "model.h5"))
model_name = os.path.basename(model_path)
try:
with open(os.path.join(model_path, "tokenizer.pkl"), "rb") as f:
self.tokenizer = pickle.load(f)
except FileNotFoundError:
print("Failed to load tokenizer, using default.")
self.tokenizer = Tokenizer()
with open("memory.json", "r") as f:
self.tokenizer.fit_on_texts(json.load(f))
self.is_loaded = True
def reload_model(self):
clear_session()
model_path = settings.get("model_path")
if model_path:
self.__load_model(model_path)
self.is_loaded = True
async def run_async(self, func, bot, *args, **kwargs):
return await bot.loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
class Learning(Ai):
def create_model(self, memory, epochs=2):
memory = memory[:2000]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(memory)
sequences = tokenizer.texts_to_sequences(memory)
X, y = [], []
for seq in sequences:
for i in range(1, len(seq)):
X.append(seq[:i])
y.append(seq[i])
maxlen = max(map(len, X))
X = pad_sequences(X, maxlen=maxlen, padding="pre")
y = np.array(y)
model = Sequential([
Embedding(input_dim=VOCAB_SIZE, output_dim=128, input_length=maxlen),
LSTM(64),
Dense(VOCAB_SIZE, activation="softmax")
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(X, y, epochs=epochs, batch_size=64, callbacks=[tf_callback])
self.save_model(model, tokenizer, history)
def save_model(self, model, tokenizer, history, name=None):
name = name or self.generate_model_name()
model_dir = os.path.join("models", name)
os.makedirs(model_dir, exist_ok=True)
with open(os.path.join(model_dir, "info.json"), "w") as f:
json.dump(history.history, f)
with open(os.path.join(model_dir, "tokenizer.pkl"), "wb") as f:
pickle.dump(tokenizer, f)
model.save(os.path.join(model_dir, "model.h5"))
class Generation(Ai):
def generate_sentence(self, word_amount, seed):
if not self.is_loaded:
return False
for _ in range(word_amount):
token_list = self.tokenizer.texts_to_sequences([seed])[0]
token_list = pad_sequences([token_list], maxlen=self.model.input_shape[1], padding="pre")
predicted_word_index = np.argmax(self.model.predict(token_list, verbose=0), axis=-1)[0]
output_word = next((w for w, i in self.tokenizer.word_index.items() if i == predicted_word_index), "")
seed += " " + output_word
return seed
VOCAB_SIZE = 100_000
settings = {}
learning = Learning()
generation = Generation()
tf_callback = None
async def setup(bot):
await bot.add_cog(Tf(bot))

View file

@ -1,8 +1,23 @@
{
"minigames_hangman_game": "Word: {display_word()}\nWrong guesses: {wrong_guesses}/{max_wrong}",
"minigames_hangman_lost": "You lost! The word was:",
"minigames_hangman_won": "You won! The word was:",
"minigames_hangman_already_guessed": "You already guessed",
"minigames_hangman_user_letter_guess": "Your letter guess",
"minigames_hangman_guess": "Guess a Letter",
"minigames_hangman_api_failed": "Failed to get a random word.",
"minigames_hangman": "Play Hangman with a random word",
"minigames_click_to_guess": "Click to guess a number from 1 to 10",
"minigames_guess_button": "Guess",
"minigames_wrong_number": "Wrong! The number was",
"minigames_correct": "Correct!",
"minigames_invalid_number": "Invalid number!",
"minigames_guess_the_number": "Guess the number",
"minigames_your_guess": "Your guess (1-10)",
"memory_file_valid": "The memory.json file is valid!",
"file_aint_uft8": "File is not valid UTF-8 text. Might be binary or corrupted.",
"psutil_not_installed": "Memory check skipped.",
"not_cloned": "Goober is not cloned! Please clone it from GitHub.",
"not_cloned": "Goober is not cloned! Please clone it from Git.",
"checks_disabled": "Checks are disabled!",
"unhandled_exception": "An unhandled exception occurred. Please report this issue on GitHub.",
"active_users:": "Active users:",

View file

@ -1,130 +0,0 @@
{
"checks_disabled": "Les vérifications sont désactivées !",
"unhandled_exception": "Une exception non gérée est survenue. Merci de rapporter ce problème sur GitHub.",
"active_users:": "Utilisateurs actifs :",
"spacy_initialized": "spaCy et spacytextblob sont prêts.",
"spacy_model_not_found": "Le modèle spaCy est introuvable ! Téléchargement en cours...",
"env_file_not_found": "Le fichier .env est introuvable ! Créez-en un avec les variables nécessaires.",
"error_fetching_active_users": "Erreur lors de la récupération des utilisateurs actifs : {error}",
"error_sending_alive_ping": "Erreur lors de lenvoi du ping actif : {error}",
"already_started": "Jai déjà démarré ! Je ne me mets pas à jour...",
"please_restart": "Redémarre, stp !",
"local_ahead": "Local {remote}/{branch} est en avance ou à jour. Pas de mise à jour...",
"remote_ahead": "Remote {remote}/{branch} est en avance. Mise à jour en cours...",
"cant_find_local_version": "Je ne trouve pas la variable local_version ! Ou elle a été modifiée et ce nest pas un entier !",
"running_prestart_checks": "Exécution des vérifications préalables au démarrage...",
"continuing_in_seconds": "Reprise dans {seconds} secondes... Appuie sur une touche pour passer.",
"missing_requests_psutil": "requests et psutil manquants ! Installe-les avec pip : `pip install requests psutil`",
"requirements_not_found": "requirements.txt introuvable à {path}, a-t-il été modifié ?",
"warning_failed_parse_imports": "Avertissement : Échec du parsing des imports depuis {filename} : {error}",
"cogs_dir_not_found": "Répertoire des cogs introuvable à {path}, scan ignoré.",
"std_lib_local_skipped": "LIB STD / LOCAL {package} (vérification sautée)",
"ok_installed": "OK",
"missing_package": "MANQUANT",
"missing_package2": "nest pas installé",
"missing_packages_detected": "Packages manquants détectés :",
"telling_goober_central": "Envoi à goober central à {url}",
"failed_to_contact": "Impossible de contacter {url} : {error}",
"all_requirements_satisfied": "Toutes les dépendances sont satisfaites.",
"ping_to": "Ping vers {host} : {latency} ms",
"high_latency": "Latence élevée détectée ! Tu pourrais avoir des délais de réponse.",
"could_not_parse_latency": "Impossible danalyser la latence.",
"ping_failed": "Ping vers {host} échoué.",
"error_running_ping": "Erreur lors du ping : {error}",
"memory_usage": "Utilisation mémoire : {used} Go / {total} Go ({percent}%)",
"memory_above_90": "Usage mémoire au-dessus de 90% ({percent}%). Pense à libérer de la mémoire.",
"total_memory": "Mémoire totale : {total} Go",
"used_memory": "Mémoire utilisée : {used} Go",
"low_free_memory": "Mémoire libre faible détectée ! Seulement {free} Go disponibles.",
"measuring_cpu": "Mesure de lusage CPU par cœur...",
"core_usage": "Cœur {idx} : [{bar}] {usage}%",
"total_cpu_usage": "Usage total CPU : {usage}%",
"high_avg_cpu": "Moyenne CPU élevée : {usage}%",
"really_high_cpu": "Charge CPU vraiment élevée ! Le système pourrait ralentir ou planter.",
"memory_file": "Fichier mémoire : {size} Mo",
"memory_file_large": "Fichier mémoire de 1 Go ou plus, pense à le nettoyer pour libérer de lespace.",
"memory_file_corrupted": "Fichier mémoire corrompu ! Erreur JSON : {error}",
"consider_backup_memory": "Pense à sauvegarder et recréer le fichier mémoire.",
"memory_file_encoding": "Problèmes dencodage du fichier mémoire : {error}",
"error_reading_memory": "Erreur lecture fichier mémoire : {error}",
"memory_file_not_found": "Fichier mémoire introuvable.",
"modification_warning": "Goober a été modifié ! Toutes les modifications seront perdues lors d'une mise à jour !",
"reported_version": "Version rapportée :",
"current_hash": "Hachage actuel :",
"not_found": "n'est pas trouvé !",
"version_error": "Impossible de récupérer les informations de version. Code d'état",
"loaded_cog": "Cog chargé :",
"loaded_cog2": "Module chargé :",
"cog_fail": "Échec du chargement du cog :",
"cog_fail2": "Échec du chargement du module :",
"no_model": "Aucun modèle Markov sauvegardé trouvé. Démarrage à partir de zéro.",
"folder_created": "Dossier '{folder_name}' créé.",
"folder_exists": "Le dossier '{folder_name}' existe déjà. Ignorons...",
"logged_in": "Connecté en tant que",
"synced_commands": "Synchronisé",
"synced_commands2": "commandes !",
"fail_commands_sync": "Échec de la synchronisation des commandes :",
"started": "{name} a démarré !",
"name_check": "Erreur lors de la vérification de la disponibilité du nom :",
"name_taken": "Le nom est déjà pris. Veuillez choisir un autre nom.",
"name_check2": "Erreur lors de la vérification de la disponibilité du nom :",
"add_token": "Token : {token}\nVeuillez ajouter ce token à votre fichier .env comme",
"token_exists": "Le token existe déjà dans .env. Utilisation du token existant.",
"registration_error": "Erreur lors de l'enregistrement :",
"version_backup": "Sauvegarde créée :",
"backup_error": "Erreur : {LOCAL_VERSION_FILE} introuvable pour la sauvegarde.",
"model_loaded": "Modèle Markov chargé depuis",
"fetch_update_fail": "Impossible de récupérer les informations de mise à jour.",
"invalid_server": "Erreur : Informations de version invalides reçues du serveur.",
"goober_server_alert": "Alerte du serveur Goober central !\n",
"new_version": "Nouvelle version disponible : {latest_version} (Actuelle : {local_version})",
"changelog": "Consultez {VERSION_URL}/goob/changes.txt pour voir les modifications\n\n",
"invalid_version": "La version : {local_version} n'est pas valide !",
"invalid_version2": "Si c'est intentionnel, ignorez ce message. Sinon, appuyez sur Y pour récupérer une version valide depuis le serveur, quelle que soit la version actuelle de Goober.",
"invalid_version3": "La version actuelle sera sauvegardée dans current_version.bak..",
"input": "(Y ou toute autre touche pour ignorer...)",
"modification_ignored": "Vous avez modifié",
"modification_ignored2": "IGNOREWARNING est désactivé..",
"latest_version": "Vous utilisez la dernière version :",
"latest_version2": "Consultez {VERSION_URL}/goob/changes.txt pour voir les modifications",
"pinging_disabled": "Le ping est désactivé ! Je ne préviens pas le serveur que je suis en ligne...",
"goober_ping_success": "Connecté à Goober central en tant que {NAME}",
"goober_ping_fail": "Échec de l'envoi des données. Le serveur a retourné le code d'état :",
"goober_ping_fail2": "Une erreur est survenue lors de l'envoi des données :",
"sentence_positivity": "La positivité de la phrase est :",
"command_edit_fail": "Échec de la modification du message :",
"command_desc_retrain": "Réentraîne manuellement le modèle Markov.",
"command_markov_retrain": "Réentraînement du modèle Markov... Veuillez patienter.",
"command_markov_memory_not_found": "Erreur : fichier de mémoire introuvable !",
"command_markov_memory_is_corrupt": "Erreur : le fichier de mémoire est corrompu !",
"command_markov_retraining": "Traitement de {processed_data}/{data_size} points de données...",
"command_markov_retrain_successful": "Modèle Markov réentraîné avec succès en utilisant {data_size} points de données !",
"command_desc_talk": "parle et tout ça",
"command_talk_insufficent_text": "Je dois apprendre plus de messages avant de pouvoir parler.",
"command_talk_generation_fail": "Je n'ai rien à dire pour le moment !",
"command_desc_help": "aide",
"command_help_embed_title": "Aide du bot",
"command_help_embed_desc": "Liste des commandes regroupées par catégorie.",
"command_help_categories_general": "Général",
"command_help_categories_admin": "Administration",
"command_help_categories_custom": "Commandes personnalisées",
"command_ran": "Info : {message.author.name} a exécuté {message.content}",
"command_ran_s": "Info : {interaction.user} a exécuté ",
"command_desc_ping": "ping",
"command_ping_embed_desc": "Latence du bot :",
"command_ping_footer": "Demandé par",
"command_about_desc": "à propos",
"command_about_embed_title": "À propos de moi",
"command_about_embed_field1": "Nom",
"command_about_embed_field2name": "Version",
"command_about_embed_field2value": "Locale : {local_version} \nDernière : {latest_version}",
"command_desc_stats": "statistiques",
"command_stats_embed_title": "Statistiques du bot",
"command_stats_embed_desc": "Données sur la mémoire du bot.",
"command_stats_embed_field1name": "Statistiques du fichier",
"command_stats_embed_field1value": "Taille : {file_size} octets\nLignes : {line_count}",
"command_stats_embed_field2name": "Version",
"command_stats_embed_field2value": "Locale : {local_version} \nDernière : {latest_version}",
"command_stats_embed_field3name": "Informations variables",
"command_stats_embed_field3value": "Nom : {NAME} \nPréfixe : {PREFIX} \nID du propriétaire : {ownerid}\nLigne de ping : {PING_LINE} \nPartage de mémoire activé : {showmemenabled} \nEntraînement utilisateur activé : {USERTRAIN_ENABLED} \nChanson : {song} \nTexte de démarrage : ```{splashtext}```"
}

149
assets/locales/fr_ca.json Normal file
View file

@ -0,0 +1,149 @@
{
"minigames_hangman_game": "Mot à deviner : {display_word()}\nMauvaises guesses : {wrong_guesses}/{max_wrong}",
"minigames_hangman_lost": "T'es échoué solide! Le mot était :",
"minigames_hangman_won": "T'as gagné en masse! Le mot était :",
"minigames_hangman_already_guessed": "T'as déjà essayé ça mon chum",
"minigames_hangman_user_letter_guess": "Ta guess de lettre",
"minigames_hangman_guess": "Devine une lettre",
"minigames_hangman_api_failed": "Ça a chié en essayant d'avoir un mot aléatoire.",
"minigames_hangman": "Jouer au Pendu avec un mot pogné au hasard",
"minigames_click_to_guess": "Clique pour deviner un chiffre entre 1 pis 10",
"minigames_guess_button": "Devine",
"minigames_wrong_number": "Nope! C'était",
"minigames_correct": "Bonne guess!",
"minigames_invalid_number": "Chiffre pas valide!",
"minigames_guess_the_number": "Devine le chiffre",
"minigames_your_guess": "Ta guess (1-10)",
"memory_file_valid": "Le fichier memory.json est correct!",
"file_aint_uft8": "Le fichier est pas du bon UTF-8. Ça doit être binaire ou scrap.",
"psutil_not_installed": "Vérification de mémoire skipée.",
"not_cloned": "Goober est pas cloné! Va donc le cloner depuis Git.",
"checks_disabled": "Les checks sont désactivées!",
"unhandled_exception": "Y'a eu une erreur pas prévue. Rapporte ça sur GitHub mon gars.",
"active_users:": "Monde actif :",
"spacy_initialized": "spaCy pis spacytextblob sont prêts.",
"spacy_model_not_found": "Le modèle spaCy est introuvable! On le télécharge...",
"env_file_not_found": "Le fichier .env est pas là! Fais-en un avec les variables nécessaires.",
"error_fetching_active_users": "Ça a chié en essayant de pogner les utilisateurs actifs : {error}",
"error_sending_alive_ping": "Ça a chié en envoyant le ping : {error}",
"already_started": "J'suis déjà parti! J'me mets pas à jour...",
"please_restart": "Redémarre-moi donc!",
"local_ahead": "La version locale {remote}/{branch} est à jour. Pas besoin d'update...",
"remote_ahead": "La version remote {remote}/{branch} est en avance. On update...",
"cant_find_local_version": "J'arrive pas à trouver la variable local_version! Ou ben elle a été modifiée pis c'est pas un chiffre!",
"running_prestart_checks": "On fait les checks avant de partir...",
"continuing_in_seconds": "On continue dans {seconds} secondes... Appuie sur une touche pour skip.",
"missing_requests_psutil": "Y manque requests pis psutil! Installe-les avec pip : `pip install requests psutil`",
"requirements_not_found": "requirements.txt introuvable à {path}, est-ce qu'il a été modifié?",
"warning_failed_parse_imports": "Attention : Ça a chié en lisant les imports de {filename} : {error}",
"cogs_dir_not_found": "Le dossier des cogs est pas à {path}, on skip le scan.",
"std_lib_local_skipped": "LIB STD / LOCAL {package} (check skipé)",
"ok_installed": "OK",
"missing_package": "MANQUANT",
"missing_package2": "est pas installé",
"missing_packages_detected": "Y'a des affaires qui manquent :",
"telling_goober_central": "J'envoie ça à goober central à {url}",
"failed_to_contact": "J'ai pas réussi à contacter {url} : {error}",
"all_requirements_satisfied": "Tout ce qu'il faut est installé.",
"ping_to": "Ping à {host} : {latency} ms",
"high_latency": "Latence élevée! Ça pourrait être lent.",
"could_not_parse_latency": "J'ai pas pu comprendre la latence.",
"ping_failed": "Le ping à {host} a chié.",
"error_running_ping": "Ça a chié en faisant le ping : {error}",
"memory_usage": "Mémoire utilisée : {used} Go / {total} Go ({percent}%)",
"memory_above_90": "La mémoire est à plus de 90% ({percent}%). Libère de la mémoire.",
"total_memory": "Mémoire totale : {total} Go",
"used_memory": "Mémoire utilisée : {used} Go",
"low_free_memory": "Y'a presque plus de mémoire! Juste {free} Go de libre.",
"measuring_cpu": "On check l'usage CPU par coeur...",
"core_usage": "Coeur {idx} : [{bar}] {usage}%",
"total_cpu_usage": "Usage total CPU : {usage}%",
"high_avg_cpu": "CPU trop élevé : {usage}%",
"really_high_cpu": "Le CPU est en tabarnak! Ça pourrait crasher.",
"memory_file": "Fichier mémoire : {size} Mo",
"memory_file_large": "Fichier mémoire de 1 Go ou plus, nettoie ça pour faire de la place.",
"memory_file_corrupted": "Fichier mémoire scrap! Erreur JSON : {error}",
"consider_backup_memory": "Pense à faire un backup pis recréer le fichier mémoire.",
"memory_file_encoding": "Problème d'encodage du fichier mémoire : {error}",
"error_reading_memory": "Ça a chié en lisant le fichier mémoire : {error}",
"memory_file_not_found": "Fichier mémoire pas trouvé.",
"modification_warning": "Goober a été modifié! Tes modifications vont être perdues à l'update!",
"reported_version": "Version rapportée :",
"current_hash": "Hash actuel :",
"not_found": "est pas trouvé!",
"version_error": "J'ai pas pu avoir les infos de version. Code d'état",
"loaded_cog": "Cog chargé :",
"loaded_cog2": "Module chargé :",
"cog_fail": "Ça a chié en chargeant le cog :",
"cog_fail2": "Ça a chié en chargeant le module :",
"no_model": "Y'a pas de modèle Markov de sauvegardé. On part de zéro.",
"folder_created": "Dossier '{folder_name}' créé.",
"folder_exists": "Le dossier '{folder_name}' existe déjà. On skip...",
"logged_in": "Connecté en tant que",
"synced_commands": "Synchronisé",
"synced_commands2": "commandes!",
"fail_commands_sync": "Ça a chié en synchronisant les commandes :",
"started": "{name} est parti!",
"name_check": "Ça a chié en checkant si le nom est libre :",
"name_taken": "Le nom est déjà pris. Choisis-en un autre.",
"name_check2": "Ça a chié en checkant si le nom est libre :",
"add_token": "Token : {token}\nAjoute ce token dans ton .env comme",
"token_exists": "Le token existe déjà dans .env. On utilise celui-là.",
"registration_error": "Ça a chié en s'enregistrant :",
"version_backup": "Backup créé :",
"backup_error": "Erreur : {LOCAL_VERSION_FILE} pas trouvé pour le backup.",
"model_loaded": "Modèle Markov chargé depuis",
"fetch_update_fail": "J'ai pas pu avoir les infos d'update.",
"invalid_server": "Erreur : Infos de version invalides du serveur.",
"goober_server_alert": "Alerte du serveur Goober central!\n",
"new_version": "Nouvelle version disponible : {latest_version} (Actuelle : {local_version})",
"changelog": "Va voir {VERSION_URL}/goob/changes.txt pour les changements\n\n",
"invalid_version": "La version : {local_version} est pas valide!",
"invalid_version2": "Si c'est fait exprès, ignore ça. Sinon, appuie sur Y pour avoir une version valide du serveur, peu importe ta version actuelle de Goober.",
"invalid_version3": "La version actuelle va être backupée dans current_version.bak..",
"input": "(Y ou n'importe quelle touche pour skip...)",
"modification_ignored": "T'as modifié",
"modification_ignored2": "IGNOREWARNING est désactivé..",
"latest_version": "T'as la dernière version :",
"latest_version2": "Va voir {VERSION_URL}/goob/changes.txt pour les changements",
"pinging_disabled": "Le ping est désactivé! J'dis pas au serveur que j'suis en ligne...",
"goober_ping_success": "Connecté à Goober central en tant que {NAME}",
"goober_ping_fail": "Ça a chié en envoyant les données. Le serveur a retourné :",
"goober_ping_fail2": "Ça a chié en envoyant les données :",
"sentence_positivity": "La phrase est positive à :",
"command_edit_fail": "Ça a chié en éditant le message :",
"command_desc_retrain": "Réentraîne le modèle Markov à la main.",
"command_markov_retrain": "Réentraînement du modèle Markov... Attend un peu.",
"command_markov_memory_not_found": "Erreur : fichier mémoire pas trouvé!",
"command_markov_memory_is_corrupt": "Erreur : fichier mémoire scrap!",
"command_markov_retraining": "Traitement de {processed_data}/{data_size} points de données...",
"command_markov_retrain_successful": "Modèle Markov réentraîné avec succès avec {data_size} points de données!",
"command_desc_talk": "parle pis toute",
"command_talk_insufficent_text": "J'ai pas assez appris pour pouvoir parler.",
"command_talk_generation_fail": "J'ai rien à dire pour l'instant!",
"command_desc_help": "aide",
"command_help_embed_title": "Aide du bot",
"command_help_embed_desc": "Liste des commandes par catégorie.",
"command_help_categories_general": "Général",
"command_help_categories_admin": "Admin",
"command_help_categories_custom": "Commandes perso",
"command_ran": "Info : {message.author.name} a fait {message.content}",
"command_ran_s": "Info : {interaction.user} a fait ",
"command_desc_ping": "ping",
"command_ping_embed_desc": "Latence du bot :",
"command_ping_footer": "Demandé par",
"command_about_desc": "à propos",
"command_about_embed_title": "À propos de moi",
"command_about_embed_field1": "Nom",
"command_about_embed_field2name": "Version",
"command_about_embed_field2value": "Locale : {local_version} \nDernière : {latest_version}",
"command_desc_stats": "stats",
"command_stats_embed_title": "Stats du bot",
"command_stats_embed_desc": "Infos sur la mémoire du bot.",
"command_stats_embed_field1name": "Stats du fichier",
"command_stats_embed_field1value": "Taille : {file_size} octets\nLignes : {line_count}",
"command_stats_embed_field2name": "Version",
"command_stats_embed_field2value": "Locale : {local_version} \nDernière : {latest_version}",
"command_stats_embed_field3name": "Infos variables",
"command_stats_embed_field3value": "Nom : {NAME} \nPréfixe : {PREFIX} \nID du proprio : {ownerid}\nLigne de ping : {PING_LINE} \nPartage de mémoire activé : {showmemenabled} \nEntraînement utilisateur activé : {USERTRAIN_ENABLED} \nChanson : {song} \nTexte de démarrage : ```{splashtext}```"
}

View file

@ -1,8 +1,23 @@
{
"minigames_hangman_game": "Parola: {display_word()}\nErrori: {wrong_guesses}/{max_wrong}",
"minigames_hangman_lost": "Hai perso! La parola era:",
"minigames_hangman_won": "Hai vinto! La parola era:",
"minigames_hangman_already_guessed": "Hai già indovinato",
"minigames_hangman_user_letter_guess": "La tua lettera",
"minigames_hangman_guess": "Indovina una lettera",
"minigames_hangman_api_failed": "Impossibile ottenere una parola casuale.",
"minigames_hangman": "Gioca all'impiccato con una parola casuale",
"minigames_click_to_guess": "Clicca per indovinare un numero da 1 a 10",
"minigames_guess_button": "Indovina",
"minigames_wrong_number": "Sbagliato! Il numero era",
"minigames_correct": "Corretto!",
"minigames_invalid_number": "Numero non valido!",
"minigames_guess_the_number": "Indovina il numero",
"minigames_your_guess": "Il tuo numero (1-10)",
"memory_file_valid": "Il file JSON è valido!",
"file_aint_utf8": "Il file non è un UTF-8 valido. Forse è binario?",
"psutil_not_installed": "Controllo memoria saltato.",
"not_cloned": "Goober non è stato clonato! Clonalo da GitHub.",
"not_cloned": "Goober non è stato clonato! Clonalo da Git.",
"checks_disabled": "I controlli sono disabilitati!",
"unhandled_exception": "Si è verificata un'eccezione non gestita. Segnala questo problema su GitHub, per favore.",
"active_users:": "Utenti attivi:",

58
bot.py
View file

@ -7,8 +7,9 @@ import traceback
import subprocess
import tempfile
import shutil
import uuid
import psutil
import asyncio
import platform
import sys
from typing import List, Dict, Set, Optional, Tuple, Any, Union, Callable, Coroutine, TypeVar, Type
import logging
@ -41,8 +42,6 @@ from discord.ext import commands
from discord import app_commands
from discord import Colour, Embed, File, Interaction, Message
from discord.abc import Messageable
from better_profanity import profanity
from discord.ext import commands
from modules.volta.main import _, set_language
@ -51,10 +50,9 @@ from modules.version import *
from modules.sentenceprocessing import *
from modules.unhandledexception import handle_exception
from modules.image import gen_meme, gen_demotivator
from modules.minigames import guessthenumber, hangman
sys.excepthook = handle_exception
check_for_update() # Check for updates (from modules/version.py)
# Type aliases
T = TypeVar('T')
MessageContext = Union[commands.Context, discord.Interaction]
@ -66,16 +64,6 @@ currenthash: str = ""
launched: bool = False
slash_commands_enabled: bool = False
# Set up Discord bot intents and create bot instance
intents: discord.Intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
bot: commands.Bot = commands.Bot(
command_prefix=PREFIX,
intents=intents,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False, replied_user=True)
)
# Load memory and Markov model for text generation
memory: List[str] = load_memory()
markov_model: Optional[markovify.Text] = load_markov_model()
@ -125,7 +113,6 @@ async def on_ready() -> None:
logger.info(f"{_('synced_commands')} {len(synced)} {(_('synced_commands2'))}")
slash_commands_enabled = True
logger.info(f"{(_('started')).format(name=NAME)}")
bot.loop.create_task(send_alive_ping_periodically())
except discord.errors.Forbidden as perm_error:
logger.error(f"Permission error while syncing commands: {perm_error}")
@ -147,7 +134,6 @@ async def on_ready() -> None:
}.get(status.lower(), discord.Status.online)
await bot.change_presence(status=status, activity=discord.Activity(type=discord.ActivityType.listening, name=f"{song}"))
launched = True
@bot.event
async def on_command_error(ctx: commands.Context, error: commands.CommandError) -> None:
from modules.unhandledexception import handle_exception
@ -231,6 +217,12 @@ async def talk(ctx: commands.Context, sentence_size: int = 5) -> None:
else:
await send_message(ctx, f"{(_('command_talk_generation_fail'))}")
@bot.hybrid_command(description=f"RAM")
async def ramusage(ctx):
process = psutil.Process(os.getpid())
mem = process.memory_info().rss
await send_message(ctx, f"Total memory used: {mem / 1024 / 1024:.2f} MB")
# Command: Generate an image
@bot.hybrid_command(description=f"{(_('command_desc_help'))}")
async def impact(ctx: commands.Context, text: Optional[str] = None) -> None:
@ -386,14 +378,11 @@ async def on_message(message: discord.Message) -> None:
await bot.process_commands(message)
return
if profanity.contains_profanity(message.content):
return
if message.content:
if not USERTRAIN_ENABLED:
return
formatted_message: str = append_mentions_to_18digit_integer(message.content)
cleaned_message: str = preprocess_message(formatted_message)
formatted_message: str = message.content
cleaned_message: str = formatted_message
if cleaned_message:
memory.append(cleaned_message)
message_metadata = {
@ -428,10 +417,14 @@ async def on_message(message: discord.Message) -> None:
await bot.process_commands(message)
# Event: Called on every interaction (slash command, etc.)
@bot.event
async def on_interaction(interaction: discord.Interaction) -> None:
logger.info(f"{(_('command_ran_s')).format(interaction=interaction)}{interaction.data['name']}")
name = None
if interaction.data.get('name') is None:
name = "Unknown"
else:
name = interaction.data['name']
logger.info(f"{(_('command_ran_s')).format(interaction=interaction)}{name}")
# Global check: Block blacklisted users from running commands
@bot.check
@ -468,6 +461,17 @@ async def ping(ctx: commands.Context) -> None:
await ctx.send(embed=LOLembed)
def get_git_remote_url():
try:
url = subprocess.check_output(
["git", "config", "--get", "remote.origin.url"],
text=True,
stderr=subprocess.DEVNULL,
).strip()
return url
except subprocess.CalledProcessError:
return "Unknown"
# Command: Show about information
@bot.hybrid_command(description=f"{(_('command_about_desc'))}")
async def about(ctx: commands.Context) -> None:
@ -477,7 +481,8 @@ async def about(ctx: commands.Context) -> None:
embed: discord.Embed = discord.Embed(title=f"{(_('command_about_embed_title'))}", description="", color=Colour(0x000000))
embed.add_field(name=f"{(_('command_about_embed_field1'))}", value=f"{NAME}", inline=False)
embed.add_field(name=f"{(_('command_about_embed_field2name'))}", value=f"{(_('command_about_embed_field2value')).format(local_version=local_version, latest_version=latest_version)}", inline=False)
embed.add_field(name=f"Github", value=f"https://github.com/gooberinc/goober")
embed.add_field(name=f"Git", value=get_git_remote_url())
embed.add_field(name=f"OS", value=platform.platform())
await send_message(ctx, embed=embed)
@ -499,7 +504,8 @@ async def stats(ctx: commands.Context) -> None:
embed.add_field(name=f"{(_('command_stats_embed_field1name'))}", value=f"{(_('command_stats_embed_field1value')).format(file_size=file_size, line_count=line_count)}", inline=False)
embed.add_field(name=f"{(_('command_stats_embed_field2name'))}", value=f"{(_('command_stats_embed_field2value')).format(local_version=local_version, latest_version=latest_version)}", inline=False)
embed.add_field(name=f"{(_('command_stats_embed_field3name'))}", value=f"{(_('command_stats_embed_field3value')).format(NAME=NAME, PREFIX=PREFIX, ownerid=ownerid, PING_LINE=PING_LINE, showmemenabled=showmemenabled, USERTRAIN_ENABLED=USERTRAIN_ENABLED, song=song, splashtext=splashtext)}", inline=False)
embed.add_field(name=f"OS", value=platform.platform())
embed.add_field(name="Python Version", value=platform.python_version())
await send_message(ctx, embed=embed)
# Command: Upload memory.json to litterbox.catbox.moe and return the link

245
botminimal.py Normal file
View file

@ -0,0 +1,245 @@
import discord
from discord.ext import commands, tasks
import json
import markovify
import nltk
from nltk.tokenize import word_tokenize
import random
import os
import time
import re
from dotenv import load_dotenv
load_dotenv()
# download NLTK data files
nltk.download('punkt')
MEMORY_FILE = "memory.json"
MEMORY_LOADED_FILE = "MEMORY_LOADED"
def load_memory():
data = []
# Try to load data from MEMORY_FILE
try:
with open(MEMORY_FILE, "r") as f:
data = json.load(f)
except FileNotFoundError:
pass
return data
# Save memory data to MEMORY_FILE
def save_memory(memory):
with open(MEMORY_FILE, "w") as f:
json.dump(memory, f, indent=4)
def train_markov_model(memory, additional_data=None):
if not memory:
return None
filtered_memory = [line for line in memory if isinstance(line, str)]
if additional_data:
filtered_memory.extend(line for line in additional_data if isinstance(line, str))
if not filtered_memory:
return None
text = "\n".join(filtered_memory)
model = markovify.NewlineText(text, state_size=2)
return model
#this doesnt work and im extremely pissed and mad
def append_mentions_to_18digit_integer(message):
pattern = r'\b\d{18}\b'
return re.sub(pattern, lambda match: f"<@{match.group(0)}>", message)
def preprocess_message(message):
message = append_mentions_to_18digit_integer(message)
tokens = word_tokenize(message)
tokens = [token for token in tokens if token.isalnum()]
return " ".join(tokens)
intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
bot = commands.Bot(command_prefix="g!", intents=intents)
memory = load_memory()
markov_model = train_markov_model(memory)
generated_sentences = set()
used_words = set()
@bot.event
async def on_ready():
print(f"Logged in as {bot.user}")
post_message.start()
positive_keywords = ["happy", "good", "great", "amazing", "awesome", "joy", "love", "fantastic", "positive", "cheerful", "victory", "favorite", "lmao", "lol", "xd", "XD", "xD", "Xd"]
positive_gifs = [
"https://tenor.com/view/chill-guy-my-new-character-gif-2777893510283028272",
"https://tenor.com/view/goodnight-goodnight-friends-weezer-weezer-goodnight-gif-7322052181075806988"
]
def is_positive(sentence):
sentence_lower = sentence.lower()
return any(keyword in sentence_lower for keyword in positive_keywords)
@bot.command()
async def ask(ctx):
await ctx.send("Command undergoing fixes!")
#not really lol
@bot.command()
async def talk(ctx):
if markov_model:
response = None
for _ in range(10): # im going to shit my pants 10 times to get a coherent sentence
response = markov_model.make_sentence(tries=100)
if response and response not in generated_sentences:
# preprocess shit for grammar
response = improve_sentence_coherence(response)
generated_sentences.add(response)
break
if response:
async with ctx.typing():
cleaned_response = re.sub(r'[^\w\s]', '', response)
cleaned_response = cleaned_response.lower()
coherent_response = rephrase_for_coherence(cleaned_response)
if random.random() < 0.9:
if is_positive(coherent_response):
gif_url = random.choice(positive_gifs)
combined_message = f"{coherent_response}\n[jif]({gif_url})"
await ctx.send(combined_message)
else:
await ctx.send(coherent_response)
else:
await ctx.send(coherent_response)
else:
await ctx.send("I have nothing to say right now!")
else:
await ctx.send("I need to learn more from messages before I can talk.")
def improve_sentence_coherence(sentence):
sentence = sentence.replace(" i ", " I ")
return sentence
def rephrase_for_coherence(sentence):
words = sentence.split()
coherent_sentence = " ".join(words)
return coherent_sentence
bot.help_command = None
@bot.command()
async def help(ctx, *args):
if args:
command_name = args[0]
command = bot.get_command(command_name)
if command:
embed = discord.Embed(
title=f"Help: g!{command_name}",
description=f"**Description:** {command.help}",
color=discord.Color.blue()
)
await ctx.send(embed=embed)
else:
await ctx.send(f"Command `{command_name}` not found.")
else:
embed = discord.Embed(
title="Bot Help",
description="List of commands grouped by category.",
color=discord.Color.blue()
)
command_categories = {
"General": ["show_memory", "talk", "ask", "ping"],
"Debug": ["word_usage"]
}
for category, commands_list in command_categories.items():
commands_in_category = "\n".join([f"g!{command}" for command in commands_list])
embed.add_field(name=category, value=commands_in_category, inline=False)
await ctx.send(embed=embed)
@bot.event
async def on_message(message):
global memory, markov_model, last_random_talk_time
if message.author.bot:
return
if message.content.startswith(("g!talk", "g!show_memory", "g!help", "g!")):
await bot.process_commands(message)
return
if message.content:
formatted_message = append_mentions_to_18digit_integer(message.content)
cleaned_message = preprocess_message(formatted_message)
if cleaned_message:
memory.append(cleaned_message)
save_memory(memory)
markov_model = train_markov_model(memory)
# process any commands in the message
await bot.process_commands(message)
@bot.command()
async def ping(ctx):
await ctx.defer()
#stolen from my expect bot very proud
latency = round(bot.latency * 1000)
LOLembed = discord.Embed(
title="Pong!!",
description=(
f"The Beretta fires fast and won't make you feel any better!\n"
f"`Bot Latency: {latency}ms`\n"
),
color=discord.Color.blue()
)
LOLembed.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=LOLembed) # use ctx.send instead of respond because it has nothing to respond to and its not a slash command
@bot.command()
async def show_memory(ctx):
memory = load_memory()
memory_text = json.dumps(memory, indent=4)
if len(memory_text) > 1024:
with open(MEMORY_FILE, "r") as f:
await ctx.send(" ", file=discord.File(f, MEMORY_FILE))
else:
embed = discord.Embed(title="Memory Contents", description="The bot's memory.", color=discord.Color.blue())
embed.add_field(name="Memory Data", value=f"```json\n{memory_text}\n```", inline=False)
await ctx.send(embed=embed)
def improve_sentence_coherence(sentence):
sentence = sentence.replace(" i ", " I ")
return sentence
@tasks.loop(minutes=60)
async def post_message():
channel_id = 1296141985253691433
channel = bot.get_channel(channel_id)
if channel and markov_model:
response = None
for _ in range(10):
response = markov_model.make_sentence(tries=100)
if response and response not in generated_sentences:
generated_sentences.add(response)
break
if response:
await channel.send(response)
# run the bot
TOKEN = os.getenv("DISCORDBOTTOKEN", "0")
bot.run(TOKEN)

View file

@ -2,6 +2,12 @@ import os
import platform
from dotenv import load_dotenv
import pathlib
import discord
from discord.ext import commands
from discord import app_commands
from discord import Colour, Embed, File, Interaction, Message
from discord.abc import Messageable
from discord.ext import commands
import subprocess
def get_git_branch():
try:
@ -15,7 +21,6 @@ def get_git_branch():
env_path = pathlib.Path(__file__).parent.parent / '.env'
load_dotenv(dotenv_path=env_path)
ANSI = "\033["
RED = f"{ANSI}31m"
GREEN = f"{ANSI}32m"
@ -48,7 +53,7 @@ song = os.getenv("SONG")
arch = platform.machine()
launched = False
latest_version = "0.0.0"
local_version = "2.3.4"
local_version = "2.3.5"
os.environ['gooberlocal_version'] = local_version
REACT = os.getenv("REACT")
if get_git_branch() == "dev":
@ -56,3 +61,12 @@ if get_git_branch() == "dev":
# this makes goober think its a beta version, so it will not update to the latest stable version or run any version checks
else:
beta = False
# Set up Discord bot intents and create bot instance
intents: discord.Intents = discord.Intents.default()
intents.messages = True
intents.presences = True
intents.members = True
intents.message_content = True
bot: commands.Bot = commands.Bot(command_prefix=PREFIX, intents=intents, allowed_mentions=discord.AllowedMentions(everyone=False, roles=False, users=False, replied_user=True))

View file

@ -1,4 +1,5 @@
import logging
import re
from modules.globalvars import *
class GooberFormatter(logging.Formatter):
@ -16,10 +17,14 @@ class GooberFormatter(logging.Formatter):
}
def format(self, record: logging.LogRecord):
ansiescape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
if self.colors:
log_fmt = self.FORMATS.get(record.levelno) # Add colors
else:
log_fmt = self._format # Just use the default format
formatter = logging.Formatter(log_fmt, datefmt="%m/%d/%y %H:%M:%S")
return formatter.format(record)
formatted = formatter.format(record)
if not self.colors:
formatted = ansiescape.sub('', formatted)
return formatted
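Side note, not part of the diff: the ANSI-escape regex added above strips colour codes when colors are disabled, e.g.:

import re

ansiescape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
# "\033[31mERROR\033[0m hello" becomes "ERROR hello"
print(ansiescape.sub('', "\033[31mERROR\033[0m hello"))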

71
modules/minigames.py Normal file
View file

@ -0,0 +1,71 @@
import random
import discord
from discord import ui, Interaction, TextStyle
from discord.ext import commands
import aiohttp
import asyncio
from modules.globalvars import bot
from modules.volta.main import _
# @bot.hybrid_command(description=_('minigames_guess_the_number'))
async def guessthenumber(ctx: commands.Context):
number = random.randint(1, 10)
class GuessModal(ui.Modal, title=_('minigames_guess_the_number')):
guess = ui.TextInput(label=_('minigames_your_guess'), style=TextStyle.short)
async def on_submit(self, interaction: Interaction):
try:
user_guess = int(self.guess.value)
except:
await interaction.response.send_message(_('minigames_invalid_number'), ephemeral=True)
return
if user_guess == number:
await interaction.response.send_message(_('minigames_correct'), ephemeral=True)
else:
await interaction.response.send_message(f"{_('minigames_wrong_number')} {number}.", ephemeral=True)
async def button_callback(interaction: Interaction):
await interaction.response.send_modal(GuessModal())
button = ui.Button(label=_('minigames_guess_button'), style=discord.ButtonStyle.primary)
button.callback = button_callback
view = ui.View()
view.add_item(button)
await ctx.send(_('minigames_click_to_guess'), view=view)
# @bot.hybrid_command(description=_('minigames_hangman')) nope nope nope fuck no nope no thanks no nuh uh not today nope
async def hangman(ctx: commands.Context):
async with aiohttp.ClientSession() as session:
async with session.get("https://random-word-api.herokuapp.com/word?number=1") as resp:
if resp.status != 200:
await ctx.send("Failed to get a random word.")
return
data = await resp.json()
word = data[0].lower()
print(word)
guessed_letters = set()
wrong_guesses = 0
max_wrong = 6
def display_word():
return " ".join([c if c in guessed_letters else "_" for c in word])
class GuessModal(ui.Modal, title=_('minigames_hangman_guess')):
letter = ui.TextInput(label=_('minigames_hangman_user_letter_guess'), style=TextStyle.short, max_length=1)
async def on_submit(self, interaction: Interaction):
nonlocal guessed_letters, wrong_guesses
guess = self.letter.value.lower()
if guess in guessed_letters:
await interaction.response.send_message(f"{_('minigames_hangman_already_guessed')}'{guess}'!", ephemeral=True)
return
guessed_letters.add(guess)
if guess not in word:
wrong_guesses += 1
if all(c in guessed_letters for c in word):
await interaction.response.edit_message(content=f"{_('minigames_hangman_won')} **{word}**", view=None)
elif wrong_guesses >= max_wrong:
await interaction.response.edit_message(content=f"{_('minigames_hangman_lost')} **{word}**", view=None)
else:
await interaction.response.edit_message(content=_('minigames_hangman_game').format(display_word=display_word(),wrong_guesses=wrong_guesses,max_wrong=max_wrong), view=view)
async def button_callback(interaction: Interaction):
await interaction.response.send_modal(GuessModal())
button = ui.Button(label=_('minigames_click_to_guess'), style=discord.ButtonStyle.primary)
button.callback = button_callback
view = ui.View()
view.add_item(button)
await ctx.send(_('minigames_hangman_game').format(display_word=display_word(),wrong_guesses=wrong_guesses,max_wrong=max_wrong), view=view)

View file

@ -61,20 +61,15 @@ async def send_message(ctx, message=None, embed=None, file=None, edit=False, mes
sent_message = await ctx.send(file=file)
return sent_message
def append_mentions_to_18digit_integer(message):
pattern = r'\b\d{18}\b'
return re.sub(pattern, lambda match: "", message)
def preprocess_message(message):
message = append_mentions_to_18digit_integer(message)
message = message
doc = nlp(message)
tokens = [token.text for token in doc if token.is_alpha or token.is_digit]
return " ".join(tokens)
def improve_sentence_coherence(sentence):
return re.sub(r'\bi\b', 'I', sentence)
return ""
def rephrase_for_coherence(sentence):
words = sentence.split()
coherent_sentence = " ".join(words)
coherent_sentence = sentence
return coherent_sentence

View file

@ -7,8 +7,11 @@ import locale
import json
import pathlib
import threading
import platform
import sys
import time
from dotenv import load_dotenv
from functools import lru_cache
ANSI = "\033["
RED = f"{ANSI}31m"
@ -61,10 +64,6 @@ if working_dir != module_dir:
translations = {}
_file_mod_times = {}
import locale
import platform
import os
import sys
def get_system_locale():
system = platform.system()  # fallback in case locale isn't set
@ -119,6 +118,7 @@ def reload_if_changed():
current_mtime = file_path.stat().st_mtime
if current_mtime != last_mtime:
print(f"[VOLTA] {RED}Translation file changed: {file_path}, reloading...{RESET}")
_lookup_translation.cache_clear()
load_translations()
break
except FileNotFoundError:
@ -140,9 +140,10 @@ def set_language(lang: str):
else:
print(f"[VOLTA] {RED}The fallback translations cannot be found! No fallback available.{RESET}")
ENGLISH_MISSING = True
_lookup_translation.cache_clear()
def check_missing_translations():
global LOCALE, ENGLISH_MISSING
def check_missing_translations(LOCALE=LOCALE):
global ENGLISH_MISSING
load_translations()
if FALLBACK_LOCALE not in translations:
print(f"[VOLTA] {RED}Fallback translations ({FALLBACK_LOCALE}.json) missing from assets/locales.{RESET}")
@ -175,24 +176,31 @@ def check_missing_translations():
printedsystemfallback = False
@lru_cache(maxsize=600)
def _lookup_translation(lang: str, key: str):
return translations.get(lang, {}).get(key)
def get_translation(lang: str, key: str):
global printedsystemfallback
if ENGLISH_MISSING:
return f"[VOLTA] {RED}No fallback available!{RESET}"
fallback_translations = translations.get(FALLBACK_LOCALE, {})
val = _lookup_translation(lang, key)
if val:
return val
sys_lang = get_system_locale().split("_")[0] if get_system_locale() else None
sys_translations = translations.get(sys_lang, {}) if sys_lang else {}
lang_translations = translations.get(lang, {})
if key in lang_translations:
return lang_translations[key]
if sys_lang and sys_lang != lang and key in sys_translations:
if not printedsystemfallback:
print(f"[VOLTA] {YELLOW}Falling back to system language {sys_lang}!{RESET}")
printedsystemfallback = True
return sys_translations[key]
if key in fallback_translations:
if sys_lang and sys_lang != lang:
sys_val = _lookup_translation(sys_lang, key)
if sys_val:
if not printedsystemfallback:
print(f"[VOLTA] {YELLOW}Falling back to system language {sys_lang}!{RESET}")
printedsystemfallback = True
return sys_val
fallback_val = _lookup_translation(FALLBACK_LOCALE, key)
if fallback_val:
print(f"[VOLTA] {YELLOW}Missing key: '{key}' in '{lang}', falling back to fallback locale '{FALLBACK_LOCALE}'{RESET}")
return fallback_translations[key]
return fallback_val
return f"[VOLTA] {YELLOW}Missing key: '{key}' in all locales!{RESET}"
def _(key: str) -> str:
@ -204,4 +212,9 @@ watchdog_thread = threading.Thread(target=reload_if_changed, daemon=True)
watchdog_thread.start()
if __name__ == '__main__':
print("Volta should not be run directly! Please use it as a module..")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("LOCALE", help="Locale to validate")
args = parser.parse_args()
print("[VOLTA] Validating all locales....")
check_missing_translations(LOCALE=f"{args.LOCALE}")
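Side note, not part of the diff: with this change a single locale can be validated from the command line, e.g. (assuming the module lives at modules/volta/main.py, as the imports elsewhere in this PR suggest, and is run from the repository root):

python modules/volta/main.py fr_ca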

View file

@ -4,7 +4,6 @@ spacy
spacytextblob
requests
psutil
better_profanity
python-dotenv
dotenv
pillow