From 1d036ded9cb23aad4f60a0a4baca8ed0a7337556 Mon Sep 17 00:00:00 2001
From: thedragonsinn <98635854+thedragonsinn@users.noreply.github.com>
Date: Sat, 20 Apr 2024 19:13:19 +0530
Subject: [PATCH] clean up .ids, -d in rename, do not log service texts. Beta
AI cmds: .stt, .aim
---
app/__init__.py | 12 +-
app/plugins/admin/fbans.py | 13 +-
app/plugins/ai/media_query.py | 187 ++++++++++++++++++
app/plugins/ai/models.py | 55 ++++++
.../{misc/gemini.py => ai/text_query.py} | 84 ++------
app/plugins/files/download.py | 6 +-
app/plugins/files/rename.py | 20 +-
app/plugins/tg_tools/chat.py | 29 ++-
app/plugins/tg_tools/pm_n_tag_logger.py | 15 +-
app/plugins/tg_tools/pm_permit.py | 4 +
10 files changed, 313 insertions(+), 112 deletions(-)
create mode 100644 app/plugins/ai/media_query.py
create mode 100644 app/plugins/ai/models.py
rename app/plugins/{misc/gemini.py => ai/text_query.py} (60%)
diff --git a/app/__init__.py b/app/__init__.py
index 4f8d7fb..3a050da 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -1,11 +1 @@
-from ub_core import (
- BOT,
- DB,
- DB_CLIENT,
- LOGGER,
- Config,
- Convo,
- CustomDB,
- Message,
- bot,
-)
+from ub_core import BOT, DB, DB_CLIENT, LOGGER, Config, Convo, CustomDB, Message, bot
diff --git a/app/plugins/admin/fbans.py b/app/plugins/admin/fbans.py
index e7c029e..09bbcc5 100644
--- a/app/plugins/admin/fbans.py
+++ b/app/plugins/admin/fbans.py
@@ -36,11 +36,7 @@ async def add_fed(bot: BOT, message: Message):
data = dict(name=message.input or message.chat.title, type=str(message.chat.type))
await FED_DB.add_data({"_id": message.chat.id, **data})
text = f"#FBANS\n{data['name']}: {message.chat.id} added to FED LIST."
- await message.reply(
- text=text,
- del_in=5,
- block=True,
- )
+ await message.reply(text=text, del_in=5, block=True)
await bot.log_text(text=text, type="info")
@@ -64,13 +60,10 @@ async def remove_fed(bot: BOT, message: Message):
chat = chat.id
elif chat.lstrip("-").isdigit():
chat = int(chat)
- deleted: bool | None = await FED_DB.delete_data(id=chat)
+ deleted: int = await FED_DB.delete_data(id=chat)
if deleted:
text = f"#FBANS\n{name}{chat} removed from FED LIST."
- await message.reply(
- text=text,
- del_in=8,
- )
+ await message.reply(text=text, del_in=8)
await bot.log_text(text=text, type="info")
else:
await message.reply(text=f"{name or chat} not in FED LIST.", del_in=8)
diff --git a/app/plugins/ai/media_query.py b/app/plugins/ai/media_query.py
new file mode 100644
index 0000000..b97afad
--- /dev/null
+++ b/app/plugins/ai/media_query.py
@@ -0,0 +1,187 @@
+import asyncio
+import glob
+import mimetypes
+import os
+import shutil
+import time
+from io import BytesIO
+
+import google.generativeai as genai
+from google.ai import generativelanguage as glm
+from ub_core.utils import run_shell_cmd
+
+from app import BOT, Message, bot
+from app.plugins.ai.models import (
+ IMAGE_MODEL,
+ MEDIA_MODEL,
+ TEXT_MODEL,
+ get_response_text,
+)
+
+CODE_EXTS = {
+ ".java",
+ ".c",
+ ".cpp",
+ ".cc",
+ ".cxx",
+ ".py",
+ ".js",
+ ".html",
+ ".htm",
+ ".css",
+ ".rb",
+ ".php",
+ ".swift",
+ ".go",
+ ".sql",
+ ".r",
+ ".pl",
+ ".kt",
+}
+PHOTO_EXTS = {".png", ".jpg", ".jpeg", ".webp"}
+VIDEO_EXTS = {".mp4", ".mkv", ".webm", ".gif"}
+AUDIO_EXTS = {".aac", ".mp3", ".opus", ".m4a", ".ogg"}
+
+
+@bot.add_cmd(cmd="ocr")
+async def photo_query(bot: BOT, message: Message):
+ """
+ CMD: OCR
+    INFO: Ask Gemini AI a question about the replied image.
+ USAGE: .ocr [reply to a photo] explain the image.
+ """
+ prompt = message.input
+ reply = message.replied
+
+ if not (prompt and reply and reply.photo):
+ await message.reply("Reply to an image and give a prompt.")
+ return
+ response_text = await handle_photo(prompt, reply)
+ await message.reply(response_text)
+
+
+@bot.add_cmd(cmd="stt")
+async def audio_to_text(bot: BOT, message: Message):
+ """
+ CMD: STT
+    INFO: Convert audio files to text.
+    USAGE: .stt [reply to an audio file] summarise/transcribe the audio file.
+ """
+ prompt = message.input
+ reply = message.replied
+    audio = (reply.audio or reply.voice) if reply else None
+
+    if not (prompt and reply and audio):
+        await message.reply("Reply to an audio file and give a prompt.")
+ return
+ response_text = await handle_audio(prompt, reply)
+ await message.reply(response_text)
+
+
+@bot.add_cmd(cmd="aim")
+async def handle_document(bot: BOT, message: Message):
+ """
+ CMD: AIM
+    INFO: Prompt the AI to perform a task on documents containing a picture, video, code, or audio.
+    USAGE: .aim [reply to a document] convert this file to python | summarise the video, audio, or picture.
+ """
+ prompt = message.input
+ reply = message.replied
+    document = reply.document if reply else None
+
+ if not (prompt and reply and document):
+ await message.reply("Reply to a document and give a prompt.")
+ return
+
+ file_name = document.file_name
+ if not file_name:
+ await message.reply("Unsupported file.")
+ return
+
+ name, ext = os.path.splitext(file_name)
+
+ if ext in PHOTO_EXTS:
+ response_text = await handle_photo(prompt, reply)
+ elif ext in AUDIO_EXTS:
+ response_text = await handle_audio(prompt, reply)
+ elif ext in CODE_EXTS:
+ response_text = await handle_code(prompt, reply)
+ elif ext in VIDEO_EXTS:
+ response_text = await handle_video(prompt, reply)
+ else:
+ await message.reply("Unsupported Media.")
+ return
+
+ await message.reply(response_text)
+
+
+async def download_file(file_name: str, message: Message) -> tuple[str, str]:
+ download_dir = os.path.join("downloads", str(time.time()))
+ file_path = os.path.join(download_dir, file_name)
+ await message.download(file_path)
+ return file_path, download_dir
+
+
+async def handle_audio(prompt: str, message: Message):
+ audio = message.document or message.audio or message.voice
+    file_name = getattr(audio, "file_name", None) or "audio.aac"  # voice notes may lack file_name
+
+ file_path, download_dir = await download_file(file_name, message)
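+    # Upload the audio to the Gemini Files API; media is passed by reference rather than as inline bytes.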
+ file_response = genai.upload_file(path=file_path)
+
+ response = await MEDIA_MODEL.generate_content_async([prompt, file_response])
+ response_text = get_response_text(response)
+
+ genai.delete_file(name=file_response.name)
+    shutil.rmtree(download_dir, ignore_errors=True)
+
+ return response_text
+
+
+async def handle_code(prompt: str, message: Message):
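+    # Code files are plain text, so read them in memory and inline them into the text prompt.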
+ file: BytesIO = await message.download(in_memory=True)
+ text = file.getvalue().decode("utf-8")
+ final_prompt = f"{text}\n\n{prompt}"
+ response = await TEXT_MODEL.generate_content_async(final_prompt)
+ return get_response_text(response)
+
+
+async def handle_photo(prompt: str, message: Message):
+ file = await message.download(in_memory=True)
+
+ mime_type, _ = mimetypes.guess_type(file.name)
+ if mime_type is None:
+ mime_type = "image/unknown"
+
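+    # Wrap the raw image bytes in a Blob so the vision model receives them inline with the prompt.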
+ image_blob = glm.Blob(mime_type=mime_type, data=file.getvalue())
+ response = await IMAGE_MODEL.generate_content_async([prompt, image_blob])
+ return get_response_text(response)
+
+
+async def handle_video(prompt: str, message: Message):
+ file_name = "v.mp4"
+ file_path, download_dir = await download_file(file_name, message)
+
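+    # Sample the video at one frame per second; the extracted frames stand in for the clip.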
+ output_path = os.path.join(download_dir, "output_frame_%04d.png")
+ ffmpeg_output_error = await run_shell_cmd(
+ f'ffmpeg -hide_banner -loglevel error -i {file_path} -vf "fps=1" {output_path}'
+ )
+
+ if ffmpeg_output_error:
+ return ffmpeg_output_error
+
+ extracted_frames = glob.glob(f"{download_dir}/*png")
+
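+    # genai.upload_file is blocking, so run each upload in a worker thread to keep the event loop free.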
+ uploaded_frames = []
+ for frame in extracted_frames:
+ uploaded_frame = await asyncio.to_thread(genai.upload_file, frame)
+ uploaded_frames.append(uploaded_frame)
+
+ response = await MEDIA_MODEL.generate_content_async([prompt, *uploaded_frames])
+ response_text = get_response_text(response)
+
+ for uploaded_frame in uploaded_frames:
+ genai.delete_file(name=uploaded_frame.name)
+
+ shutil.rmtree(download_dir, ignore_errors=True)
+ return response_text
diff --git a/app/plugins/ai/models.py b/app/plugins/ai/models.py
new file mode 100644
index 0000000..fe6497f
--- /dev/null
+++ b/app/plugins/ai/models.py
@@ -0,0 +1,55 @@
+import google.generativeai as genai
+
+from app import Message, extra_config
+
+
+async def init_task():
+ if extra_config.GEMINI_API_KEY:
+ genai.configure(api_key=extra_config.GEMINI_API_KEY)
+
+
+GENERATION_CONFIG = {"temperature": 0.69, "max_output_tokens": 2048}
+
+SAFETY_SETTINGS = [
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
+ {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
+ {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
+ {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"},
+]
+
+
+TEXT_MODEL = genai.GenerativeModel(
+ model_name="gemini-pro",
+ generation_config=GENERATION_CONFIG,
+ safety_settings=SAFETY_SETTINGS,
+)
+
+IMAGE_MODEL = genai.GenerativeModel(
+ model_name="gemini-pro-vision",
+ generation_config=GENERATION_CONFIG,
+ safety_settings=SAFETY_SETTINGS,
+)
+
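+# gemini-1.5-pro accepts Files API uploads (audio, extracted video frames) alongside text prompts.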
+MEDIA_MODEL = genai.GenerativeModel(
+ model_name="models/gemini-1.5-pro-latest",
+ generation_config=GENERATION_CONFIG,
+ safety_settings=SAFETY_SETTINGS,
+)
+
+
+async def basic_check(message: Message):
+ if not extra_config.GEMINI_API_KEY:
+ await message.reply(
+ "Gemini API KEY not found."
+ "\nGet it HERE "
+ "and set GEMINI_API_KEY var."
+ )
+ return
+ if not message.input:
+ await message.reply("Ask a Question.")
+ return
+ return 1
+
+
+def get_response_text(response):
+ return "\n".join([part.text for part in response.parts])
diff --git a/app/plugins/misc/gemini.py b/app/plugins/ai/text_query.py
similarity index 60%
rename from app/plugins/misc/gemini.py
rename to app/plugins/ai/text_query.py
index ad2de26..02948aa 100644
--- a/app/plugins/misc/gemini.py
+++ b/app/plugins/ai/text_query.py
@@ -1,54 +1,11 @@
-import mimetypes
import pickle
from io import BytesIO
-import google.generativeai as genai
-from google.ai import generativelanguage as glm
from pyrogram import filters
from pyrogram.enums import ParseMode
-from app import BOT, Convo, Message, bot, extra_config
-
-GENERATION_CONFIG = {"temperature": 0.69, "max_output_tokens": 2048}
-
-SAFETY_SETTINGS = [
- {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
- {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
- {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"},
- {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"},
-]
-
-
-TEXT_MODEL = genai.GenerativeModel(
- model_name="gemini-pro",
- generation_config=GENERATION_CONFIG,
- safety_settings=SAFETY_SETTINGS,
-)
-
-VISION_MODEL = genai.GenerativeModel(
- model_name="gemini-pro-vision",
- generation_config=GENERATION_CONFIG,
- safety_settings=SAFETY_SETTINGS,
-)
-
-
-async def init_task():
- if extra_config.GEMINI_API_KEY:
- genai.configure(api_key=extra_config.GEMINI_API_KEY)
-
-
-async def basic_check(message: Message):
- if not extra_config.GEMINI_API_KEY:
- await message.reply(
- "Gemini API KEY not found."
- "\nGet it HERE "
- "and set GEMINI_API_KEY var."
- )
- return
- if not message.input:
- await message.reply("Ask a Question.")
- return
- return 1
+from app import BOT, Convo, Message, bot
+from app.plugins.ai.models import TEXT_MODEL, basic_check, get_response_text
@bot.add_cmd(cmd="ai")
@@ -59,25 +16,15 @@ async def question(bot: BOT, message: Message):
USAGE: .ai what is the meaning of life.
"""
- if not (await basic_check(message)): # fmt:skip
+ if not await basic_check(message):
return
+
prompt = message.input
- reply = message.replied
- if reply and reply.photo:
- file = await reply.download(in_memory=True)
-
- mime_type, _ = mimetypes.guess_type(file.name)
- if mime_type is None:
- mime_type = "image/unknown"
-
- image_blob = glm.Blob(mime_type=mime_type, data=file.getvalue())
- response = await VISION_MODEL.generate_content_async([prompt, image_blob])
-
- else:
- response = await TEXT_MODEL.generate_content_async(prompt)
+ response = await TEXT_MODEL.generate_content_async(prompt)
response_text = get_response_text(response)
+
if not isinstance(message, Message):
await message.edit(
text=f"```\n{prompt}```**GEMINI AI**:\n{response_text.strip()}",
@@ -103,7 +50,7 @@ async def ai_chat(bot: BOT, message: Message):
After 5 mins of Idle bot will export history and stop chat.
use .load_history to continue
"""
- if not (await basic_check(message)): # fmt:skip
+ if not await basic_check(message):
return
chat = TEXT_MODEL.start_chat(history=[])
try:
@@ -113,14 +60,14 @@ async def ai_chat(bot: BOT, message: Message):
@bot.add_cmd(cmd="load_history")
-async def ai_chat(bot: BOT, message: Message):
+async def history_chat(bot: BOT, message: Message):
"""
CMD: LOAD_HISTORY
INFO: Load a Conversation with Gemini AI from previous session.
USAGE:
.load_history {question} [reply to history document]
"""
- if not (await basic_check(message)): # fmt:skip
+ if not await basic_check(message):
return
reply = message.replied
if (
@@ -142,15 +89,11 @@ async def ai_chat(bot: BOT, message: Message):
await export_history(chat, message)
-def get_response_text(response):
- return "\n".join([part.text for part in response.parts])
-
-
async def do_convo(chat, message: Message, history: bool = False):
prompt = message.input
reply_to_message_id = message.id
async with Convo(
- client=bot,
+ client=message._client,
chat_id=message.chat.id,
filters=generate_filter(message),
timeout=300,
@@ -177,7 +120,7 @@ def generate_filter(message: Message):
or msg.from_user.id != message.from_user.id
or not msg.reply_to_message
or not msg.reply_to_message.from_user
- or msg.reply_to_message.from_user.id != bot.me.id
+ or msg.reply_to_message.from_user.id != message._client.me.id
):
return False
return True
@@ -188,6 +131,7 @@ def generate_filter(message: Message):
async def export_history(chat, message: Message):
doc = BytesIO(pickle.dumps(chat.history))
doc.name = "AI_Chat_History.pkl"
- await bot.send_document(
- chat_id=message.from_user.id, document=doc, caption=message.text
+ caption = get_response_text(
+ await chat.send_message_async("Summarize our Conversation into one line.")
)
+ await bot.send_document(chat_id=message.from_user.id, document=doc, caption=caption)
diff --git a/app/plugins/files/download.py b/app/plugins/files/download.py
index 4ddf672..be5c53d 100644
--- a/app/plugins/files/download.py
+++ b/app/plugins/files/download.py
@@ -66,8 +66,10 @@ async def down_load(bot: BOT, message: Message):
async def telegram_download(
- message: Message, response: Message, path: str, file_name: str | None = None
-) -> DownloadedFile:
+ message: Message,
+ response: Message,
+ path: str,
+    file_name: str | None = None,
+) -> DownloadedFile:
"""
:param message: Message Containing Media
:param response: Response to Edit
diff --git a/app/plugins/files/rename.py b/app/plugins/files/rename.py
index fc69f7f..de5d398 100644
--- a/app/plugins/files/rename.py
+++ b/app/plugins/files/rename.py
@@ -8,7 +8,7 @@ from ub_core.utils.helpers import progress
from app import BOT, Message, bot
from app.plugins.files.download import telegram_download
-from app.plugins.files.upload import FILE_TYPE_MAP
+from app.plugins.files.upload import FILE_TYPE_MAP, MediaType
@bot.add_cmd(cmd="rename")
@@ -31,10 +31,7 @@ async def rename(bot: BOT, message: Message):
await response.edit("Input verified....Starting Download...")
if message.replied:
download_coro = telegram_download(
- message=message.replied,
- path=dl_path,
- file_name=input,
- response=response,
+ message=message.replied, path=dl_path, file_name=input, response=response
)
else:
url, file_name = input.split(maxsplit=1)
@@ -44,9 +41,16 @@ async def rename(bot: BOT, message: Message):
download_coro = dl_obj.download()
try:
downloaded_file: DownloadedFile = await download_coro
- media: dict = await FILE_TYPE_MAP[downloaded_file.type](
- downloaded_file, has_spoiler="-s" in message.flags
- )
+
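+            # -d flag: force a plain document upload instead of the auto-detected media type.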
+ if "-d" in message.flags:
+ media: dict = await FILE_TYPE_MAP[MediaType.DOCUMENT](
+ downloaded_file, has_spoiler="-s" in message.flags
+ )
+ else:
+ media: dict = await FILE_TYPE_MAP[downloaded_file.type](
+ downloaded_file, has_spoiler="-s" in message.flags
+ )
+
progress_args = (
response,
"Uploading...",
diff --git a/app/plugins/tg_tools/chat.py b/app/plugins/tg_tools/chat.py
index d034162..1884d18 100644
--- a/app/plugins/tg_tools/chat.py
+++ b/app/plugins/tg_tools/chat.py
@@ -1,7 +1,8 @@
+import asyncio
import os
-from pyrogram.enums import ChatType
from pyrogram.errors import BadRequest
+from ub_core.utils import get_name
from app import BOT, Message, bot
@@ -10,19 +11,26 @@ from app import BOT, Message, bot
async def get_ids(bot: BOT, message: Message) -> None:
reply: Message = message.replied
if reply:
- ids: str = ""
- reply_forward = reply.forward_from_chat
- reply_user = reply.from_user
- ids += f"Chat: `{reply.chat.id}`\n"
+ resp_str: str = ""
+
+        reply_forward, reply_user = reply.forward_from_chat, reply.from_user
+
+ resp_str += f"{get_name(reply.chat)}: {reply.chat.id}\n"
+
if reply_forward:
- ids += f"Replied {'Channel' if reply_forward.type == ChatType.CHANNEL else 'Chat'}: `{reply_forward.id}`\n"
+            resp_str += f"{get_name(reply_forward)}: {reply_forward.id}\n"
+
if reply_user:
- ids += f"User: {reply.from_user.id}"
+ resp_str += f"{get_name(reply_user)}: {reply_user.id}"
elif message.input:
- ids: int = (await bot.get_chat(message.input[1:])).id
+ resp_str: int = (await bot.get_chat(message.input[1:])).id
else:
- ids: str = f"Chat:`{message.chat.id}`"
- await message.reply(ids)
+        resp_str: str = f"{get_name(message.chat)}: {message.chat.id}"
+ await message.reply(resp_str)
@bot.add_cmd(cmd="join")
@@ -50,6 +58,7 @@ async def leave_chat(bot: BOT, message: Message) -> None:
del_in=5,
block=True,
)
+ await asyncio.sleep(2)
try:
await bot.leave_chat(chat)
except Exception as e:
diff --git a/app/plugins/tg_tools/pm_n_tag_logger.py b/app/plugins/tg_tools/pm_n_tag_logger.py
index 066043a..1b5afbe 100644
--- a/app/plugins/tg_tools/pm_n_tag_logger.py
+++ b/app/plugins/tg_tools/pm_n_tag_logger.py
@@ -64,6 +64,7 @@ basic_filters = (
& ~filters.service
& ~filters.chat(chats=[bot.me.id])
& ~filters.me
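+    # skip Telegram's support/service chats so their messages are not logged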
+ & ~filters.create(lambda _, __, m: m.chat.is_support)
)
@@ -134,31 +135,43 @@ async def runner():
if not (extra_config.TAG_LOGGER or extra_config.PM_LOGGER):
return
last_pm_logged_id = 0
+
while True:
+
cached_keys = list(MESSAGE_CACHE.keys())
if not cached_keys:
await asyncio.sleep(5)
continue
+
first_key = cached_keys[0]
cached_list = MESSAGE_CACHE.copy()[first_key]
+
if not cached_list:
MESSAGE_CACHE.pop(first_key)
+
for idx, msg in enumerate(cached_list):
+
if msg.chat.type == ChatType.PRIVATE:
+
if last_pm_logged_id != first_key:
last_pm_logged_id = first_key
+
log_info = True
else:
log_info = False
+
coro = log_pm(message=msg, log_info=log_info)
else:
coro = log_chat(message=msg)
+
try:
await coro
except BaseException:
pass
+
MESSAGE_CACHE[first_key].remove(msg)
await asyncio.sleep(5)
+
await asyncio.sleep(15)
@@ -205,7 +218,7 @@ async def log_chat(message: Message):
try:
logged = await message.forward(extra_config.MESSAGE_LOGGER_CHAT)
await logged.reply(
- text=f"#TAG\n{mention} [{u_id}]\nMessage: \n{message.chat.title} ({message.chat.id})",
+ text=f"#TAG\n{mention} [{u_id}]\nMessage: \n{message.chat.title} ({message.chat.id})"
)
except MessageIdInvalid:
await message.copy(extra_config.MESSAGE_LOGGER_CHAT, caption=notice)
diff --git a/app/plugins/tg_tools/pm_permit.py b/app/plugins/tg_tools/pm_permit.py
index 69d7cde..01adb34 100644
--- a/app/plugins/tg_tools/pm_permit.py
+++ b/app/plugins/tg_tools/pm_permit.py
@@ -41,6 +41,10 @@ async def handle_new_pm(bot: BOT, message: Message):
type="info",
)
RECENT_USERS[user_id] += 1
+
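+    # never treat Telegram's official support chat as a spammer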
+ if message.chat.is_support:
+ return
+
if RECENT_USERS[user_id] >= 5:
await message.reply("You've been blocked for spamming.")
await bot.block_user(user_id)