Add Image Gen lexicapi, Improved logging format
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -14,4 +14,5 @@ __pycache__/
|
||||
.idea
|
||||
config.ini
|
||||
unknown_errors.txt
|
||||
moonlogs.txt
|
||||
.env
|
||||
6
main.py
6
main.py
@@ -54,7 +54,11 @@ if config.STRINGSESSION:
|
||||
app = Client("my_account", **common_params)
|
||||
|
||||
async def main():
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
||||
handlers=[logging.FileHandler("moonlogs.txt"), logging.StreamHandler()],
|
||||
level=logging.INFO,
|
||||
)
|
||||
DeleteAccount.__new__ = None
|
||||
|
||||
try:
|
||||
|
||||
@@ -10,9 +10,6 @@ from utils.config import vca_api_key
|
||||
from utils.misc import modules_help, prefix
|
||||
from utils.scripts import format_exc, import_library
|
||||
|
||||
lexica = import_library("lexica", "lexica-api")
|
||||
from lexica import Client as lcl
|
||||
|
||||
api_url = "https://visioncraft.top"
|
||||
|
||||
async def fetch_models():
|
||||
@@ -52,13 +49,7 @@ async def download_image(session, image_url, filename):
|
||||
with open(filename, "wb") as f:
|
||||
f.write(image_data)
|
||||
|
||||
def upscale_request_lexica(image: bytes) -> bytes:
    """Upscale *image* through the Lexica API (synchronous helper).

    Args:
        image: Raw image bytes to upscale.

    Returns:
        The upscaled image as raw bytes.
    """
    # lcl is the synchronous lexica Client alias imported at module level;
    # upscale() returns the processed image bytes directly.
    client = lcl()
    return client.upscale(image)
|
||||
|
||||
async def upscale_request_vc(api_key, image_data):
|
||||
async def upscale_request(api_key, image_data):
|
||||
"""Request Maker Helper function to upscale image for VisionCraft API"""
|
||||
image_base64 = base64.b64encode(image_data).decode('utf-8')
|
||||
|
||||
@@ -102,23 +93,19 @@ async def vdxl(c: Client, message: Message):
|
||||
await message.edit_text("<code>Please Wait...</code>")
|
||||
|
||||
if len(message.command) > 2:
|
||||
model, prompt = message.text.split(maxsplit=2)[1:]
|
||||
if model not in models:
|
||||
await message.edit_text(f"<b>Usage: </b><code>{prefix}vdxl [model]* [prompt/reply to prompt]*</code>\n <b>Available Models:</b> <blockquote>{models}</blockquote>")
|
||||
return
|
||||
model, prompt = message.text.split(maxsplit=2)[1:]
|
||||
if model not in models:
|
||||
return await message.edit_text(f"<b>Usage: </b><code>{prefix}vdxl [model]* [prompt/reply to prompt]*</code>\n <b>Available Models:</b> <blockquote>{models}</blockquote>")
|
||||
elif message.reply_to_message and len(message.command) > 1:
|
||||
model = message.text.split(maxsplit=1)[1]
|
||||
if model in models:
|
||||
prompt = message.reply_to_message.text
|
||||
else:
|
||||
await message.edit_text(f"<b>Usage: </b><code>{prefix}vdxl [model]* [prompt/reply to prompt]*</code>\n <b>Available Models:</b> <blockquote>{models}</blockquote>")
|
||||
return
|
||||
|
||||
model = message.text.split(maxsplit=1)[1]
|
||||
if model in models:
|
||||
prompt = message.reply_to_message.text
|
||||
else:
|
||||
return await message.edit_text(f"<b>Usage: </b><code>{prefix}vdxl [model]* [prompt/reply to prompt]*</code>\n <b>Available Models:</b> <blockquote>{models}</blockquote>")
|
||||
else:
|
||||
await message.edit_text(
|
||||
return await message.edit_text(
|
||||
f"<b>Usage: </b><code>{prefix}vdxl [model]* [prompt/reply to prompt]*</code>\n <b>Available Models:</b> <blockquote>{models}</blockquote>"
|
||||
)
|
||||
return
|
||||
|
||||
data = {
|
||||
"prompt": prompt,
|
||||
@@ -243,27 +230,6 @@ async def vgif(c: Client, message: Message):
|
||||
except Exception as e:
|
||||
await message.edit_text(f"An error occurred: {format_exc(e)}")
|
||||
|
||||
|
||||
@Client.on_message(filters.command("lupscale", prefix) & filters.me)
async def lupscale(client: Client, message: Message):
    """Upscale Image Using Lexica API.

    Downloads the photo from the command message (falling back to the
    replied-to message), upscales it, and sends the result back as a
    document. Edits the message with an error text if no file is found.
    """
    try:
        photo_data = await message.download()
    except ValueError:
        try:
            photo_data = await message.reply_to_message.download()
        except ValueError:
            await message.edit("<b>File not found</b>", parse_mode=enums.ParseMode.HTML)
            return
    await message.edit("<code>Processing...</code>", parse_mode=enums.ParseMode.HTML)
    # Use a context manager so the downloaded file's handle is closed
    # (the original leaked it via open(...).read()).
    with open(photo_data, 'rb') as src:
        image = src.read()
    upscaled_image = upscale_request_lexica(image)
    with open('upscaled.png', 'wb') as f:
        f.write(upscaled_image)
    await client.send_document(message.chat.id, 'upscaled.png', caption="Upscaled!", reply_to_message_id=message.id)
    os.remove('upscaled.png')
|
||||
|
||||
@Client.on_message(filters.command("upscale", prefix) & filters.me)
|
||||
async def upscale(c: Client, message: Message):
|
||||
"""Default Upscaler of Moon-Userbot: Uses VisionCraft APi"""
|
||||
@@ -281,7 +247,7 @@ async def upscale(c: Client, message: Message):
|
||||
|
||||
api_key = vca_api_key
|
||||
image = open(photo_data, 'rb').read()
|
||||
upscaled_image_data = await upscale_request_vc(api_key, image)
|
||||
upscaled_image_data = await upscale_request(api_key, image)
|
||||
with open('upscaled_image.png', 'wb') as file:
|
||||
file.write(upscaled_image_data)
|
||||
await i.delete()
|
||||
@@ -320,6 +286,7 @@ async def whisp(message: Message):
|
||||
except Exception as e:
|
||||
await message.edit_text(f"An error occurred: {format_exc(e)}")
|
||||
|
||||
|
||||
modules_help["aiutils"] = {
|
||||
"vdxl [model]* [prompt/reply to prompt]*": "Text to Image with SDXL model",
|
||||
"vgif [prompt/reply to prompt]*": "Text to GIF",
|
||||
|
||||
82
modules/lexica.py
Normal file
82
modules/lexica.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import os
|
||||
import requests
|
||||
|
||||
from pyrogram import Client, filters
|
||||
from pyrogram.types import Message
|
||||
|
||||
from utils.misc import modules_help, prefix
|
||||
from utils.scripts import format_exc
|
||||
from utils.lexicapi import ImageGeneration, UpscaleImages, ImageModels
|
||||
|
||||
@Client.on_message(filters.command("lupscale", prefix) & filters.me)
async def lupscale(client: Client, message: Message):
    """Upscale Image Using Lexica API.

    Downloads the photo from the command message (falling back to the
    replied-to message), upscales it via UpscaleImages, and sends the
    resulting file back as a document.
    """
    await message.edit("<code>Processing...</code>")
    try:
        photo_data = await message.download()
    except ValueError:
        try:
            photo_data = await message.reply_to_message.download()
        except ValueError:
            await message.edit("<b>File not found</b>")
            return
    try:
        # Close the downloaded file's handle after reading
        # (the original leaked it via open(...).read()).
        with open(photo_data, 'rb') as src:
            image = src.read()
        # BUG FIX: UpscaleImages is a coroutine function; without `await`
        # the original passed a coroutine object to send_document/os.remove.
        upscaled_image = await UpscaleImages(image)
        if message.reply_to_message:
            message_id = message.reply_to_message.id
            # NOTE(review): editing `message` in the except handler below
            # will fail once it is deleted here — confirm intended.
            await message.delete()
        else:
            message_id = message.id
        await client.send_document(message.chat.id, upscaled_image, caption="Upscaled!", reply_to_message_id=message_id)
        os.remove(upscaled_image)
        # TODO(review): the downloaded source file (photo_data) is never
        # removed — confirm whether it should be cleaned up too.
    except Exception as e:
        await message.edit(format_exc(e))
|
||||
|
||||
@Client.on_message(filters.command("lgen", prefix) & filters.me)
async def lgen(client: Client, message: Message):
    """Generate an image with the Lexica API.

    Usage: lgen [model_id] [prompt], or lgen [model_id] as a reply to a
    message whose text is used as the prompt. Sends the generated image
    back as a document captioned with the prompt and model name.
    """
    try:
        await message.edit_text("<code>Processing...</code>")

        models = ImageModels()
        models_ids = models.values()

        if len(message.command) > 2:
            model_id = int(message.text.split()[1])
            if model_id not in models_ids:
                return await message.edit_text(f"<b>Usage: </b><code>{prefix}lgen [model_id]* [prompt/reply to prompt]*</code>\n <b>Available Models and IDs:</b> <blockquote>{models}</blockquote>")
            message_id = None
            # BUG FIX: the original used ''.join(split()[2:]), which glued
            # the prompt words together with no spaces. maxsplit keeps the
            # prompt text intact past the command and model id.
            prompt = message.text.split(maxsplit=2)[2]
        elif message.reply_to_message and len(message.command) > 1:
            model_id = int(message.text.split()[1])
            if model_id not in models_ids:
                return await message.edit_text(f"<b>Usage: </b><code>{prefix}lgen [model_id]* [prompt/reply to prompt]*</code>\n <b>Available Models and IDs:</b> <blockquote>{models}</blockquote>")
            message_id = message.reply_to_message.id
            prompt = message.reply_to_message.text
        else:
            return await message.edit_text(f"<b>Usage: </b><code>{prefix}lgen [model_id]* [prompt/reply to prompt]*</code>\n <b>Available Models and IDs:</b> <blockquote>{models}</blockquote>")

        # model_id was validated against models.values() above, so a
        # matching name always exists.
        model_name = next(key for key, val in models.items() if val == model_id)

        img = await ImageGeneration(model_id, prompt)
        # ImageGeneration returns a URL list on success, 69 for NSFW,
        # 1/2 for API failures, or None on timeout/exception.
        if img is None or img == 1 or img == 2:
            return await message.edit_text("Something went wrong!")
        elif img == 69:
            return await message.edit_text("NSFW is not allowed")
        else:
            img_url = img[0]
            with open("generated_image.png", 'wb') as f:
                f.write(requests.get(img_url, timeout=5).content)

            await client.send_document(message.chat.id, "generated_image.png", caption=f"<b>Prompt: </b><code>{prompt}</code>\n<b>Model: </b><code>{model_name}</code>", reply_to_message_id=message_id)
            os.remove("generated_image.png")
    except Exception as e:
        await message.edit(format_exc(e))
|
||||
|
||||
# Register this module's commands in the userbot's global help index.
modules_help["lexica"] = {
    "lgen [model_id]* [prompt/reply to prompt]*": "Generate Image with Lexica API",
    "lupscale [cap/reply to image]*": "Upscale Image through Lexica API",
}
|
||||
@@ -19,4 +19,5 @@ beautifulsoup4
|
||||
aiohttp
|
||||
aiofiles
|
||||
rentry
|
||||
pySmartDL
|
||||
pySmartDL
|
||||
lexica-api
|
||||
@@ -17,13 +17,13 @@
|
||||
import re
|
||||
import json
|
||||
import threading
|
||||
import dns.resolver
|
||||
import pymongo
|
||||
import sqlite3
|
||||
from dns import resolver
|
||||
import pymongo
|
||||
from utils import config
|
||||
|
||||
dns.resolver.default_resolver = dns.resolver.Resolver(configure=False)
|
||||
dns.resolver.default_resolver.nameservers = ["8.8.8.8"]
|
||||
resolver.default_resolver = resolver.Resolver(configure=False)
|
||||
resolver.default_resolver.nameservers = ["1.1.1.1"]
|
||||
|
||||
|
||||
class Database:
|
||||
|
||||
55
utils/lexicapi.py
Normal file
55
utils/lexicapi.py
Normal file
@@ -0,0 +1,55 @@
|
||||
# Copyright 2023 Qewertyy, MIT License
|
||||
import logging
|
||||
import asyncio
|
||||
from lexica import AsyncClient, Client
|
||||
|
||||
def ImageModels():
    """Return a mapping of Lexica image-model names to their numeric IDs.

    Returns:
        dict[str, int]: model name -> model id, as reported by the API's
        models listing under ['models']['image'].
    """
    # One Client is enough — the original built a client, never used it,
    # and then constructed a second one just for .models.
    models = Client().models['models']['image']
    return {model['name']: model['id'] for model in models}
|
||||
|
||||
async def ImageGeneration(model, prompt):
    """Generate images with the Lexica API.

    Args:
        model: Numeric model id understood by the API.
        prompt: Text prompt to generate from.

    Returns:
        A list of image URLs on success, 69 when the API flags the prompt
        as NSFW, 2 for any other non-success response code, or None when
        generation timed out or an exception occurred.
    """
    client = None
    try:
        client = AsyncClient()
        output = await client.generate(model, prompt, "")
        # BUG FIX: check the NSFW code first — the original tested
        # `!= 1` before `== 69`, making the NSFW branch unreachable.
        if output['code'] == 69:
            return output['code']
        if output['code'] != 1:
            return 2
        task_id, request_id = output['task_id'], output['request_id']
        # Generation is asynchronous server-side: give it a head start,
        # then poll every 5 s for up to ~15 retries until code == 2.
        await asyncio.sleep(20)
        tries = 0
        image_url = None
        resp = await client.getImages(task_id, request_id)
        while True:
            if resp['code'] == 2:
                image_url = resp['img_urls']
                break
            if tries > 15:
                break
            await asyncio.sleep(5)
            resp = await client.getImages(task_id, request_id)
            tries += 1
        return image_url
    except Exception as e:
        # logging.warn is deprecated; the original also passed `e` as a
        # stray positional arg with no placeholder. Use lazy %-formatting.
        logging.warning("Failed to generate the image: %s", e)
        return None
    finally:
        # Guard: AsyncClient() itself may have raised, leaving no client
        # bound (the original's bare `client.close()` could NameError).
        if client is not None:
            await client.close()
|
||||
|
||||
async def UpscaleImages(image: bytes) -> str:
    """Upscale an image and return the upscaled image's file path.

    Args:
        image: Raw image bytes to upscale.

    Returns:
        Path of the file ("upscaled.png") the upscaled image was written to.
    """
    client = AsyncClient()
    try:
        content = await client.upscale(image)
    finally:
        # Close the client even when upscale() raises — the original
        # leaked the session on failure.
        await client.close()
    upscaled_file_path = "upscaled.png"
    with open(upscaled_file_path, "wb") as output_file:
        output_file.write(content)
    return upscaled_file_path
|
||||
Reference in New Issue
Block a user