From e4e364da0eeacab2d7fe04d68694240b06b4177f Mon Sep 17 00:00:00 2001 From: thedragonsinn <98635854+thedragonsinn@users.noreply.github.com> Date: Fri, 1 Mar 2024 12:25:24 +0530 Subject: [PATCH] limit response length in gemini --- app/plugins/misc/gemini.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/app/plugins/misc/gemini.py b/app/plugins/misc/gemini.py index 8606580..812efa3 100644 --- a/app/plugins/misc/gemini.py +++ b/app/plugins/misc/gemini.py @@ -11,9 +11,7 @@ MODEL = genai.GenerativeModel( "gemini-pro", safety_settings={"HARASSMENT": "block_none"} ) -INSTRUCTIONS = ( - "your response length must not exceed 4000 for this following question:\n" -) +INSTRUCTIONS = "Your response length must not exceed 4000 for all of my question(s):\n" async def init_task(): @@ -44,7 +42,8 @@ async def question(bot: BOT, message: Message): """ if not (await basic_check(message)): # fmt:skip return - response = await MODEL.generate_content_async(message.input) + prompt = INSTRUCTIONS + message.input + response = await MODEL.generate_content_async(prompt) response_text = get_response_text(response) await message.reply( text="**GEMINI AI**:\n\n" + response_text, parse_mode=ParseMode.MARKDOWN @@ -96,7 +95,7 @@ async def ai_chat(bot: BOT, message: Message): await resp.edit("History Loaded... Resuming chat") chat = MODEL.start_chat(history=history) try: - await do_convo(chat=chat, message=message) + await do_convo(chat=chat, message=message, history=True) except TimeoutError: await export_history(chat, message) @@ -105,8 +104,11 @@ def get_response_text(response): return "\n".join([part.text for part in response.parts]) -async def do_convo(chat, message: Message): - prompt = message.input +async def do_convo(chat, message: Message, history: bool = False): + if not history: + prompt = INSTRUCTIONS + message.input + else: + prompt = message.input reply_to_message_id = message.id async with Convo( client=bot,