diff --git a/assistant.py b/assistant.py
index 1775e12..f53e072 100755
--- a/assistant.py
+++ b/assistant.py
@@ -136,6 +136,36 @@ def chat(message, stream=True):
history.append({"role": 'assistant', 'content': result})
return result
+def chat2(args, user_input, stream=False):
+ if args.reflect:
+ result = reflection_mode(user_input, stream)
+ else:
+ result = chat(user_input, stream)
+ return result
+
+def highlightify_text(full_text):
+ lines = full_text.split('\n')
+ result = ''
+ language = None
+ for line in lines:
+ text = line + '\n'
+
+ # update language if entering or leaving code block
+ if '```' in text:
+ language = text.split('```')[1].split('\n')[0]
+ if language == '':
+ language = None
+ result += text
+ text = ''
+
+ # Only print full lines
+ if '\n' in text:
+ output = text
+ if language:
+ output = highlight_code(language, output)
+ result += output
+ return result
+
def parse_args():
# Create the parser
parser = argparse.ArgumentParser(description='Copy and open a source file in TextEdit')
@@ -149,11 +179,37 @@ def parse_args():
parser.add_argument('--model', '-m', nargs='?', const=True, default=False, help='Specify model')
# Add the --temp (-t) argument
parser.add_argument('--temp', '-t', nargs='?', const=True, default=False, help='Specify temperature')
- # Add the --host (-h) argument
+ # Add the --host argument
parser.add_argument('--host', nargs='?', const=True, default=False, help='Specify host of ollama server')
+ # Add the --reflect argument
+ parser.add_argument('--reflect', action='store_true', help='Use reflection prompting style to improve output. May be slower and not work with all models.')
# Parse the arguments
return parser.parse_args()
+def reflection_mode(query, should_print=False):
+ reflection_prompt = """
+You are a helpful ai assistant that answers every question thoroughly and accurately. You always begin your response with a <planning> section where you lay out your plan for answering the question. It is important that you don't make any assumptions while planning. Then you <reflect> on your plan to make sure it correctly answers the user's question. Then, if you are confident your plan is correct, you give your <answer>, followed by <reflection> to make sure the answer correctly addresses the user's question. Finally, give a <final_answer> with your answer to the user. If there are any ambiguous or unknown requirements, ask the user for more information as your final answer. You must always have a <final_answer> no matter what, even if you are asking for clarifying questions. If you do not have the <final_answer> tags, the user will not see your response. Additionally, the user can not see your planning or reflecting, they can only see what goes in the <final_answer> tags, so make sure you provide any information you want to tell the user in there.
+
+
+"""
+ result = chat(reflection_prompt + query, stream=False)
+ highlighted_result = highlightify_text(result)
+
+ # print('==DEBUG==')
+ # print(highlighted_result)
+ # print('==DEBUG==')
+
+
+    final_answer = highlighted_result.split('<final_answer>')
+    while len(final_answer) < 2:
+        highlighted_result = highlightify_text(chat('Please put your final answer in <final_answer> tags.', stream=False))
+        final_answer = highlighted_result.split('<final_answer>')
+    final_answer = final_answer[1].split('</final_answer>')[0]
+
+ if should_print:
+ print(final_answer)
+ return final_answer
+
def set_host(host):
global server
server = host
@@ -172,9 +228,9 @@ def arg_shell(args):
query += args.shell
else:
query += input('> ')
- result = chat(query, False)
+ result = chat2(args, query, False)
result = blocks[0] if len(blocks := extract_code_block(result)) else result
- print(blocks)
+ print(result)
copy_string_to_clipboard(result)
def handle_piped_input(args):
@@ -186,7 +242,7 @@ def handle_piped_input(args):
query += arg_follow_up(args)
query += '\n'
- result = chat(query)
+ result = chat2(args, query)
blocks = extract_code_block(result)
if args.copy and len(blocks):
copy_string_to_clipboard(blocks[0])
@@ -197,7 +253,7 @@ def handle_non_piped_input(args):
exit()
if args.follow_up:
user_input = arg_follow_up(args)
- result = chat(user_input)
+ result = chat2(args, user_input)
exit()
while True:
try:
@@ -206,7 +262,7 @@ def handle_non_piped_input(args):
print()
exit()
else:
- chat(user_input)
+ result = chat2(args, user_input)
client = None