# A local GPT-2 chatbot implementation (no OpenAI API key required), similar in spirit to AutoGPT and gpt4all.
class Plugin:
    """A minimal to-do list plugin: stores task names in insertion order."""

    def __init__(self):
        # Tasks are plain strings, kept in the order they were added.
        self.tasks = []

    def add_task(self, task_name):
        """Append *task_name* to the task list."""
        self.tasks.append(task_name)

    def remove_task(self, task_name):
        """Remove the first occurrence of *task_name*.

        Raises ValueError if the task is not present (list.remove semantics,
        preserved from the original implementation).
        """
        self.tasks.remove(task_name)

    def list_tasks(self):
        """Return the internal task list (not a copy)."""
        return self.tasks
# --- Model setup -------------------------------------------------------------
# NOTE(review): AutoTokenizer / AutoModelForCausalLM / pipeline come from the
# Hugging Face `transformers` package; the original paste used them without any
# import, so the import is added here. All curly "smart quotes" from the
# markdown paste have been replaced with plain ASCII quotes.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

todo_plugin = Plugin()

# Single-turn dispatch: task commands are handled locally, anything else is
# sent to the GPT-2 text-generation pipeline.
text = input("Message: ")

if text.startswith("add task"):
    # Everything after "add task " (9 characters) is the task name.
    task_name = text[9:].strip()
    todo_plugin.add_task(task_name)
    print("Task added successfully.")
elif text == "list tasks":
    tasks = todo_plugin.list_tasks()
    print("Tasks:")
    for task in tasks:
        print(task)
else:
    # Guiding prompt prepended to every free-form message before generation.
    # The garbled nested-quote span around /more_ai in the paste has been
    # reconstructed as a plainly quoted "/more_ai".
    prompt = (
        "Guiding Prompt: You are a helpful assistant trained by OpenAI. "
        "Your goal is to provide appropriate and helpful responses based on "
        "the user's input. Do not guess what the user is going to say, and "
        "avoid adding other bots to the conversation, unless the user "
        'specifically requests it by including the text "/more_ai" in their '
        "input. Your responses should be concise and to the point, but also "
        "friendly and natural-sounding. Keep in mind that while the maximum "
        "number of tokens per response is set to 1000, it is generally "
        "better to keep your responses shorter than that, as long responses "
        "may be less engaging for the user\n"
        "User: " + text + "\nBot:"
    )
    text_gen = text_generator(
        prompt, max_length=1000, num_return_sequences=1
    )[0]["generated_text"]

    # Append the generated response to a log file.
    with open("saved_responses.txt", "a") as f:
        f.write(text_gen + "\n")

    # NOTE(review): `TextEngine` and `gpt3` are not defined or imported
    # anywhere in this file — these calls raise NameError unless those modules
    # are supplied elsewhere; confirm where they come from. They are moved
    # inside this else-branch because `text_gen` only exists here (the
    # original referenced it unconditionally, a NameError on the task
    # branches).
    txteng = TextEngine.textengine
    gpt_3_gen = gpt3.Plugin.generator
    txteng(text_gen)
    txteng(gpt_3_gen(prompt=prompt))