wake up, eric

Adam 2022-12-30 22:30:16 -05:00
parent faa542aca4
commit faafd258a3
4 changed files with 64 additions and 26 deletions

@@ -3,31 +3,34 @@ import json
url = 'https://doordesk.net/chat'

def cartman_speak(user_message):
def cartman_respond(user_message):
    message = {'Message': user_message}
    response = requests.post(url, json.dumps(message))
    return response.json().get('Cartman')

from transformers.models.auto.modeling_auto import AutoModelForCausalLM
from transformers.models.auto.tokenization_auto import AutoTokenizer
# from transformers.models.auto.tokenization_auto import AutoTokenizer
# from transformers.models.auto.modeling_auto import AutoModelForCausalLM
# import torch
#
# tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-large')
# model = AutoModelForCausalLM.from_pretrained('../southpark/output-medium')
#
# def cartman_speak(user_message):
#     new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors='pt')
#     bot_output = new_user_input_ids
#     bot_input_ids = torch.cat([new_user_input_ids, bot_output])
#     bot_output = model.generate(
#         bot_input_ids, max_length=200,
#         pad_token_id=tokenizer.eos_token_id,
#         no_repeat_ngram_size=3,
#         do_sample=True,
#         top_k=100,
#         top_p=0.7,
#         temperature=0.8
#     )
#
#     return '{}'.format(tokenizer.decode(bot_output[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("../chatbots/southpark/cartman/models/output-medium")

def cartman_speak(input_text):
    input_ids = tokenizer(input_text + tokenizer.eos_token, return_tensors="pt").input_ids
    outputs = model.generate(
        input_ids,
        pad_token_id=tokenizer.eos_token_id,
        max_new_tokens=200,
        num_beams=8,
        num_beam_groups=4,
        no_repeat_ngram_size=3,
        length_penalty=1.4,
        diversity_penalty=0,
        repetition_penalty=2.1,
        early_stopping=True,
        # do_sample=True,
        # top_k=100,
        # top_p=0.7,
        # temperature=0.8,
    )
    return tokenizer.decode(outputs[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
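A note on the new decoder settings: in transformers, group (diverse) beam search, enabled by num_beam_groups > 1, is incompatible with sampling, which would explain why do_sample and its companions are left commented out, and diversity_penalty only has an effect when it is greater than zero. A minimal smoke test, not part of the commit, assuming the tokenizer, model, and cartman_speak definitions above are loadable:

# Quick REPL to eyeball the new decoding settings (hypothetical, not in the commit).
if __name__ == "__main__":
    while True:
        line = input("you> ")
        if line == "q":  # exact match, so an empty line doesn't quit
            break
        print("cartman>", cartman_speak(line))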

@@ -4,7 +4,7 @@ from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration
device = torch.device("cuda")
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xxl").cuda()
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-xl").cuda()

run = True
while run:

@@ -12,7 +12,7 @@ while run:
    if input_text in 'q':
        run = False
        break
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
    outputs = model.generate(input_ids)
    print(tokenizer.decode(outputs[0]))
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
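Two things worth noting about this hunk: swapping decode(outputs[0]) for batch_decode(outputs, skip_special_tokens=True)[0] strips the <pad> and </s> tokens T5 emits, and stepping down from flan-t5-xxl to flan-t5-xl shrinks the model from 11B to 3B parameters. A further hedged saving, not in the commit, using from_pretrained's torch_dtype argument to load the weights in half precision:

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# fp16 weights need roughly half the VRAM of the default fp32 load; T5
# checkpoints can be numerically fragile in fp16, so verify the outputs.
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-xl")
model = T5ForConditionalGeneration.from_pretrained(
    "google/flan-t5-xl", torch_dtype=torch.float16
).cuda()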

test/gpt-jt_test.py (new file, 18 additions)

@@ -0,0 +1,18 @@
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

device = torch.device("cuda")
tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-JT-6B-v1")
model = AutoModelForCausalLM.from_pretrained("togethercomputer/GPT-JT-6B-v1").cuda()

run = True
while run:
    input_text = input('>> ')
    if input_text in 'q':
        run = False
        break
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
    outputs = model.generate(input_ids)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
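A caveat that applies to this script and the next: model.generate(input_ids) with no length argument falls back to the checkpoint's default generation length (max_length=20 for most configs), so replies are clipped after a few words. A hedged one-line tweak, not in the commit:

# Allow up to 100 freshly generated tokens instead of the ~20-token default.
outputs = model.generate(input_ids, max_new_tokens=100)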

test/gptjtest.py (new file, 17 additions)

@@ -0,0 +1,17 @@
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

device = torch.device("cuda")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B").cuda()

run = True
while run:
    input_text = input('>> ')
    if input_text in 'q':
        run = False
        break
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
    outputs = model.generate(input_ids)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
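The load above materializes GPT-J-6B in fp32, which wants on the order of 24 GB of VRAM before .cuda() succeeds. The EleutherAI/gpt-j-6B model card documents a float16 revision of the checkpoint; a sketch of the lighter load, not part of the commit:

import torch
from transformers import AutoModelForCausalLM

# The fp16 revision halves the download and brings VRAM use to roughly 12 GB.
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    revision="float16",
    torch_dtype=torch.float16,
).cuda()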