@@ -100,6 +100,11 @@ INPUT_SCHEMA = {
         'required': False,
         'default': 0
     },
+    'chat_generation_attempts': {
+        'type': int,
+        'required': False,
+        'default': 1
+    },
 }
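The hunk above registers an optional chat_generation_attempts input with a default of 1. As a rough sketch of a request body that exercises it — the 'input' envelope around the schema fields is an assumption from how such workers are typically invoked, not something shown in this diff:

    # Hypothetical payload; only 'chat_generation_attempts' is introduced by this change.
    job = {
        'input': {
            'prompt': 'Hello, how are you?',
            'chat_generation_attempts': 3  # run generation three times
        }
    }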
@@ -191,27 +196,33 @@ def generator(job):
 
     input_ids = tokenizer(val_input['prompt'], return_tensors="pt").input_ids.to(device)
 
-    gen_tokens = model.generate(
-        input_ids,
-        do_sample=val_input['do_sample'],
-        temperature=val_input['temperature'],
-        max_length=val_input['max_length'],
-        repetition_penalty=val_input['repetition_penalty'],
-        top_p=val_input['top_p'],
-        top_k=val_input['top_k'],
-        typical_p=val_input['typical_p'],
-        encoder_repetition_penalty=val_input['encoder_repetition_penalty'],
-        min_length=val_input['min_length'],
-        num_beams=val_input['num_beams'],
-        early_stopping=val_input['early_stopping'],
-        penalty_alpha=val_input['penalty_alpha'],
-        length_penalty=val_input['length_penalty'],
-        no_repeat_ngram_size=val_input['no_repeat_ngram_size'],
-    ).to(device)
-
-    gen_text = tokenizer.batch_decode(gen_tokens)[0]
-
-    return gen_text
+    output = []
+    for i in range(val_input['chat_generation_attempts']):
+        gen_tokens = model.generate(
+            input_ids,
+            do_sample=val_input['do_sample'],
+            temperature=val_input['temperature'],
+            max_length=val_input['max_length'],
+            repetition_penalty=val_input['repetition_penalty'],
+            top_p=val_input['top_p'],
+            top_k=val_input['top_k'],
+            typical_p=val_input['typical_p'],
+            encoder_repetition_penalty=val_input['encoder_repetition_penalty'],
+            min_length=val_input['min_length'],
+            num_beams=val_input['num_beams'],
+            early_stopping=val_input['early_stopping'],
+            penalty_alpha=val_input['penalty_alpha'],
+            length_penalty=val_input['length_penalty'],
+            no_repeat_ngram_size=val_input['no_repeat_ngram_size'],
+        ).to(device)
+
+        gen_text = tokenizer.batch_decode(gen_tokens)[0]
+
+        if val_input['chat_generation_attempts'] == 1:
+            output = gen_text
+        else:
+            output.append(gen_text)
+
+    return output
 
 # ---------------------------------------------------------------------------- #
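Taken together, the two hunks change the handler's return shape: with chat_generation_attempts == 1 the loop runs once and output is the single decoded string, while for larger values output is a list of decoded strings, one per attempt. A minimal usage sketch under that reading — the validation step that maps job to val_input is outside this diff, so the call shape here is illustrative:

    # Illustrative only; 'generator' is the handler modified above.
    single = generator({'input': {'prompt': 'Hi', 'chat_generation_attempts': 1}})
    # -> str: one decoded generation
    multi = generator({'input': {'prompt': 'Hi', 'chat_generation_attempts': 3}})
    # -> list of str: three decoded generations

Note that the return type now varies with the input, so callers need to branch on whether they receive a string or a list.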