import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

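# Gated repo: access requires accepting Meta's license on the Hugging Face Hub
# and authenticating locally (e.g. via `huggingface-cli login`).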
model_id = 'meta-llama/Meta-Llama-3.1-70B'

tokenizer = AutoTokenizer.from_pretrained(model_id)
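# TextStreamer prints decoded tokens to stdout as generation proceeds.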
streamer = TextStreamer(tokenizer)

with open('input.txt') as f:
    prompt = f.read()
input_ids = tokenizer.encode(prompt, return_tensors='pt')

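# 70B parameters in bfloat16 take roughly 140 GB of weights; device_map="auto"
# shards the model across all visible GPUs, spilling to CPU RAM if needed.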
model = AutoModelForCausalLM.from_pretrained(
  model_id,
  torch_dtype=torch.bfloat16,
  device_map="auto",
)

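# Greedy decoding (do_sample=False); send the prompt to the model's device
# rather than assuming cuda:0, since device_map="auto" controls placement.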
outputs = model.generate(
    input_ids=input_ids.to(model.device),
    do_sample=False,
    max_new_tokens=512,
    streamer=streamer,
)
# Decode only the newly generated tokens, dropping the echoed prompt
# and any special tokens (e.g. the end-of-sequence marker).
completion = tokenizer.decode(outputs[0, input_ids.shape[1]:], skip_special_tokens=True)
with open('output.txt', 'w') as f:
    f.write(completion)
