```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("yeontaek/WizardCoder-Python-13B-LoRa")
model = AutoModelForCausalLM.from_pretrained("yeontaek/WizardCoder-Python-13B-LoRa")
```
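Once the tokenizer and model are loaded, text can be generated through the standard `generate` API. The prompt and decoding settings below are illustrative assumptions, not values prescribed by this model card:

```python
# Minimal generation sketch; the prompt and decoding parameters are
# illustrative assumptions, not recommendations from the model card.
prompt = "Write a Python function that reverses a string."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```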
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="yeontaek/WizardCoder-Python-13B-LoRa")
```
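Calling the pipeline returns a list of generated sequences. The prompt and `max_new_tokens` value here are illustrative assumptions:

```python
# Illustrative call; the prompt and max_new_tokens are assumptions.
result = pipe("Write a Python function that reverses a string.", max_new_tokens=128)
print(result[0]["generated_text"])
```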