| import gradio as gr |
| from transformers import AutoModelForCausalLM, AutoTokenizer |
| import torch |
|
|
# HuggingFace model ID for the 350M-parameter multi-language CodeGen checkpoint.
# NOTE: the weights are downloaded on first run, so module import does network I/O.
model_name = "Salesforce/codegen-350M-multi"



# Load the tokenizer and causal-LM weights for the checkpoint above.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


# Prefer GPU when available; `device` is also read by generate_code() below,
# so inputs and model weights end up on the same device.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
|
|
def generate_code(prompt, max_length=100, temperature=0.7, top_p=0.95):
    """Generate code from a natural-language/code prompt with the CodeGen model.

    Args:
        prompt: Input text fed to the model.
        max_length: Total token budget (prompt + completion). Gradio sliders
            deliver floats, so this is cast to int before generation.
        temperature: Sampling temperature (higher = more random).
        top_p: Nucleus-sampling probability mass.

    Returns:
        The decoded model output as a string (includes the original prompt,
        since the full output sequence is decoded).
    """
    inputs = tokenizer(prompt, return_tensors='pt').to(device)
    # Inference only: disable autograd to avoid tracking gradients during decode.
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_length=int(max_length),  # generate() requires an int budget
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            # CodeGen's tokenizer defines no pad token; fall back to EOS to
            # silence the generation warning and keep sampling well-defined.
            pad_token_id=tokenizer.eos_token_id,
        )
    generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_code
|
|
| |
# Build the Gradio UI: prompt box + sampling controls on top, output below,
# and a button that routes everything through generate_code().
with gr.Blocks() as demo:
    gr.Markdown("## CODE GENERATION WITH CODEGEN MODEL")

    # Input widgets; values are forwarded positionally to generate_code().
    prompt_input = gr.Textbox(lines=10, label="Enter your prompt for CodeGen")
    length_slider = gr.Slider(50, 500, value=100, label="Max Length")
    temp_slider = gr.Slider(0.1, 0.9, value=0.7, label="Choose Temperature")
    top_p_slider = gr.Slider(0.1, 1.0, value=0.95, label="Top P Value")

    # Read-only display for the model's completion.
    result_box = gr.Textbox(lines=20, label="Generated Code")

    run_button = gr.Button("Generate Code")
    run_button.click(
        fn=generate_code,
        inputs=[prompt_input, length_slider, temp_slider, top_p_slider],
        outputs=result_box,
    )


# Start the local web server for the demo.
demo.launch()
|
|