add 02
This commit is contained in:
@@ -2,17 +2,19 @@
|
||||
|
||||
|
||||
|
||||
## run
|
||||
|
||||
|
||||
## Run it
|
||||
|
||||
python >= 3.9
|
||||
|
||||
```pip3 install -r requirements.txt```
|
||||
|
||||
|
||||
```bash
|
||||
python3 gptchat.py
|
||||
|
||||
```
|
||||
|
||||
Then open the URL: http://localhost:8888
|
||||
open http://localhost:8888
|
||||
|
||||
|
||||
|
||||
0
02.ChatWithGemma7b /README.md
Normal file
0
02.ChatWithGemma7b /README.md
Normal file
11
02.ChatWithGemma7b /gemmachat.py
Normal file
11
02.ChatWithGemma7b /gemmachat.py
Normal file
@@ -0,0 +1,11 @@
|
||||
# pip install accelerate
"""Minimal Gemma-7B-IT chat demo: load the model, generate one completion."""
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it")
# device_map="auto" lets accelerate place the weights on the best available
# device(s): GPU(s) if present, otherwise CPU.
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it", device_map="auto")

input_text = "Write me a poem about Machine Learning."
# The tokenizer returns a BatchEncoding (input_ids + attention_mask), not bare
# ids. Move it to wherever the model actually ended up instead of hard-coding
# "cuda", which crashes on CPU-only machines despite device_map="auto".
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0]))
|
||||
2
02.ChatWithGemma7b /requirements.txt
Normal file
2
02.ChatWithGemma7b /requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
accelerate
|
||||
transformers
|
||||
Reference in New Issue
Block a user