From ac8d3089e11e7780470eeb8fa4d0985b8caba29a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=E5=B0=8F=E4=BA=8C?=
Date: Sat, 24 Feb 2024 01:20:17 +0800
Subject: [PATCH] add 02

---
 01.GPTChat/README.md                 |  8 +++++---
 02.ChatWithGemma7b /README.md        |  0
 02.ChatWithGemma7b /gemmachat.py     | 11 +++++++++++
 02.ChatWithGemma7b /requirements.txt |  2 ++
 4 files changed, 18 insertions(+), 3 deletions(-)
 create mode 100644 02.ChatWithGemma7b /README.md
 create mode 100644 02.ChatWithGemma7b /gemmachat.py
 create mode 100644 02.ChatWithGemma7b /requirements.txt

diff --git a/01.GPTChat/README.md b/01.GPTChat/README.md
index 179aae8..2ab501b 100644
--- a/01.GPTChat/README.md
+++ b/01.GPTChat/README.md
@@ -2,17 +2,19 @@
 
 
-## run
+
+
+## run it
 
 python >= 3.9
 
 ```pip3 install -r requirements.txt```
 
+
 ```bash
 python3 gptchat.py
-
 ```
 
-then open url: http://localhost:8888
+open http://localhost:8888
 
 
 
diff --git a/02.ChatWithGemma7b /README.md b/02.ChatWithGemma7b /README.md
new file mode 100644
index 0000000..e69de29
diff --git a/02.ChatWithGemma7b /gemmachat.py b/02.ChatWithGemma7b /gemmachat.py
new file mode 100644
index 0000000..2a1bb3c
--- /dev/null
+++ b/02.ChatWithGemma7b /gemmachat.py
@@ -0,0 +1,11 @@
+# pip install accelerate
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it")
+model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it", device_map="auto")
+
+input_text = "Write me a poem about Machine Learning."
+input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+outputs = model.generate(**input_ids)
+print(tokenizer.decode(outputs[0]))
diff --git a/02.ChatWithGemma7b /requirements.txt b/02.ChatWithGemma7b /requirements.txt
new file mode 100644
index 0000000..e636e28
--- /dev/null
+++ b/02.ChatWithGemma7b /requirements.txt
@@ -0,0 +1,2 @@
+accelerate
+transformers