diff --git a/01.GPTChat/README.md b/01.GPTChat/README.md
index 179aae8..2ab501b 100644
--- a/01.GPTChat/README.md
+++ b/01.GPTChat/README.md
@@ -2,17 +2,19 @@
-## run
+
+
+## run it
 
 
 python >= 3.9
 
 
 ```pip3 install -r requirements.txt```
 
+
 ```bash
 python3 gptchat.py
-
 ```
 
 
-then open url: http://localhost:8888
+open http://localhost:8888
 
 
diff --git a/02.ChatWithGemma7b/README.md b/02.ChatWithGemma7b/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/02.ChatWithGemma7b/gemmachat.py b/02.ChatWithGemma7b/gemmachat.py
new file mode 100644
index 0000000..2a1bb3c
--- /dev/null
+++ b/02.ChatWithGemma7b/gemmachat.py
@@ -0,0 +1,14 @@
+# pip install accelerate
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it")
+# device_map="auto" lets accelerate place the model on GPU when available, else CPU.
+model = AutoModelForCausalLM.from_pretrained("google/gemma-7b-it", device_map="auto")
+
+input_text = "Write me a poem about Machine Learning."
+# Move the inputs to the model's device instead of hard-coding "cuda",
+# so the script also runs on CPU-only machines.
+input_ids = tokenizer(input_text, return_tensors="pt").to(model.device)
+
+outputs = model.generate(**input_ids)
+print(tokenizer.decode(outputs[0]))
diff --git a/02.ChatWithGemma7b/requirements.txt b/02.ChatWithGemma7b/requirements.txt
new file mode 100644
index 0000000..e636e28
--- /dev/null
+++ b/02.ChatWithGemma7b/requirements.txt
@@ -0,0 +1,3 @@
+accelerate
+torch
+transformers