2025-05-14 03:42:11 +02:00
|
|
|
# /// script
|
|
|
|
|
# requires-python = ">=3.13"
|
|
|
|
|
# dependencies = [
|
|
|
|
|
# "beautifulsoup4",
|
|
|
|
|
# "fake-useragent",
|
|
|
|
|
# "httpx",
|
|
|
|
|
# ]
|
|
|
|
|
# ///
|
|
|
|
|
|
2019-10-19 03:00:52 +05:30
|
|
|
import sys
|
|
|
|
|
import webbrowser
|
|
|
|
|
|
2025-05-14 03:42:11 +02:00
|
|
|
import httpx
|
2019-10-19 03:00:52 +05:30
|
|
|
from bs4 import BeautifulSoup
|
|
|
|
|
from fake_useragent import UserAgent
|
|
|
|
|
|
2019-11-17 20:38:48 +02:00
|
|
|
if __name__ == "__main__":
    print("Googling.....")

    # Pass the query via params so httpx percent-encodes it properly
    # (the old code concatenated raw spaces into the URL).
    query = " ".join(sys.argv[1:])

    res = httpx.get(
        "https://www.google.com/search",
        params={"q": query},
        # The correct header name is "User-Agent"; the previous key
        # "UserAgent" is not a real HTTP header and was ignored by servers.
        headers={"User-Agent": UserAgent().random},
        timeout=10,
        follow_redirects=True,
    )
    # res.raise_for_status()

    # Save the raw page for inspecting the result-link CSS class.
    # NOTE: httpx.Response has no iter_content() (that is the requests API);
    # res.content already holds the fully-read body.
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        out_file.write(res.content)

    soup = BeautifulSoup(res.text, "html.parser")
    # Take at most the first five result links.
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        href = link.get("href")
        if link.text == "Maps":
            # "Maps" links are already absolute URLs.
            webbrowser.open(href)
        else:
            # Other result hrefs are site-relative; prefix the domain.
            webbrowser.open(f"https://google.com{href}")
|