import json
import time
from bs4 import BeautifulSoup
from utils.Logger import log
from utils.createBrowserDriver import create
from utils.filse import save_json
from api.index import importJson, getReptileTask, importJsonPath
from utils.index import convert_to_traditional, yt_dlp_download, convert_string_to_time, parse_twitter_time_string, \
    extract_image_format, create_directory_if_not_exists, delete_directory
# from pytube import YouTube
import os
import sys
from datetime import datetime
from utils.download_image import download_image
from config.settings import get_base_file_url
from config.settings import get_account
# --------------- selenium dependencies start ----------------
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# --------------- selenium dependencies end ----------------
import platform

'''
Open the Twitter search page with Selenium, log in if a login form is shown,
scroll to load search results for the given keyword, parse each tweet
(author, time, text, images / video posters), download the images locally,
and finally save the collected data as JSON and close the browser driver.
'''


def reptile(browser=None, search_word=""):
    """
    :param browser: an existing Selenium driver, or None to create a new one
    :param search_word: keyword to search for
    """
    print(f"search word: {search_word}")
    base_url = "https://twitter.com/"
    browser = browser or create(no_headless=False, using_user_data=True)
    # open the home page
    browser.get(base_url)
    time.sleep(2)
    try:
        try:
            login_button = browser.find_element('xpath', "//a[@href='/login']")
            login_button.click()
            time.sleep(2)
        except:
            # no login link found; probably already logged in
            pass
        # wait = WebDriverWait(browser, 20)
        # wait.until(EC.presence_of_element_located((By.XPATH, "//input[@autocomplete='username']")))
        # check whether a login form is shown
        login_input = browser.find_element('xpath', "//input[@autocomplete='username']")
        login_input.send_keys(get_account("twitter")["name"])
        # "Next" button
        buttons = browser.find_element('xpath', "//div[@role='button'][2]")
        buttons.click()
        wait = WebDriverWait(browser, 10)
        wait.until(EC.presence_of_element_located((By.XPATH, "//input[@autocomplete='current-password']")))
        password_input = browser.find_element('xpath', "//input[@autocomplete='current-password']")
        password_input.send_keys(get_account("twitter")["password"])
        # login button
        button_login = browser.find_element('xpath', "//div[@data-testid='LoginForm_Login_Button']")
        button_login.click()
        time.sleep(2)
    except:
        # login form not present; continue with the current session
        pass

    url = 'https://twitter.com/search?q=' + search_word + '&src=typed_query'
    browser.get(url)
    time.sleep(2)
    # scroll to the bottom of the page
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2)
    wait = WebDriverWait(browser, 10)
    wait.until(EC.presence_of_element_located(
        (By.XPATH, "//div[@data-testid='cellInnerDiv']//article/div/div/div[2]/div[2]")))
    base_xpath = "//div[@data-testid='cellInnerDiv']//article/div/div/div[2]/div[2]"
    # tweet content blocks
    element_content_list = browser.find_elements('xpath', base_xpath)
    # fewer than 10 items: scroll again to load the next page
    if len(element_content_list) < 10:
        browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(2)
        element_content_list = browser.find_elements('xpath', base_xpath)
    # authors
    element_authors_list = browser.find_elements(
        'xpath', f"{base_xpath}//div[@data-testid='User-Name']/div[1]//a[@role='link']")
    # guard against mismatched list lengths to avoid an IndexError
    length = min(len(element_authors_list), len(element_content_list))
    for index in range(length):
        soup = BeautifulSoup(element_content_list[index].get_attribute("outerHTML"), "html.parser")
        # find the <time> tag
        try:
            time_soup = soup.find('time')
            timestamp = datetime.fromisoformat(time_soup['datetime'].replace("Z", "+00:00")).timestamp()
            link_soup = time_soup.parent
            link_str = base_url + link_soup["href"]
        except:
            link_str = ""
            timestamp = time.time()

        # time filter: if 'releaseTime' is not an integer, convert it to one
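        # Note: beginFiltrationTime and endFiltrationTime are module-level globals
        # holding epoch seconds; main() overwrites them from the task payload when
        # the task request succeeds (otherwise the placeholder defaults remain), and
        # tweets outside that window are skipped below.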
        new_releaseTime = int(timestamp)
        if new_releaseTime < beginFiltrationTime or new_releaseTime > endFiltrationTime:
            # outside the requested time window: skip this item and move on
            continue

        author = element_authors_list[index].text
        # title = author + date
        title = f"{author}-{datetime.fromtimestamp(int(timestamp))}"
        video_list = soup.find_all("video")
        # lth = len(ignore_list)
        if len(video_list) > 0:
            # for key, element in enumerate(video_list):
            div_elements = soup.find("div").findChildren("div", recursive=False)
            # div_tags = soup.find_all("div", recursive=False)
            for item in video_list:
                # build an <img> placeholder from the video's poster frame
                div = soup.new_tag('div')
                img_tag = soup.new_tag('img')
                img_tag["src"] = item["poster"]
                div.append(img_tag)
                for items in div_elements:
                    attr = False
                    try:
                        attr = items["aria-labelledby"]
                    except:
                        attr = False
                    if attr:
                        # div["aria-labelledby"] = "sdfsf"
                        # replace the div[@aria-labelledby] block (the one containing the
                        # video) with the <img> placeholder built above
                        items.replaceWith(div)
                    else:
                        pass
        else:
            # no video in this tweet
            pass

        image_list = soup.find_all("img")
        picture_url = []
        if len(image_list) > 0:
            for key, element in enumerate(image_list):
                # drop inline SVG icons
                if str(element['src']).find("svg") != -1:
                    element.extract()
                else:
                    # download the image locally and rewrite the src attribute
                    # (note: images downloaded within the same second share a filename)
                    id = str(int(time.time()))
                    image_type = extract_image_format(element['src'])
                    # local download path
                    download_dir = f'{os.path.join(local_path, f"{id}.{image_type}")}'
                    # public access URL
                    access_address = f'{get_base_file_url()}{table_name.split("_")[1]}/{local_path_name}/{id}.{image_type}'
                    # download result
                    status = download_image(element['src'], download_dir)
                    if status:
                        element['src'] = access_address
                        picture_url.append(download_dir)
        else:
            # tweet has no images
            pass
        # strip the redundant wrapper divs
        # parent_div = soup.find("div")
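        # The cleanup that follows keeps only the middle direct-child <div> blocks of
        # the tweet's root element: the first and last children (presumably header
        # and action-bar chrome) are removed before the remaining HTML is serialized.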
        # find all direct child elements
        div_elements = soup.find("div").findChildren("div", recursive=False)
        for key, item in enumerate(div_elements):
            if key == 0 or key == len(div_elements) - 1:
                item.extract()
        content = soup.prettify()
        # ---------------- determine content type start ----------
        content_type = ""
        try:
            # look for <img> tags
            img_tags = soup.find_all('img')
            if len(img_tags) > 0:
                content_type = "图文"  # image + text (value kept as-is for downstream consumers)
            else:
                content_type = "文字"  # text only
        except:
            content_type = "文字"
        # ---------------- determine content type end ----------
        # --------------- assemble the record start ---------------------
        obj = {
            "title": title,
            "content": content,
            "link": link_str,
            "reptileTime": str(int(time.time())),
            "type": content_type,
            "author": author,
            "releaseTime": str(int(timestamp)),
            "picture_url": ",".join(picture_url)
        }
        # --------------- assemble the record end ---------------------
        data.append(obj)
        soup = ""
        time.sleep(0.1)

    if len(data) > 0:
        # save the JSON data file locally
        json_path = os.path.join(local_path, "data.json")
        state_save = save_json(json_path, data)
        # save the task description
        task = {
            # crawl time
            "reptileTime": data[0]["reptileTime"],
            # local path
            "localPath": local_path,
            "beginFiltrationTime": beginFiltrationTime,
            "endFiltrationTime": endFiltrationTime,
            "keyword": keyword,
            "total": len(data)
        }
        state_save = save_json(os.path.join(file_dir, "task.json"), task)
        if state_save:
            log.debug('save file success')
        else:
            log.debug('save file failed')
        script_close(browser)
    else:
        # nothing was scraped
        log.info("no data scraped")
        # remove the empty task directory
        delete_directory(local_path)
        script_close(browser)


def script_close(browser):
    # close the browser driver
    try:
        browser.close()
        browser.quit()
    except:
        log.debug("failed to close the browser driver")
    try:
        sys.exit()
    except SystemExit:
        # re-raise SystemExit so the script actually exits
        raise
    except Exception:
        print("sys.exit() failed")


def main():
    """
    Fetch the crawl task, then run the Twitter scraper if the task is enabled.
    """
    # request the keyword / task configuration
    response = getReptileTask()
    global status_task
    global beginFiltrationTime
    global endFiltrationTime
    global keyword
    if response['status_code'] == 200 and response['data']['code'] == 200:
        log.debug("call success")
        search_word = ""
        for item in response['data']['rows']:
            if item['name'] == 'twitter':
                search_word = item['keyword']
                # note: this assigns a local variable; the module-level table_name
                # ("pms_twitter") is what the rest of the script actually uses
                table_name = item['tableName']
                keyword = str(item["keyword"])
                status_task = int(item["status"])
                beginFiltrationTime = int(item["beginFiltrationTime"])
                endFiltrationTime = int(item["endFiltrationTime"])
        # convert Simplified Chinese to Traditional before searching
        if status_task == 0 and len(search_word) > 0:
            reptile(None, convert_to_traditional(search_word))
        else:
            log.debug("crawl task is not enabled")
    else:
        log.debug("call failed")
        # request failed or timed out: fall back to a default keyword ("新闻" = "news")
        reptile(None, convert_to_traditional("新闻"))
        # upload_control()


# global state shared with reptile()
data = []
# task details
task = {}
table_name = "pms_twitter"
# global keyword
keyword = ""
# filter window start (epoch seconds, placeholder default)
beginFiltrationTime = int(123)
# filter window end (epoch seconds, placeholder default)
endFiltrationTime = int(123)
file_dir = f'{os.path.join(os.path.abspath("../"), "network-assets-reptile", "reptile_data", table_name.split("_")[1])}'
# task directory name
local_path_name = str(int(time.time()))
# task directory path
local_path = f'{os.path.join(file_dir, local_path_name)}'
# whether the task directory was created
local_path_status = create_directory_if_not_exists(local_path)
# task status (0 runs the crawler; see main())
status_task = 0
# run the script
main()
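# ---------------------------------------------------------------------------
# Reference sketch (assumption, not part of this project): utils.filse.save_json
# is imported above but its implementation is not shown in this file. The script
# only relies on it writing the payload to disk as UTF-8 JSON and returning a
# truthy value on success, roughly as sketched below. The name _save_json_sketch
# and its error handling are illustrative only.
# ---------------------------------------------------------------------------
def _save_json_sketch(path, payload):
    """Write `payload` to `path` as pretty-printed UTF-8 JSON; return True on success."""
    try:
        with open(path, "w", encoding="utf-8") as fp:
            json.dump(payload, fp, ensure_ascii=False, indent=2)
        return True
    except (OSError, TypeError) as err:
        log.debug(f"save_json sketch failed: {err}")
        return False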