import json
import platform
import time
from bs4 import BeautifulSoup
from utils.Logger import log
from utils.createBrowserDriver import create
from utils.filse import save_json
from api.index import importJson, getReptileTask, importJsonPath
from utils.index import convert_to_traditional, yt_dlp_download, convert_string_to_time, \
    parse_time_string, create_directory_if_not_exists, delete_directory
# from pytube import YouTube
from datetime import datetime
from utils.download_image import download_image
import os
from config.settings import get_base_file_url
from config.settings import get_account
import sys

# --------------- selenium dependencies start ----------------
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# --------------- selenium dependencies end ----------------


def reptile(browser=None, search_word=""):
    """
    Open facebook.com, log in with the configured account if the login form is shown,
    search for `search_word`, scroll the result feed, expand truncated posts, and parse
    each post's author, release time, text and media. Posts outside the configured time
    window are skipped, images are downloaded locally and their src rewritten, and the
    collected records are finally saved to JSON before the browser driver is closed.
    """
    print(f"Search keyword: {search_word}")
    url = "https://www.facebook.com/"
    browser = browser or create(no_headless=False, using_user_data=True)
    # Open the page
    browser.get(url)
    time.sleep(2)
    try:
        # time.sleep(3)
        # Check whether the login form is present
        login_input = browser.find_element('xpath', "//input[@name='email']")
        password_input = browser.find_element('xpath', "//input[@name='pass']")
        login_input.send_keys(get_account("facebook")["name"])
        password_input.send_keys(get_account("facebook")["password"])
        # Click the login button
        button_login = browser.find_element('xpath', "//button[@name='login']")
        button_login.click()
        time.sleep(3)
    except Exception:
        print("Already logged in")

    log.debug("facebook login complete")
    url = f"https://www.facebook.com/search/top?q={search_word}"
    browser.get(url)
    time.sleep(2)
    # Scroll to the bottom of the page via JavaScript to trigger lazy loading
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(6)
    # Wait for the feed to appear, up to 10 seconds
    wait = WebDriverWait(browser, 10)
    # expected_conditions defines the wait condition: presence of the feed container
    wait.until(EC.presence_of_element_located((By.XPATH, "//div[@role='feed']")))
    # Post content
    element_content_list = browser.find_elements('xpath',
                                                 "//div[@role='feed']/div//div[@aria-describedby]/div/div/div/div/div/div[2]/div/div/div[3]")
    # Authors
    element_authors_list = browser.find_elements('xpath',
                                                 "//div[@role='feed']/div//div[@aria-describedby]//h3/span[1]")
    # Release times
    element_release_list = browser.find_elements('xpath',
                                                 "//div[@role='feed']/div//div[@aria-describedby]//span[@dir]/span//a[@role='link' and @aria-label]")
    # Find all "展开" (expand) buttons and click each one so the full post text is loaded
    elements_expand_list = browser.find_elements('xpath',
                                                 "//div[@role='feed']/div//div[@aria-describedby]//div[@role='button' and text()='展开']")
    for element in elements_expand_list:
        try:
            # Click via JavaScript to avoid interception by overlays
            browser.execute_script("arguments[0].click();", element)
        except Exception as e:
            print("Clicking element failed: " + str(e))

    # The three element lists should line up one-to-one; use the shortest length
    # so a mismatch does not raise an IndexError
    length = min(len(element_content_list), len(element_authors_list), len(element_release_list))
    for index in range(length):
        author_soup = BeautifulSoup(element_authors_list[index].get_attribute("outerHTML"), "html.parser")
        time_soup = BeautifulSoup(element_release_list[index].get_attribute("outerHTML"), "html.parser")
        # author = element_authors_list[index].text
        author = author_soup.find_all("a")[0].text
        time_text = time_soup.find_all("a")[0].text
        release_time_timestamp = int(parse_time_string(time_text))
        release_time = str(release_time_timestamp)
        # Time filter: make sure releaseTime is an integer before comparing
        new_releaseTime = int(release_time)
        if new_releaseTime < beginFiltrationTime or new_releaseTime > endFiltrationTime:
            # The release time is outside the configured window; skip this post
            continue
        text = element_content_list[index].text
        soup = BeautifulSoup(element_content_list[index].get_attribute('outerHTML'), 'html.parser')
        soup_str = soup.prettify()
        # Check whether the post contains a video
        video_list = soup.find_all("video")
        image_list = soup.find_all("img")
        # lth = len(ignore_list)
        if len(video_list) > 0:
            # for key,element in enumerate(video_list):
            # The post contains a video: remove the embedded player and leave a placeholder
            # Find the parent element that holds the two child div elements
            parent_div = soup.find('div')
            # Find all direct div children
            div_elements = parent_div.find_all('div', recursive=False)
            # div_tags = soup.find_all("div", recursive=False)
            # Make sure there are at least two direct div children
            if len(div_elements) >= 2:
                # Take the second div (the embedded player) and detach it from its parent
                div_to_remove = div_elements[1]
                div_to_remove.extract()
                # div.decompose()
                # Append an empty <video> tag as a placeholder
                custom_video = soup.new_tag("video")
                custom_video["src"] = ""
                parent_div.append(custom_video)
        else:
            # print("")
            error = ""
        picture_url = []
        if len(image_list) > 0:
            for key, element in enumerate(image_list):
                # Download the image locally and rewrite the tag's src
                # Timestamp plus index keeps images downloaded in the same second from overwriting each other
                image_id = f"{int(time.time())}_{key}"
                # Local download path
                download_dir = os.path.join(local_path, f"{image_id}.jpg")
                # Public access URL
                access_address = f'{get_base_file_url()}{table_name.split("_")[1]}/{local_path_name}/{image_id}.jpg'
                # Download status
                status = download_image(element['src'], download_dir)
                if status:
                    element['src'] = access_address
                    picture_url.append(download_dir)
        else:
            # print("")
            error = ""
        content = soup.prettify()
        # Title: author + date
        title = f"{author}-{datetime.fromtimestamp(release_time_timestamp)}"
        # title = ""
        # ---------------- determine content type start ----------
        content_type = ""
        try:
            # Find all img tags
            img_tags = soup.find_all('img')
            if len(img_tags) > 0:
                content_type = "图文"  # image + text
            else:
                content_type = "文字"  # text only
        except Exception:
            content_type = "文字"
        # ---------------- determine content type end ----------
        # --------------- assemble the record start ---------------
        obj = {
            "title": title,
            "content": content,
            "link": element_release_list[index].get_attribute("href"),
            "reptileTime": str(int(time.time())),
            "type": content_type,
            "author": author,
            "releaseTime": release_time,
            "picture_url": ",".join(picture_url)
        }
        # --------------- assemble the record end ---------------
        data.append(obj)

    if len(data) > 0:
        # Save the scraped records to a local JSON file
        json_path = os.path.join(local_path, "data.json")
        state_save = save_json(json_path, data)
        # Save the task metadata
        task = {
            # Scrape time
            "reptileTime": data[0]["reptileTime"],
            # Local directory of this task
            "localPath": local_path,
            "beginFiltrationTime": beginFiltrationTime,
            "endFiltrationTime": endFiltrationTime,
            "keyword": keyword,
            "total": len(data)
        }
        state_save = save_json(os.path.join(file_dir, "task.json"), task)
        if state_save:
            log.debug('save file success')
        else:
            log.debug('save file failed')
        script_close(browser)
    else:
        # Nothing was scraped
        log.info("No data scraped")
        # Remove the now-empty task directory
        delete_directory(local_path)
        script_close(browser)


def script_close(browser):
    # Close the browser driver
    try:
        browser.close()
        browser.quit()
    except Exception:
        log.debug("Failed to close the browser driver")
    try:
        sys.exit()
    except SystemExit:
        raise  # Re-raise SystemExit so the script actually exits
    except Exception:
print("sys.exit() 执行失败") def main(): """ """ # 请求关键词 response = getReptileTask() global status_task global beginFiltrationTime global endFiltrationTime global keyword if response['status_code'] == 200 and response['data']['code'] == 200: log.debug("call success") search_word = "" for item in response['data']['rows']: if item['name'] == 'facebook': search_word = item['keyword'] table_name = item['tableName'] status_task = int(item["status"]) keyword = str(item["keyword"]) beginFiltrationTime = int(item["beginFiltrationTime"]) endFiltrationTime = int(item["endFiltrationTime"]) # 简体转繁体 if status_task == 0 and len(search_word) > 0: reptile(None, convert_to_traditional(search_word)) else: log.debug("爬取任务未启用") else: log.debug("call failed") # 请求超时 reptile(None, convert_to_traditional("新闻")) # upload_control() # 全局变量 data = [] # 任务详情 task = {} table_name = "pms_facebook" # 全局字段 keyword = "" # 过滤时间开始 beginFiltrationTime = int(123) # 过滤时间结束 endFiltrationTime = int(123) # 文件根目录 file_dir = f'{os.path.join(os.path.abspath("../"), "network-assets-reptile", "reptile_data", table_name.split("_")[1])}' # 任务目录名称 local_path_name = str(int(time.time())) # 任务目录路径 local_path = f'{os.path.join(file_dir, local_path_name)}' # 任务目录是否创建 local_path_status = create_directory_if_not_exists(local_path) # 是否启用 status_task = 0 # 调用main函数 main()