import time
from utils.Logger import log
from utils.createBrowserDriver import create
from utils.filse import save_json
from api.index import getReptileTask
from utils.index import convert_to_traditional, yt_dlp_download, convert_string_to_time
from pytube import YouTube
import os
from config.settings import get_base_file_url
import sys
# ---------------   selenium dependencies start ----------------
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


# ---------------   selenium dependencies end ----------------

def reptile(browser=None, search_word=""):
    """

    :param browser:
    :param search_word:
    :return:
    """
    browser = browser or create(no_headless=True, using_user_data=False)
    # print(browser)
    # Open the search results page
    url = f'https://www.youtube.com/results?search_query={search_word}'
    browser.get(url)
    wait = WebDriverWait(browser, 10)
    wait.until(EC.presence_of_element_located((By.XPATH, "//div[@id='contents']")))
    log.debug("youtube search results loaded")
    classify_video_list = browser.find_elements(By.XPATH,
                                                "//div[@id='contents']//ytd-video-renderer//div[@id='title-wrapper']//a")
    element_author_list = browser.find_elements(By.XPATH,
                                                "//div[@id='contents']//ytd-video-renderer//ytd-channel-name//yt-formatted-string/a")
    element_time_list = browser.find_elements(By.XPATH,
                                              "//div[@id='contents']//ytd-video-renderer//ytd-video-meta-block//div[@id='metadata-line']/span[2]")
    length = len(classify_video_list)
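    # Walk every result; only the first 6 results shorter than 60 minutes are downloaded (see the filter below)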
    for index in range(length):
        title = classify_video_list[index].get_attribute('title')
        link = classify_video_list[index].get_attribute('href')
        # Extract the video id from the "v=" query parameter and build the canonical watch URL
        video_id = link.split("?")[1].split("&")[0].replace("v=", "")
        url = f'https://www.youtube.com/watch?v={video_id}'
        # Only handle the first 6 results and skip videos of 60 minutes or longer
        if index < 6 and YouTube(url).length // 60 < 60:
            releaseTime = ""
            try:
                releaseTime = str(int(convert_string_to_time(element_time_list[index].text)))
            except:
                releaseTime = str(int(time.time()))
            video_url = []
            # Local path the video will be downloaded to
            download_dir = os.path.join(file_dir, f"{video_id}.mp4")
            # Public access URL for the downloaded file
            access_address = f'{get_base_file_url()}{table_name.split("_")[1]}/{video_id}.mp4'
            # Download the video with yt-dlp
            state_download = yt_dlp_download(url, 'youtube')
            video_url.append(download_dir)

            if state_download:
                # Assemble the record
                obj = {
                    "title": title,
                    "content": f"<video controls style='width:100%' src='{access_address}'></video>",
                    "videoUrl": ",".join(video_url),
                    "link": link,
                    "reptileTime": str(int(time.time())),
                    "type": '视频',
                    "author": element_author_list[index].text,
                    "releaseTime": releaseTime
                }
                data.append(obj)
            else:
                # Download failed; skip this result
                log.debug(f"video download failed: {url}")
    if len(data) > 0:
        # Save the JSON file locally
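        # The output file is named with the current Unix timestamp inside file_dir (e.g. reptile_data/youtube/)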
        state_save = save_json(os.path.join(file_dir, str(int(time.time())) + ".json"), data)
        if state_save:
            log.debug('save file success')
        else:
            log.debug('save file failed')
        script_close(browser)
    else:
        # No data was scraped
        log.info("no data scraped")
        script_close(browser)


def script_close(browser):
    # Close the browser driver
    try:
        browser.close()
        browser.quit()
    except Exception:
        log.debug("failed to close the browser driver")
    sys.exit()


def main():
    """

    """
    # 请求关键词
    response = getReptileTask()
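    # The task API is expected to return {'status_code': ..., 'data': {'code': ..., 'rows': [...]}}
    # (shape assumed from the checks below)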
    global status_task
    # print(response)
    if response['status_code'] == 200 and response['data']['code'] == 200:
        log.debug("call success")
        search_word = ""
        for item in response['data']['rows']:
            if item['name'] == 'youtube':
                search_word = item['keyword']
                table_name = item['tableName']
                status_task = int(item["status"])
        # Convert the keyword from Simplified to Traditional Chinese
        if status_task == 0 and len(search_word) > 0:
            reptile(None, convert_to_traditional(search_word))
        else:
            log.debug("爬取任务未启用")
    else:
        log.debug("call failed")
        # Request failed or timed out; fall back to the default keyword
        reptile(None, convert_to_traditional("新闻"))
        # upload_control()


# Global variables
data = []
table_name = "pms_youtube"
file_dir = os.path.join(os.path.abspath("../"), "network-assets-reptile", "reptile_data", table_name.split("_")[1])
# Task status flag (0 = enabled)
status_task = '0'
# Run the script
if __name__ == '__main__':
    main()