diff --git a/xiaosa/api.json b/xiaosa/api.json
index 5d285cc0..84ec1e38 100644
--- a/xiaosa/api.json
+++ b/xiaosa/api.json
@@ -38,7 +38,7 @@
},
{
"key": "热播影视",
- "name": "热播|弹幕",
+ "name": "热播|影视",
"type": 3,
"api": "csp_AppRJ",
"searchable": 1,
@@ -50,7 +50,7 @@
},
{
"key": "天天影视",
- "name": "天天|弹幕",
+ "name": "天天|影视",
"type": 3,
"api": "csp_AppRJ",
"searchable": 1,
@@ -62,7 +62,7 @@
},
{
"key": "浪酷影视",
- "name": "浪酷|弹幕",
+ "name": "浪酷|影视",
"type": 3,
"api": "csp_AppRJ",
"searchable": 1,
@@ -74,11 +74,30 @@
},
{
"key": "菲儿影视",
- "name": "菲菲|弹幕",
+ "name": "菲菲|影视",
"type": 3,
"api": "csp_AppFerr",
"ext": "sHR2rlsfjI4L3t4RXQMkn/M3t4AXAKTrZj3tfhm1t/gMT3dOrHqIzUNqLUEOIDMvllTbX6e1hMhB2mfpOaCmHNOL1yBB3SmxNyqXlai90EIpdnwOOgCR9Z+YwCTj6ySjzJ2VBiH3eXeOGcavcNeVRA=="
},
+ {
+ "key": "影探影视",
+ "name": "影探|影视",
+ "type": 3,
+ "api": "csp_AppYsV2",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1,
+ "ext": "http://cmsyt114.lyyytv.cn/api.php/app/"
+ },
+ {
+ "key": "哔哩视频",
+ "name": "哔哩|视频",
+ "type": 3,
+ "api": "./py/哔哩视频.py",
+ "ext": {
+ "json": "http://127.0.0.1:9978/file/TVBox/bili_cookie.txt"
+ }
+ },
{
"key": "腾讯视频",
"name": "腾讯|视频",
@@ -102,7 +121,7 @@
},
{
"key": "爱奇艺",
- "name": "爱奇艺|视频",
+ "name": "爱奇异|视频",
"type": 3,
"api": "./js/drpy2.min.js",
"ext": "./js/爱奇艺.js"
@@ -449,6 +468,19 @@
"filterable": 1,
"ext": "https://cs.hgyx.vip/api2/api.php/app/"
},
+ {
+ "key": "金牌影视",
+ "name": "金牌|影视",
+ "type": 3,
+ "api": "./py/金牌影视.py",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1,
+ "playerType": 2,
+ "ext": {
+ "site": "https://www.hkybqufgh.com,https://www.sizhengxt.com,https://0996zp.com,https://9zhoukj.com/,https://www.sizhengxt.com,https://www.tjrongze.com,https://www.jiabaide.cn,https://cqzuoer.com"
+ }
+ },
{
"key": "猎手影视",
"name": "猎手|APP",
@@ -787,12 +819,6 @@
"playerType": 2,
"ext": "https://shdy5.us"
},
- {
- "key": "金牌影视",
- "name": "金牌|影视",
- "type": 3,
- "api": "csp_Jpys"
- },
{
"key": "尘落影视",
"name": "尘落|影视",
@@ -1691,6 +1717,12 @@
},
"ext": "./json/alist.json"
},
+ {
+ "key": "网络直播",
+ "name": "网络|直播",
+ "type": 3,
+ "api": "./py/网络直播.py"
+ },
{
"key": "88看球",
"name": "88|看球",
diff --git a/xiaosa/js/优酷视频.js b/xiaosa/js/优酷视频.js
index f335b99d..fea1ad3d 100644
--- a/xiaosa/js/优酷视频.js
+++ b/xiaosa/js/优酷视频.js
@@ -2132,14 +2132,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@@ -2147,7 +2147,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),
diff --git a/xiaosa/js/爱奇艺.js b/xiaosa/js/爱奇艺.js
index 3f3c5aba..7617d4c3 100644
--- a/xiaosa/js/爱奇艺.js
+++ b/xiaosa/js/爱奇艺.js
@@ -1354,14 +1354,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@@ -1369,7 +1369,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),
diff --git a/xiaosa/js/腾讯视频.js b/xiaosa/js/腾讯视频.js
index a3ebd8aa..753d9912 100644
--- a/xiaosa/js/腾讯视频.js
+++ b/xiaosa/js/腾讯视频.js
@@ -670,14 +670,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@@ -685,7 +685,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),
diff --git a/xiaosa/js/芒果视频.js b/xiaosa/js/芒果视频.js
index 3f517be0..d7bd5e7b 100644
--- a/xiaosa/js/芒果视频.js
+++ b/xiaosa/js/芒果视频.js
@@ -679,14 +679,14 @@ var rule = {
parse: 0,
url: bata.url,
jx: 0,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
} else {
input = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
} catch {
@@ -694,7 +694,7 @@ var rule = {
parse: 0,
url: input.split("?")[0],
jx: 1,
- danmaku: "http://103.45.162.207:25252/hbdm.php?key=7894561232&id=" + input.split("?")[0]
+ danmaku: "https://danmu.zxz.ee/?type=xml&id=" + input.split("?")[0]
};
}
}),
diff --git a/xiaosa/json/lj.json b/xiaosa/json/lj.json
index ca10862c..5ed43fb3 100644
--- a/xiaosa/json/lj.json
+++ b/xiaosa/json/lj.json
@@ -1,5 +1,5 @@
{
- "SiteUrl": "https://www.leijing1.com",
+ "SiteUrl": "https://www.leijing.xyz",
"Classes": [
{
"type_name": "电影",
diff --git a/xiaosa/py/优酷视频.py b/xiaosa/py/优酷视频.py
new file mode 100644
index 00000000..399d0db1
--- /dev/null
+++ b/xiaosa/py/优酷视频.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from urllib.parse import quote
+from Crypto.Hash import MD5
+import requests
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.session = requests.Session()
+ self.session.headers.update(self.headers)
+ self.session.cookies.update(self.cookie)
+ self.get_ctoken()
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ host='https://www.youku.com'
+
+ shost='https://search.youku.com'
+
+ h5host='https://acs.youku.com'
+
+ ihost='https://v.youku.com'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)',
+ 'Referer': f'{host}/'
+ }
+
+ cookie={
+ "__ysuid": "17416134165380iB",
+ "__aysid": "1741613416541WbD",
+ "xlly_s": "1",
+ "isI18n": "false",
+ "cna": "bNdVIKmmsHgCAXW9W6yrQ1/s",
+ "__ayft": "1741672162330",
+ "__arpvid": "1741672162331FBKgrn-1741672162342",
+ "__ayscnt": "1",
+ "__aypstp": "1",
+ "__ayspstp": "3",
+ "tfstk": "gZbiib4JpG-6DqW-B98_2rwPuFrd1fTXQt3vHEp4YpJIBA3OgrWcwOi90RTOo9XVQ5tAM5NcK_CP6Ep97K2ce1XDc59v3KXAgGFLyzC11ET2n8U8yoyib67M3xL25e8gS8pbyzC1_ET4e8URWTsSnHv2uh8VTeJBgEuN3d-ELQAWuKWV36PHGpJ2uEWVTxvicLX1ewyUXYSekxMf-CxMEqpnoqVvshvP_pABOwvXjL5wKqeulm52np_zpkfCDGW9Ot4uKFIRwZtP7vP9_gfAr3KEpDWXSIfWRay-DHIc_Z-hAzkD1i5Ooi5LZ0O5YO_1mUc476YMI3R6xzucUnRlNe_zemKdm172xMwr2L7CTgIkbvndhFAVh3_YFV9Ng__52U4SQKIdZZjc4diE4EUxlFrfKmiXbBOHeP72v7sAahuTtWm78hRB1yV3tmg9bBOEhWVnq5KwOBL5."
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ categories = ["电视剧", "电影", "综艺", "动漫", "少儿", "纪录片", "文化", "亲子", "教育", "搞笑", "生活",
+ "体育", "音乐", "游戏"]
+ classes = [{'type_name': category, 'type_id': category} for category in categories]
+ filters = {}
+ self.typeid = {}
+ with ThreadPoolExecutor(max_workers=len(categories)) as executor:
+ tasks = {
+ executor.submit(self.cf, {'type': category}, True): category
+ for category in categories
+ }
+
+ for future in as_completed(tasks):
+ try:
+ category = tasks[future]
+ session, ft = future.result()
+ filters[category] = ft
+ self.typeid[category] = session
+ except Exception as e:
+ print(f"处理分类 {tasks[future]} 时出错: {str(e)}")
+
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ try:
+ vlist = []
+ params={"ms_codes":"2019061000","params":"{\"debug\":0,\"gray\":0,\"pageNo\":1,\"utdid\":\"ZYmGMAAAACkDAMU8hbiMmYdd\",\"userId\":\"\",\"bizKey\":\"YOUKU_WEB\",\"appPackageKey\":\"com.youku.YouKu\",\"showNodeList\":0,\"reqSubNode\":0,\"nodeKey\":\"WEBHOME\",\"bizContext\":\"{\\\"spmA\\\":\\\"a2hja\\\"}\"}","system_info":"{\"device\":\"pcweb\",\"os\":\"pcweb\",\"ver\":\"1.0.0.0\",\"userAgent\":\"Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)\",\"guid\":\"1590141704165YXe\",\"appPackageKey\":\"com.youku.pcweb\",\"young\":0,\"brand\":\"\",\"network\":\"\",\"ouid\":\"\",\"idfa\":\"\",\"scale\":\"\",\"operator\":\"\",\"resolution\":\"\",\"pid\":\"\",\"childGender\":0,\"zx\":0}"}
+ data=self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.home.query/1.0/',params)
+ okey=list(data['data'].keys())[0]
+ for i in data['data'][okey]['data']['nodes'][0]['nodes'][-1]['nodes'][0]['nodes']:
+ if i.get('nodes') and i['nodes'][0].get('data'):
+ i=i['nodes'][0]['data']
+ if i.get('assignId'):
+ vlist.append({
+ 'vod_id': i['assignId'],
+ 'vod_name': i.get('title'),
+ 'vod_pic': i.get('vImg') or i.get('img'),
+ 'vod_year': i.get('mark',{}).get('data',{}).get('text'),
+ 'vod_remarks': i.get('summary')
+ })
+ return {'list': vlist}
+ except Exception as e:
+ print(f"处理主页视频数据时出错: {str(e)}")
+ return {'list': []}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {}
+ vlist = []
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ pagecount = 9999
+ params = {'type': tid}
+ id = self.typeid[tid]
+ params.update(extend)
+ if pg == '1':
+ id=self.cf(params)
+ data=self.session.get(f'{self.host}/category/data?session={id}&params={quote(json.dumps(params))}&pageNo={pg}').json()
+ try:
+ data=data['data']['filterData']
+ for i in data['listData']:
+ if i.get('videoLink') and 's=' in i['videoLink']:
+ vlist.append({
+ 'vod_id': i.get('videoLink').split('s=')[-1],
+ 'vod_name': i.get('title'),
+ 'vod_pic': i.get('img'),
+ 'vod_year': i.get('rightTagText'),
+ 'vod_remarks': i.get('summary')
+ })
+ self.typeid[tid]=quote(json.dumps(data['session']))
+ except:
+ pagecount=pg
+ result['list'] = vlist
+ result['pagecount'] = pagecount
+ return result
+
+ def detailContent(self, ids):
+ try:
+ data=self.session.get(f'{self.ihost}/v_getvideo_info/?showId={ids[0]}').json()
+ v=data['data']
+ vod = {
+ 'type_name': v.get('showVideotype'),
+ 'vod_year': v.get('lastUpdate'),
+ 'vod_remarks': v.get('rc_title'),
+ 'vod_actor': v.get('_personNameStr'),
+ 'vod_content': v.get('showdesc'),
+ 'vod_play_from': '优酷',
+ 'vod_play_url': ''
+ }
+ params={"biz":"new_detail_web2","videoId":v.get('vid'),"scene":"web_page","componentVersion":"3","ip":data.get('ip'),"debug":0,"utdid":"ZYmGMAAAACkDAMU8hbiMmYdd","userId":0,"platform":"pc","nextSession":"","gray":0,"source":"pcNoPrev","showId":ids[0]}
+ sdata,index=self.getinfo(params)
+ pdata=sdata['nodes']
+ if index > len(pdata):
+ batch_size = len(pdata)
+ total_batches = ((index + batch_size - 1) // batch_size) - 1
+ ssj = json.loads(sdata['data']['session'])
+ with ThreadPoolExecutor(max_workers=total_batches) as executor:
+ futures = []
+ for batch in range(total_batches):
+ start = batch_size + 1 + (batch * batch_size)
+ end = start + batch_size - 1
+ next_session = ssj.copy()
+ next_session.update({
+ "itemStartStage": start,
+ "itemEndStage": min(end, index)
+ })
+ current_params = params.copy()
+ current_params['nextSession'] = json.dumps(next_session)
+ futures.append((start, executor.submit(self.getvinfo, current_params)))
+ futures.sort(key=lambda x: x[0])
+
+ for _, future in futures:
+ try:
+ result = future.result()
+ pdata.extend(result['nodes'])
+ except Exception as e:
+ print(f"Error fetching data: {str(e)}")
+ vod['vod_play_url'] = '#'.join([f"{i['data'].get('title')}${i['data']['action'].get('value')}" for i in pdata])
+ return {'list': [vod]}
+ except Exception as e:
+ print(e)
+ return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'呜呜呜${self.host}'}]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.session.get(f'{self.shost}/api/search?pg={pg}&keyword={key}').json()
+ vlist = []
+ for i in data['pageComponentList']:
+ if i.get('commonData') and (i['commonData'].get('showId') or i['commonData'].get('realShowId')):
+ i=i['commonData']
+ vlist.append({
+ 'vod_id': i.get('showId') or i.get('realShowId'),
+ 'vod_name': i['titleDTO'].get('displayName'),
+ 'vod_pic': i['posterDTO'].get('vThumbUrl'),
+ 'vod_year': i.get('feature'),
+ 'vod_remarks': i.get('updateNotice')
+ })
+ return {'list': vlist, 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ return {'jx':1,'parse': 1, 'url': f"{self.ihost}/video?vid={id}", 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def cf(self,params,b=False):
+ response = self.session.get(f'{self.host}/category/data?params={quote(json.dumps(params))}&optionRefresh=1&pageNo=1').json()
+ data=response['data']['filterData']
+ session=quote(json.dumps(data['session']))
+ if b:
+ return session,self.get_filter_data(data['filter']['filterData'][1:])
+ return session
+
+ def process_key(self, key):
+ if '_' not in key:
+ return key
+ parts = key.split('_')
+ result = parts[0]
+ for part in parts[1:]:
+ if part:
+ result += part[0].upper() + part[1:]
+ return result
+
+ def get_filter_data(self, data):
+ result = []
+ try:
+ for item in data:
+ if not item.get('subFilter'):
+ continue
+ first_sub = item['subFilter'][0]
+ if not first_sub.get('filterType'):
+ continue
+ filter_item = {
+ 'key': self.process_key(first_sub['filterType']),
+ 'name': first_sub['title'],
+ 'value': []
+ }
+ for sub in item['subFilter']:
+ if 'value' in sub:
+ filter_item['value'].append({
+ 'n': sub['title'],
+ 'v': sub['value']
+ })
+ if filter_item['value']:
+ result.append(filter_item)
+
+ except Exception as e:
+ print(f"处理筛选数据时出错: {str(e)}")
+
+ return result
+
+ def get_ctoken(self):
+ data=self.session.get(f'{self.h5host}/h5/mtop.ykrec.recommendservice.recommend/1.0/?jsv=2.6.1&appKey=24679788')
+
+ def md5(self,t,text):
+ h = MD5.new()
+ token=self.session.cookies.get('_m_h5_tk').split('_')[0]
+ data=f"{token}&{t}&24679788&{text}"
+ h.update(data.encode('utf-8'))
+ return h.hexdigest()
+
+ def getdata(self, url, params, recursion_count=0, max_recursion=3):
+ data = json.dumps(params)
+ t = int(time.time() * 1000)
+ jsdata = {
+ 'appKey': '24679788',
+ 't': t,
+ 'sign': self.md5(t, data),
+ 'data': data
+ }
+ response = self.session.get(url, params=jsdata)
+ if '令牌过期' in response.text:
+ if recursion_count >= max_recursion:
+ raise Exception("达到最大递归次数,无法继续请求")
+ self.get_ctoken()
+ return self.getdata(url, params, recursion_count + 1, max_recursion)
+ else:
+ return response.json()
+
+ def getvinfo(self,params):
+ body = {
+ "ms_codes": "2019030100",
+ "params": json.dumps(params),
+ "system_info": "{\"os\":\"iku\",\"device\":\"iku\",\"ver\":\"9.2.9\",\"appPackageKey\":\"com.youku.iku\",\"appPackageId\":\"pcweb\"}"
+ }
+ data = self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.gateway.new.execute/1.0/', body)
+ okey = list(data['data'].keys())[0]
+ i = data['data'][okey]['data']
+ return i
+
+ def getinfo(self,params):
+ i = self.getvinfo(params)
+ jdata=i['nodes'][0]['nodes'][3]
+ info=i['data']['extra']['episodeTotal']
+ if i['data']['extra']['showCategory'] in ['电影','游戏']:
+ jdata = i['nodes'][0]['nodes'][4]
+ return jdata,info
+
diff --git a/xiaosa/py/光速影视.py b/xiaosa/py/光速影视.py
index 4a58f393..33b12045 100644
--- a/xiaosa/py/光速影视.py
+++ b/xiaosa/py/光速影视.py
@@ -111,21 +111,21 @@ class Spider(Spider):
def playerContent(self, flag, id, vipFlags):
ids = json.loads(self.d64(id))
- h={"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
- url = ids['url']
- p=1
+ h = {"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
try:
- if re.search(r'\?url=', ids['parse_api_url']):
- data=self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
- url=data.get('url') or data['data'].get('url')
- elif not re.search(r'\.m3u8|\.mp4', ids.get('url')):
- body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes('encrypt', ids['url']))}&token={ids.get('token')}"
+ if re.search(r'url=', ids['parse_api_url']):
+ data = self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
+ url = data.get('url') or data['data'].get('url')
+ else:
+ body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'], True))}&token={ids.get('token')}"
b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
url = json.loads(b)['url']
- p=0
+ if 'error' in url: raise ValueError(f"解析失败: {url}")
+ p = 0
except Exception as e:
- print('错误信息:',e)
- pass
+ print('错误信息:', e)
+ url, p = ids['url'], 1
+
if re.search(r'\.jpg|\.png|\.jpeg', url):
url = self.Mproxy(url)
result = {}
diff --git a/xiaosa/py/哔哩视频.py b/xiaosa/py/哔哩视频.py
new file mode 100644
index 00000000..2b8a7cf5
--- /dev/null
+++ b/xiaosa/py/哔哩视频.py
@@ -0,0 +1,468 @@
+#coding=utf-8
+#!/usr/bin/python
+import sys
+import json
+import time
+from datetime import datetime
+from difflib import SequenceMatcher
+from urllib.parse import quote, unquote
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider): # 元类 默认的元类 type
+ def getName(self):
+ return "B站番剧"
+
+ def init(self, extend):
+ try:
+ self.extendDict = json.loads(extend)
+ except:
+ self.extendDict = {}
+
+ def destroy(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "番剧": "1",
+ "国创": "4",
+ "电影": "2",
+ "综艺": "7",
+ "电视剧": "5",
+ }
+ classes = []
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ result['class'] = classes
+ if filter:
+ result['filters'] = self.config['filter']
+ currentYear = datetime.now().year
+ for resultfilter in result['filters']:
+ for rf in result['filters'][resultfilter]:
+ if rf['key'] == 'year':
+ for rfv in rf['value']:
+ if rfv['n'].isdigit():
+ if int(rfv['n']) < currentYear:
+ pos = rf['value'].index(rfv)
+ for year in range(currentYear, int(rfv['n']), -1):
+ rf['value'].insert(pos, {'v': f'[{str(year)},{str(year+1)})', 'n': str(year)})
+ pos += 1
+ break
+ else:
+ break
+ elif rf['key'] == 'release_date':
+ for rfv in rf['value']:
+ if rfv['n'].isdigit():
+ if int(rfv['n']) < currentYear:
+ pos = rf['value'].index(rfv)
+ for year in range(currentYear, int(rfv['n']), -1):
+ rf['value'].insert(pos, {'v': f'[{str(year)}-01-01 00:00:00,{str(year+1)}-01-01 00:00:00)', 'n': str(year)})
+ pos += 1
+ break
+ else:
+ break
+ return result
+
+ def homeVideoContent(self):
+ return self.categoryContent('1', '1', False, {})
+
+ def categoryContent(self, cid, page, filter, ext):
+ page = int(page)
+ result = {}
+ videos = []
+ cookie, _, _ = self.getCookie('{}')
+ url = 'https://api.bilibili.com/pgc/season/index/result?order=2&sort=0&pagesize=20&type=1&st={}&season_type={}&page={}'.format(cid, cid, page)
+ for key in ext:
+ url += f'&{key}={quote(ext[key])}'
+ r = self.fetch(url, headers=self.header, cookies=cookie, timeout=5)
+ data = json.loads(self.cleanText(r.text))
+ vodList = data['data']['list']
+ for vod in vodList:
+ aid = str(vod['season_id']).strip()
+ title = self.removeHtmlTags(self.cleanText(vod['title']))
+ img = vod['cover'].strip()
+ remark = vod['index_show'].strip()
+ videos.append({
+ "vod_id": aid,
+ "vod_name": title,
+ "vod_pic": img,
+ "vod_remarks": remark
+ })
+ lenvideos = len(videos)
+ if data['data']['has_next'] == 1:
+ pagecount = page + 1
+ else:
+ pagecount = page
+ result['list'] = videos
+ result['page'] = page
+ result['pagecount'] = pagecount
+ result['limit'] = lenvideos
+ result['total'] = lenvideos
+ return result
+
+ def detailContent(self, did):
+ did = did[0]
+ url = "http://api.bilibili.com/pgc/view/web/season?season_id={0}".format(did)
+ r = self.fetch(url, headers=self.header, timeout=10)
+ data = json.loads(self.cleanText(r.text))
+ vod = {
+ "vod_id": did,
+ "vod_name": self.removeHtmlTags(data['result']['title']),
+ "vod_pic": data['result']['cover'],
+ "type_name": data['result']['share_sub_title'],
+ "vod_actor": data['result']['actors'].replace('\n', ','),
+ "vod_content": self.removeHtmlTags(data['result']['evaluate'])
+ }
+ videoList = data['result']['episodes']
+ playUrl = ''
+ for video in videoList:
+ eid = video['id']
+ cid = video['cid']
+ name = self.removeHtmlTags(video['share_copy']).replace("#", "-").replace('$', '*')
+ remark = time.strftime('%H:%M:%S', time.gmtime(video['duration']/1000))
+ if remark.startswith('00:'):
+ remark = remark[3:]
+ playUrl = playUrl + '[{}]/{}${}_{}#'.format(remark, name, eid, cid)
+ vod['vod_play_from'] = 'B站番剧'
+ vod['vod_play_url'] = playUrl.strip('#')
+ result = {
+ 'list': [
+ vod
+ ]
+ }
+ return result
+
+ def searchContent(self, key, quick):
+ return self.searchContentPage(key, quick, '1')
+
+ def searchContentPage(self, key, quick, page):
+ videos = []
+ cookie = ''
+ if 'cookie' in self.extendDict:
+ cookie = self.extendDict['cookie']
+ if 'json' in self.extendDict:
+ r = self.fetch(self.extendDict['json'], timeout=10)
+ if 'cookie' in r.json():
+ cookie = r.json()['cookie']
+ if cookie == '':
+ cookie = '{}'
+ elif type(cookie) == str and cookie.startswith('http'):
+ cookie = self.fetch(cookie, timeout=10).text.strip()
+ try:
+ if type(cookie) == dict:
+ cookie = json.dumps(cookie, ensure_ascii=False)
+ except:
+ pass
+ cookie, _, _ = self.getCookie(cookie)
+ url = f'https://api.bilibili.com/x/web-interface/search/type?search_type=media_bangumi&keyword={key}&page={page}'
+ r = self.fetch(url, headers=self.header, cookies=cookie, timeout=5)
+ data = json.loads(self.cleanText(r.text))
+ if 'result' not in data['data']:
+ return {'list': videos}, 1
+ vodList = data['data']['result']
+ for vod in vodList:
+ sid = str(vod['season_id']).strip()
+ title = self.removeHtmlTags(self.cleanText(vod['title']))
+ if SequenceMatcher(None, title, key).ratio() < 0.6 and key not in title:
+ continue
+ img = vod['eps'][0]['cover'].strip()
+ remark = self.removeHtmlTags(vod['index_show']).strip()
+ videos.append({
+ "vod_id": sid,
+ "vod_name": title,
+ "vod_pic": img,
+ "vod_remarks": remark
+ })
+ result = {
+ 'list': videos
+ }
+ return result
+
+ def playerContent(self, flag, pid, vipFlags):
+ result = {}
+ pidList = pid.split("_")
+ aid = pidList[0]
+ cid = pidList[1]
+ url = 'https://api.bilibili.com/pgc/player/web/playurl?ep_id={0}&cid={1}&qn=120&fnval=4048&fnver=0&fourk=1'.format(aid, cid)
+ cookie = ''
+ extendDict = self.extendDict
+ if 'cookie' in extendDict:
+ cookie = extendDict['cookie']
+ if 'json' in extendDict:
+ r = self.fetch(extendDict['json'], timeout=10)
+ if 'cookie' in r.json():
+ cookie = r.json()['cookie']
+ if cookie == '':
+ cookie = '{}'
+ elif type(cookie) == str and cookie.startswith('http'):
+ cookie = self.fetch(cookie, timeout=10).text.strip()
+ try:
+ if type(cookie) == dict:
+ cookie = json.dumps(cookie, ensure_ascii=False)
+ except:
+ pass
+ cookiesDict, _, _ = self.getCookie(cookie)
+ cookies = quote(json.dumps(cookiesDict))
+ if 'thread' in extendDict:
+ thread = str(extendDict['thread'])
+ else:
+ thread = '0'
+ result["parse"] = '0'
+ result["playUrl"] = ''
+ result["url"] = f'http://127.0.0.1:9978/proxy?do=py&type=mpd&cookies={cookies}&url={quote(url)}&aid={aid}&cid={cid}&thread={thread}'
+ result["header"] = self.header
+ result['danmaku'] = 'https://api.bilibili.com/x/v1/dm/list.so?oid={}'.format(cid)
+ result["format"] = 'application/dash+xml'
+ return result
+
+ def localProxy(self, params):
+ if params['type'] == "mpd":
+ return self.proxyMpd(params)
+ if params['type'] == "media":
+ return self.proxyMedia(params)
+ return None
+
+ def proxyMpd(self, params):
+ content, durlinfos, mediaType = self.getDash(params)
+ if mediaType == 'mpd':
+ return [200, "application/dash+xml", content]
+ else:
+ url = content
+ durlinfo = durlinfos['durl'][0]['backup_url']
+ try:
+ r = self.fetch(url, headers=self.header, stream=True, timeout=1)
+ statusCode = r.status_code
+ try:
+ r.close()
+ except:
+ pass
+ except:
+ try:
+ r.close()
+ except:
+ pass
+ statusCode = 404
+ for url in durlinfo:
+ try:
+ r = self.fetch(url, headers=self.header, stream=True, timeout=1)
+ statusCode = r.status_code
+ except:
+ statusCode = 404
+ if statusCode == 200:
+ break
+ try:
+ r.close()
+ except:
+ pass
+ if statusCode != 200 and self.retry == 0:
+ self.retry += 1
+ self.proxyMedia(params, True)
+ header = self.header.copy()
+ if 'range' in params:
+ header['Range'] = params['range']
+ if '127.0.0.1:7777' in url:
+ header['Location'] = url
+ return [302, "video/MP2T", None, header]
+ return [206, "application/octet-stream", self.fetch(content, headers=header, stream=True).content]
+
+ def proxyMedia(self, params, forceRefresh=False):
+ _, dashinfos, _ = self.getDash(params)
+ if 'videoid' in params:
+ videoid = int(params['videoid'])
+ dashinfo = dashinfos['video'][videoid]
+ url = dashinfo['baseUrl']
+ elif 'audioid' in params:
+ audioid = int(params['audioid'])
+ dashinfo = dashinfos['audio'][audioid]
+ url = dashinfo['baseUrl']
+ else:
+ return [404, "text/plain", ""]
+ try:
+ r = self.fetch(url, headers=params['headers'], stream=True)
+ statusCode = r.status_code
+ try:
+ r.close()
+ except:
+ pass
+ except:
+ try:
+ r.close()
+ except:
+ pass
+ statusCode = 404
+ for url in dashinfo['backupUrl']:
+ try:
+ r = self.fetch(url, headers=self.header, stream=True, timeout=1)
+ statusCode = r.status_code
+ except:
+ statusCode = 404
+ if statusCode == 200:
+ break
+ try:
+ r.close()
+ except:
+ pass
+ if statusCode != 200 and self.retry == 0:
+ self.retry += 1
+ self.proxyMedia(params, True)
+ header = self.header.copy()
+ if 'range' in params:
+ header['Range'] = params['range']
+ return [206, "application/octet-stream", self.fetch(url, headers=header, stream=True).content]
+
+ def getDash(self, params, forceRefresh=False):
+ aid = params['aid']
+ cid = params['cid']
+ url = unquote(params['url'])
+ if 'thread' in params:
+ thread = params['thread']
+ else:
+ thread = 0
+ header = self.header.copy()
+ self.setCache('debug', params['cookies'])
+ cookieDict = json.loads(params['cookies'])
+ key = f'bilimdmpdCache_{aid}_{cid}'
+ if forceRefresh:
+ self.delCache(key)
+ else:
+ data = self.getCache(key)
+ if data:
+ return data['content'], data['dashinfos'], data['type']
+
+ cookies = cookieDict.copy()
+ r = self.fetch(url, cookies=cookies, headers=header, timeout=5)
+ data = json.loads(self.cleanText(r.text))
+ if data['code'] != 0:
+ return '', {}, ''
+ if not 'dash' in data['result']:
+ purl = data['result']['durl'][0]['url']
+ try:
+ expiresAt = int(self.regStr(reg='deadline=(\d+)', src=purl).group(1)) - 60
+ except:
+ expiresAt = int(time.time()) + 600
+ if int(thread) > 0:
+ try:
+ self.fetch('http://127.0.0.1:7777')
+ except:
+ self.fetch('http://127.0.0.1:9978/go')
+ purl = f'http://127.0.0.1:7777?url={quote(purl)}&thread={thread}'
+ self.setCache(key, {'content': purl, 'type': 'mp4', 'dashinfos': data['result'], 'expiresAt': expiresAt})
+ return purl, data['result'], 'mp4'
+
+ dashinfos = data['result']['dash']
+ duration = dashinfos['duration']
+ minBufferTime = dashinfos['minBufferTime']
+ videoinfo = ''
+ videoid = 0
+ deadlineList = []
+ # videoList = sorted(dashinfos['video'], key=lambda x: x['bandwidth'], reverse=True)
+ for video in dashinfos['video']:
+ try:
+ deadline = int(self.regStr(reg='deadline=(\d+)', src=video['baseUrl']).group(1))
+ except:
+ deadline = int(time.time()) + 600
+ deadlineList.append(deadline)
+ codecs = video['codecs']
+ bandwidth = video['bandwidth']
+ frameRate = video['frameRate']
+ height = video['height']
+ width = video['width']
+ void = video['id']
+ vidparams = params.copy()
+ vidparams['videoid'] = videoid
+ baseUrl = f'http://127.0.0.1:9978/proxy?do=py&type=media&cookies={quote(json.dumps(cookies))}&url={quote(url)}&aid={aid}&cid={cid}&videoid={videoid}'
+ videoinfo = videoinfo + f"""<Representation id="{void}" bandwidth="{bandwidth}" codecs="{codecs}" frameRate="{frameRate}" height="{height}" width="{width}" mimeType="video/mp4" startWithSAP="1">
+ <BaseURL>{baseUrl}</BaseURL>
+ <SegmentBase indexRange="{video['segment_base']['index_range']}">
+ <Initialization range="{video['segment_base']['initialization']}"/>
+ </SegmentBase>
+ </Representation>\n"""
+ videoid += 1
+ audioinfo = ''
+ audioid = 0
+ # audioList = sorted(dashinfos['audio'], key=lambda x: x['bandwidth'], reverse=True)
+ for audio in dashinfos['audio']:
+ try:
+ deadline = int(self.regStr(reg='deadline=(\d+)', src=audio['baseUrl']).group(1))
+ except:
+ deadline = int(time.time()) + 600
+ deadlineList.append(deadline)
+ bandwidth = audio['bandwidth']
+ codecs = audio['codecs']
+ aoid = audio['id']
+ aidparams = params.copy()
+ aidparams['audioid'] = audioid
+ baseUrl = f'http://127.0.0.1:9978/proxy?do=py&type=media&cookies={quote(json.dumps(cookies))}&url={quote(url)}&aid={aid}&cid={cid}&audioid={audioid}'
+ audioinfo = audioinfo + f"""<Representation id="{aoid}" bandwidth="{bandwidth}" codecs="{codecs}" mimeType="audio/mp4" startWithSAP="1">
+ <BaseURL>{baseUrl}</BaseURL>
+ <SegmentBase indexRange="{audio['segment_base']['index_range']}">
+ <Initialization range="{audio['segment_base']['initialization']}"/>
+ </SegmentBase>
+ </Representation>\n"""
+ audioid += 1
+ mpd = f"""<?xml version="1.0" encoding="UTF-8"?>
+ <MPD xmlns="urn:mpeg:dash:schema:mpd:2011" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" type="static" mediaPresentationDuration="PT{duration}S" minBufferTime="PT{minBufferTime}S">
+ <Period>
+ <AdaptationSet contentType="video">
+ {videoinfo.strip()}
+ </AdaptationSet>
+ <AdaptationSet contentType="audio">
+ {audioinfo.strip()}
+ </AdaptationSet>
+ </Period>
+ </MPD>"""
+ expiresAt = min(deadlineList) - 60
+ self.setCache(key, {'type': 'mpd', 'content': mpd.replace('&', '&amp;'), 'dashinfos': dashinfos, 'expiresAt': expiresAt})
+ return mpd.replace('&', '&amp;'), dashinfos, 'mpd'
+
+ def getCookie(self, cookie):
+ if '{' in cookie and '}' in cookie:
+ cookies = json.loads(cookie)
+ else:
+ cookies = dict([co.strip().split('=', 1) for co in cookie.strip(';').split(';')])
+ bblogin = self.getCache('bblogin')
+ if bblogin:
+ imgKey = bblogin['imgKey']
+ subKey = bblogin['subKey']
+ return cookies, imgKey, subKey
+
+ header = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.54 Safari/537.36"
+ }
+ r = self.fetch("http://api.bilibili.com/x/web-interface/nav", cookies=cookies, headers=header, timeout=10)
+ data = json.loads(r.text)
+ code = data["code"]
+ if code == 0:
+ imgKey = data['data']['wbi_img']['img_url'].rsplit('/', 1)[1].split('.')[0]
+ subKey = data['data']['wbi_img']['sub_url'].rsplit('/', 1)[1].split('.')[0]
+ self.setCache('bblogin', {'imgKey': imgKey, 'subKey': subKey, 'expiresAt': int(time.time()) + 1200})
+ return cookies, imgKey, subKey
+ r = self.fetch("https://www.bilibili.com/", headers=header, timeout=5)
+ cookies = r.cookies.get_dict()
+ imgKey = ''
+ subKey = ''
+ return cookies, imgKey, subKey
+
+ def removeHtmlTags(self, src):
+ from re import sub, compile
+ clean = compile('<.*?>')
+ return sub(clean, '', src)
+
+ retry = 0
+ header = {
+ "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.54 Safari/537.36",
+ "Referer": "https://www.bilibili.com"
+ }
+ config = {
+ "filter": {"1":[{"key":"season_version","name":"类型","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"正片"},{"v":'2',"n":"电影"},{"v":'3',"n":"其他"}]},{"key":"area","name":"地区","value":[{"v":'-1',"n":"全部"},{"v":'2',"n":"日本"},{"v":'3',"n":"美国"},{"v":"1,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70","n":"其他"}]},{"key":"is_finish","name":"状态","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"完结"},{"v":'0',"n":"连载"}]},{"key":"copyright","name":"版权","value":[{"v":'-1',"n":"全部"},{"v":'3',"n":"独家"},{"v":"1,2,4","n":"其他"}]},{"key":"season_status","name":"付费","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"免费"},{"v":"2,6","n":"付费"},{"v":"4,6","n":"大会员"}]},{"key":"season_month","name":"季度","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"1月"},{"v":'4',"n":"4月"},{"v":'7',"n":"7月"},{"v":'10',"n":"10月"}]},{"key":"year","name":"年份","value":[{"v":'-1',"n":"全部"},{"v":"[2023,2024)","n":"2023"},{"v":"[2022,2023)","n":"2022"},{"v":"[2021,2022)","n":"2021"},{"v":"[2020,2021)","n":"2020"},{"v":"[2019,2020)","n":"2019"},{"v":"[2018,2019)","n":"2018"},{"v":"[2017,2018)","n":"2017"},{"v":"[2016,2017)","n":"2016"},{"v":"[2015,2016)","n":"2015"},{"v":"[2010,2015)","n":"2014-2010"},{"v":"[2005,2010)","n":"2009-2005"},{"v":"[2000,2005)","n":"2004-2000"},{"v":"[1990,2000)","n":"90年代"},{"v":"[1980,1990)","n":"80年代"},{"v":"[,1980)","n":"更早"}]},{"key":"style_id","name":"风格","value":[{"v":'-1',"n":"全部"},{"v":'10010',"n":"原创"},{"v":'10011',"n":"漫画改"},{"v":'10012',"n":"小说改"},{"v":'10013',"n":"游戏改"},{"v":'10102',"n":"特摄"},{"v":'10015',"n":"布袋戏"},{"v":'10016',"n":"热血"},{"v":'10017',"n":"穿越"},{"v":'10018',"n":"奇幻"},{"v":'10020',"n":"战斗"},{"v":'10021',"n":"搞笑"},{"v":'10022',"n":"日常"},{"v":'10023',"n":"科幻"},{"v":'10024',"n":"萌系"},{"v":'10025',"n":"治愈"},{"v":'10026',"n":"校园"},{"v":'10027',"n":"少儿"},{"v":'10028',"n":"泡面"},{"v":'10029',"n":"恋爱"},{"v":'10030',"n":"少女"},{"v":'10031',"n":"魔法
"},{"v":'10032',"n":"冒险"},{"v":'10033',"n":"历史"},{"v":'10034',"n":"架空"},{"v":'10035',"n":"机战"},{"v":'10036',"n":"神魔"},{"v":'10037',"n":"声控"},{"v":'10038',"n":"运动"},{"v":'10039',"n":"励志"},{"v":'10040',"n":"音乐"},{"v":'10041',"n":"推理"},{"v":'10042',"n":"社团"},{"v":'10043',"n":"智斗"},{"v":'10044',"n":"催泪"},{"v":'10045',"n":"美食"},{"v":'10046',"n":"偶像"},{"v":'10047',"n":"乙女"},{"v":'10048',"n":"职场"}]}],"4":[{"key":"season_version","name":"类型","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"正片"},{"v":'2',"n":"电影"},{"v":'3',"n":"其他"}]},{"key":"area","name":"地区","value":[{"v":'-1',"n":"全部"},{"v":'2',"n":"日本"},{"v":'3',"n":"美国"},{"v":"1,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70","n":"其他"}]},{"key":"is_finish","name":"状态","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"完结"},{"v":'0',"n":"连载"}]},{"key":"copyright","name":"版权","value":[{"v":'-1',"n":"全部"},{"v":'3',"n":"独家"},{"v":"1,2,4","n":"其他"}]},{"key":"season_status","name":"付费","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"免费"},{"v":"2,6","n":"付费"},{"v":"4,6","n":"大会员"}]},{"key":"year","name":"年份","value":[{"v":'-1',"n":"全部"},{"v":"[2023,2024)","n":"2023"},{"v":"[2022,2023)","n":"2022"},{"v":"[2021,2022)","n":"2021"},{"v":"[2020,2021)","n":"2020"},{"v":"[2019,2020)","n":"2019"},{"v":"[2018,2019)","n":"2018"},{"v":"[2017,2018)","n":"2017"},{"v":"[2016,2017)","n":"2016"},{"v":"[2015,2016)","n":"2015"},{"v":"[2010,2015)","n":"2014-2010"},{"v":"[2005,2010)","n":"2009-2005"},{"v":"[2000,2005)","n":"2004-2000"},{"v":"[1990,2000)","n":"90年代"},{"v":"[1980,1990)","n":"80年代"},{"v":"[,1980)","n":"更早"}]},{"key":"style_id","name":"风格","value":[{"v":'-1',"n":"全部"},{"v":'10010',"n":"原创"},{"v":'10011',"n":"漫画改"},{"v":'10012',"n":"小说改"},{"v":'10013',"n":"游戏改"},{"v":'10014',"n":"动态漫"},{"v":'10015',"n":"布袋戏"},{"v":'10016',"n":"热血"},{"v":'10018',"n":"奇幻"},{"v":'10019',"n":"玄幻"},{"v":'10020',"n":"战斗"},{"v":'10
021',"n":"搞笑"},{"v":'10078',"n":"武侠"},{"v":'10022',"n":"日常"},{"v":'10023',"n":"科幻"},{"v":'10024',"n":"萌系"},{"v":'10025',"n":"治愈"},{"v":'10057',"n":"悬疑"},{"v":'10026',"n":"校园"},{"v":'10027',"n":"少儿"},{"v":'10028',"n":"泡面"},{"v":'10029',"n":"恋爱"},{"v":'10030',"n":"少女"},{"v":'10031',"n":"魔法"},{"v":'10033',"n":"历史"},{"v":'10035',"n":"机战"},{"v":'10036',"n":"神魔"},{"v":'10037',"n":"声控"},{"v":'10038',"n":"运动"},{"v":'10039',"n":"励志"},{"v":'10040',"n":"音乐"},{"v":'10041',"n":"推理"},{"v":'10042',"n":"社团"},{"v":'10043',"n":"智斗"},{"v":'10044',"n":"催泪"},{"v":'10045',"n":"美食"},{"v":'10046',"n":"偶像"},{"v":'10047',"n":"乙女"},{"v":'10048',"n":"职场"},{"v":'10049',"n":"古风"}]}],"2":[{"key":"area","name":"地区","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"中国大陆"},{"v":"6,7","n":"中国港台"},{"v":'3',"n":"美国"},{"v":'2',"n":"日本"},{"v":'8',"n":"韩国"},{"v":'9',"n":"法国"},{"v":'4',"n":"英国"},{"v":'15',"n":"德国"},{"v":'10',"n":"泰国"},{"v":'35',"n":"意大利"},{"v":'13',"n":"西班牙"},{"v":"5,11,12,14,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70","n":"其他"}]},{"key":"season_status","name":"付费","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"免费"},{"v":"2,6","n":"付费"},{"v":"4,6","n":"大会员"}]},{"key":"style_id","name":"风格","value":[{"v":'-1',"n":"全部"},{"v":'10104',"n":"短片"},{"v":'10050',"n":"剧情"},{"v":'10051',"n":"喜剧"},{"v":'10052',"n":"爱情"},{"v":'10053',"n":"动作"},{"v":'10054',"n":"恐怖"},{"v":'10023',"n":"科幻"},{"v":'10055',"n":"犯罪"},{"v":'10056',"n":"惊悚"},{"v":'10057',"n":"悬疑"},{"v":'10018',"n":"奇幻"},{"v":'10058',"n":"战争"},{"v":'10059',"n":"动画"},{"v":'10060',"n":"传记"},{"v":'10061',"n":"家庭"},{"v":'10062',"n":"歌舞"},{"v":'10033',"n":"历史"},{"v":'10032',"n":"冒险"},{"v":'10063',"n":"纪实"},{"v":'10064',"n":"灾难"},{"v":'10011',"n":"漫画改"},{"v":'10012',"n":"小说改"}]},{"key":"release_date","name":"年份","value":[{"v":'-1',"n":"全部"},{"v":"[2023-01-01 00:00:00,2024-01-01 00:00:00)","n":"2023"},{"v":"[2022-01-01 00:00:00,2023-01-01 
00:00:00)","n":"2022"},{"v":"[2021-01-01 00:00:00,2022-01-01 00:00:00)","n":"2021"},{"v":"[2020-01-01 00:00:00,2021-01-01 00:00:00)","n":"2020"},{"v":"[2019-01-01 00:00:00,2020-01-01 00:00:00)","n":"2019"},{"v":"[2018-01-01 00:00:00,2019-01-01 00:00:00)","n":"2018"},{"v":"[2017-01-01 00:00:00,2018-01-01 00:00:00)","n":"2017"},{"v":"[2016-01-01 00:00:00,2017-01-01 00:00:00)","n":"2016"},{"v":"[2010-01-01 00:00:00,2016-01-01 00:00:00)","n":"2015-2010"},{"v":"[2005-01-01 00:00:00,2010-01-01 00:00:00)","n":"2009-2005"},{"v":"[2000-01-01 00:00:00,2005-01-01 00:00:00)","n":"2004-2000"},{"v":"[1990-01-01 00:00:00,2000-01-01 00:00:00)","n":"90年代"},{"v":"[1980-01-01 00:00:00,1990-01-01 00:00:00)","n":"80年代"},{"v":"[,1980-01-01 00:00:00)","n":"更早"}]}],"5":[{"key":"area","name":"地区","value":[{"v":'-1',"n":"全部"},{"v":"1,6,7","n":"中国"},{"v":'2',"n":"日本"},{"v":'3',"n":"美国"},{"v":'4',"n":"英国"},{"v":'10',"n":"泰国"},{"v":"5,8,9,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70","n":"其他"}]},{"key":"season_status","name":"付费","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"免费"},{"v":"2,6","n":"付费"},{"v":"4,6","n":"大会员"}]},{"key":"style_id","name":"风格","value":[{"v":'-1',"n":"全部"},{"v":'10021',"n":"搞笑"},{"v":'10018',"n":"奇幻"},{"v":'10058',"n":"战争"},{"v":'10078',"n":"武侠"},{"v":'10079',"n":"青春"},{"v":'10103',"n":"短剧"},{"v":'10080',"n":"都市"},{"v":'10081',"n":"古装"},{"v":'10082',"n":"谍战"},{"v":'10083',"n":"经典"},{"v":'10084',"n":"情感"},{"v":'10057',"n":"悬疑"},{"v":'10039',"n":"励志"},{"v":'10085',"n":"神话"},{"v":'10017',"n":"穿越"},{"v":'10086',"n":"年代"},{"v":'10087',"n":"农村"},{"v":'10088',"n":"刑侦"},{"v":'10050',"n":"剧情"},{"v":'10061',"n":"家庭"},{"v":'10033',"n":"历史"},{"v":'10089',"n":"军旅"},{"v":'10023',"n":"科幻"}]},{"key":"release_date","name":"年份","value":[{"v":'-1',"n":"全部"},{"v":"[2023-01-01 00:00:00,2024-01-01 00:00:00)","n":"2023"},{"v":"[2022-01-01 00:00:00,2023-01-01 
00:00:00)","n":"2022"},{"v":"[2021-01-01 00:00:00,2022-01-01 00:00:00)","n":"2021"},{"v":"[2020-01-01 00:00:00,2021-01-01 00:00:00)","n":"2020"},{"v":"[2019-01-01 00:00:00,2020-01-01 00:00:00)","n":"2019"},{"v":"[2018-01-01 00:00:00,2019-01-01 00:00:00)","n":"2018"},{"v":"[2017-01-01 00:00:00,2018-01-01 00:00:00)","n":"2017"},{"v":"[2016-01-01 00:00:00,2017-01-01 00:00:00)","n":"2016"},{"v":"[2010-01-01 00:00:00,2016-01-01 00:00:00)","n":"2015-2010"},{"v":"[2005-01-01 00:00:00,2010-01-01 00:00:00)","n":"2009-2005"},{"v":"[2000-01-01 00:00:00,2005-01-01 00:00:00)","n":"2004-2000"},{"v":"[1990-01-01 00:00:00,2000-01-01 00:00:00)","n":"90年代"},{"v":"[1980-01-01 00:00:00,1990-01-01 00:00:00)","n":"80年代"},{"v":"[,1980-01-01 00:00:00)","n":"更早"}]}],"7":[{"key":"season_status","name":"付费","value":[{"v":'-1',"n":"全部"},{"v":'1',"n":"免费"},{"v":"2,6","n":"付费"},{"v":"4,6","n":"大会员"}]},{"key":"style_id","name":"风格","value":[{"v":'-1',"n":"全部"},{"v":'10040',"n":"音乐"},{"v":'10090',"n":"访谈"},{"v":'10091',"n":"脱口秀"},{"v":'10092',"n":"真人秀"},{"v":'10094',"n":"选秀"},{"v":'10045',"n":"美食"},{"v":'10095',"n":"旅游"},{"v":'10098',"n":"晚会"},{"v":'10096',"n":"演唱会"},{"v":'10084',"n":"情感"},{"v":'10051',"n":"喜剧"},{"v":'10097',"n":"亲子"},{"v":'10100',"n":"文化"},{"v":'10048',"n":"职场"},{"v":'10069',"n":"萌宠"},{"v":'10099',"n":"养成"}]}]}
+ }
\ No newline at end of file
diff --git a/xiaosa/py/爱奇艺.py b/xiaosa/py/爱奇艺.py
new file mode 100644
index 00000000..2b0af3d9
--- /dev/null
+++ b/xiaosa/py/爱奇艺.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import random
+import sys
+from base64 import b64encode, b64decode
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from urllib.parse import urlencode
+sys.path.append('..')
+from base.spider import Spider
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.did = self.random_str(32)
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ rhost = 'https://www.iqiyi.com'
+
+ hhost='https://mesh.if.iqiyi.com'
+
+ dhost='https://miniapp.iqiyi.com'
+
+ headers = {
+ 'Origin': rhost,
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
+ 'Referer': f'{rhost}/',
+ }
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "全部": "1009",
+ "电影": "1",
+ "剧集": "2",
+ "综艺": "6",
+ "动漫": "4",
+ "儿童": "15",
+ "微剧": "35",
+ "纪录片": "3"
+ }
+ classes = []
+ filters = {}
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+ with ThreadPoolExecutor(max_workers=len(classes)) as executor:
+ results = executor.map(self.getf, classes)
+ for id, ft in results:
+ if len(ft):filters[id] = ft
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ data=self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
+ vlist = []
+ for i in data['items'][1:]:
+ for j in i['video'][0]['data']:
+ id = j.get('firstId')
+ pic=j.get('prevue',{}).get('image_url') or j.get('album_image_url_hover')
+ if id and pic:
+ pu=j.get('prevue',{}).get('page_url') or j.get('page_url').split('?')[0]
+ id = f'{id}@{self.e64(pu)}'
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': j.get('display_name'),
+ 'vod_pic': pic,
+ 'vod_year': j.get('sns_score'),
+ 'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
+ })
+ return {'list':vlist}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ if pg == "1":
+ self.sid = ''
+ new_data = {'mode':'24'}
+ for key, value in extend.items():
+ if value:
+ key_value_pairs = self.d64(value).split(',')
+ for pair in key_value_pairs:
+ k, v = pair.split('=')
+ if k in new_data:
+ new_data[k] += "," + v
+ else:
+ new_data[k] = v
+ path=f'/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.014.21150&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth&recent_selected_tag=&ad=%5B%7B%22lm%22:%225%22,%22ai%22:%225%22,%22fp%22:%226%22,%22sei%22:%22Sa867aa9d326e2bd8654d8c2a8636055e%22,%22position%22:%22library%22%7D%5D&adExt=%7B%22r%22:%221.2.1-ares6-pure%22%7D&dfp=a12f96215b2f7842a98c082799ca0c3d9236be00946701b106829754d8ece3aaf8&filter={urlencode(new_data)}'
+ data=self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
+ self.sid = data['session']
+ videos = []
+ for i in data['data']:
+ id = i.get('firstId') or i.get('tv_id')
+ if not id:
+ id=i.get('play_url').split(';')[0].split('=')[-1]
+ if id and not i.get('h'):
+ id=f'{id}@{self.e64(i.get("page_url"))}'
+ videos.append({
+ 'vod_id': id,
+ 'vod_name': i.get('display_name'),
+ 'vod_pic': i.get('album_image_url_hover'),
+ 'vod_year': i.get('sns_score'),
+ 'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
+ })
+ result = {}
+ result['list'] = videos
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@')
+ ids[-1] = self.d64(ids[-1])
+ data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
+ v=data['data']['playInfo']
+ vod = {
+ 'vod_name': v.get('albumName'),
+ 'type_name': v.get('tags'),
+ 'vod_year': v.get('albumYear'),
+ 'vod_remarks': v.get('updateStrategy'),
+ 'vod_actor': v.get('mainActors'),
+ 'vod_director': v.get('directors'),
+ 'vod_content': v.get('albumDesc'),
+ 'vod_play_from': '爱奇艺',
+ 'vod_play_url': ''
+ }
+ if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
+ purl=[f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
+ pg=data['data']['videoList'].get('totalPages')
+ if pg and pg > 1:
+ id = v['albumId']
+ pages = list(range(2, pg + 1))
+ page_results = {}
+ with ThreadPoolExecutor(max_workers=10) as executor:
+ future_to_page = {
+ executor.submit(self.fetch_page_data, page, id): page
+ for page in pages
+ }
+ for future in as_completed(future_to_page):
+ page = future_to_page[future]
+ try:
+ result = future.result()
+ page_results[page] = result
+ except Exception as e:
+ print(f"Error fetching page {page}: {e}")
+ for page in sorted(page_results.keys()):
+ purl.extend(page_results[page])
+ vod['vod_play_url'] = '#'.join(purl)
+ else:
+ vdata=self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
+ v=vdata['data']['playInfo']
+ vod = {
+ 'vod_name': v.get('shortTitle'),
+ 'type_name': v.get('channelName'),
+ 'vod_year': v.get('year'),
+ 'vod_remarks': v.get('focus'),
+ 'vod_actor': v.get('mainActors'),
+ 'vod_director': v.get('directors'),
+ 'vod_content': v.get('desc'),
+ 'vod_play_from': '爱奇艺',
+ 'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
+ }
+ return {'list':[vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ data=self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
+ videos = []
+ vdata=data['data']['templates']
+ for i in data['data']['templates']:
+ if i.get('intentAlbumInfos'):
+ vdata=[{'albumInfo': c} for c in i['intentAlbumInfos']]+vdata
+
+ for i in vdata:
+ if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
+ b=i['albumInfo']
+ id=f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
+ videos.append({
+ 'vod_id': id,
+ 'vod_name': b.get('title'),
+ 'vod_pic': b.get('img'),
+ 'vod_year': (b.get('year',{}) or {}).get('value'),
+ 'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
+ })
+ return {'list':videos,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ return {'jx':1,'parse': 1, 'url': id, 'header': ''}
+
+ def localProxy(self, param):
+ pass
+
+ def fetch_page_data(self, page, id):
+ try:
+ url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
+ data = self.fetch(url, headers=self.headers).json()
+ return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
+ except:
+ return []
+
+ def getf(self,body):
+ data=self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
+ ft = []
+ # for i in data[:-1]:
+ for i in data:
+ try:
+ value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
+ value.get('tag_param')]
+ ft.append({"key": i['group'], "name": i['group'], "value": value_array})
+ except:
+ print(i)
+ return (body['type_id'], ft)
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self,encoded_text: str):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def random_str(self,length=16):
+ hex_chars = '0123456789abcdef'
+ return ''.join(random.choice(hex_chars) for _ in range(length))
diff --git a/xiaosa/py/网络直播.py b/xiaosa/py/网络直播.py
new file mode 100644
index 00000000..5fbeeaaf
--- /dev/null
+++ b/xiaosa/py/网络直播.py
@@ -0,0 +1,768 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import re
+import sys
+import time
+from base64 import b64decode, b64encode
+from urllib.parse import parse_qs
+import requests
+from pyquery import PyQuery as pq
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ tid = 'douyin'
+ headers = self.gethr(0, tid)
+ response = requests.head(self.hosts[tid], headers=headers)
+ ttwid = response.cookies.get('ttwid')
+ headers.update({
+ 'authority': self.hosts[tid].split('//')[-1],
+ 'cookie': f'ttwid={ttwid}' if ttwid else ''
+ })
+ self.dyheaders = headers
+ pass
+
+ def getName(self):
+ pass
+
+ def isVideoFormat(self, url):
+ pass
+
+ def manualVideoCheck(self):
+ pass
+
+ def destroy(self):
+ pass
+
+ headers = [
+ {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
+ },
+ {
+ "User-Agent": "Dart/3.4 (dart:io)"
+ }
+ ]
+
+ excepturl = 'https://www.baidu.com'
+
+ hosts = {
+ "huya": ["https://www.huya.com","https://mp.huya.com"],
+ "douyin": "https://live.douyin.com",
+ "douyu": "https://www.douyu.com",
+ "wangyi": "https://cc.163.com",
+ "bili": ["https://api.live.bilibili.com", "https://api.bilibili.com"]
+ }
+
+ referers = {
+ "huya": "https://live.cdn.huya.com",
+ "douyin": "https://live.douyin.com",
+ "douyu": "https://m.douyu.com",
+ "bili": "https://live.bilibili.com"
+ }
+
+ playheaders = {
+ "wangyi": {
+ "User-Agent": "ExoPlayer",
+ "Connection": "Keep-Alive",
+ "Icy-MetaData": "1"
+ },
+ "bili": {
+ 'Accept': '*/*',
+ 'Icy-MetaData': '1',
+ 'referer': referers['bili'],
+ 'user-agent': headers[0]['User-Agent']
+ },
+ 'douyin': {
+ 'User-Agent': 'libmpv',
+ 'Icy-MetaData': '1'
+ },
+ 'huya': {
+ 'User-Agent': 'ExoPlayer',
+ 'Connection': 'Keep-Alive',
+ 'Icy-MetaData': '1'
+ },
+ 'douyu': {
+ 'User-Agent': 'libmpv',
+ 'Icy-MetaData': '1'
+ }
+ }
+
+ def process_bili(self):
+ try:
+ self.blfdata = self.fetch(
+ f'{self.hosts["bili"][0]}/room/v1/Area/getList?need_entrance=1&parent_id=0',
+ headers=self.gethr(0, 'bili')
+ ).json()
+ return ('bili', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['name'], 'v': str(i['id'])}
+ for i in self.blfdata['data']]}])
+ except Exception as e:
+ print(f"bili处理错误: {e}")
+ return 'bili', None
+
+ def process_douyin(self):
+ try:
+ data = self.getpq(self.hosts['douyin'], headers=self.dyheaders)('script')
+ for i in data.items():
+ if 'categoryData' in i.text():
+ content = i.text()
+ start = content.find('{')
+ end = content.rfind('}') + 1
+ if start != -1 and end != -1:
+ json_str = content[start:end]
+ json_str = json_str.replace('\\"', '"')
+ try:
+ self.dyifdata = json.loads(json_str)
+ return ('douyin', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['partition']['title'],
+ 'v': f"{i['partition']['id_str']}@@{i['partition']['title']}"}
+ for i in self.dyifdata['categoryData']]}])
+ except json.JSONDecodeError as e:
+ print(f"douyin解析错误: {e}")
+ return 'douyin', None
+ except Exception as e:
+ print(f"douyin请求或处理错误: {e}")
+ return 'douyin', None
+
+ def process_douyu(self):
+ try:
+ self.dyufdata = self.fetch(
+ f'{self.referers["douyu"]}/api/cate/list',
+ headers=self.headers[1]
+ ).json()
+ return ('douyu', [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': i['cate1Name'], 'v': str(i['cate1Id'])}
+ for i in self.dyufdata['data']['cate1Info']]}])
+ except Exception as e:
+ print(f"douyu错误: {e}")
+ return 'douyu', None
+
+ def homeContent(self, filter):
+ result = {}
+ cateManual = {
+ "虎牙": "huya",
+ "哔哩": "bili",
+ "抖音": "douyin",
+ "斗鱼": "douyu",
+ "网易": "wangyi"
+ }
+ classes = []
+ filters = {
+ 'huya': [{'key': 'cate', 'name': '分类',
+ 'value': [{'n': '网游', 'v': '1'}, {'n': '单机', 'v': '2'},
+ {'n': '娱乐', 'v': '8'}, {'n': '手游', 'v': '3'}]}]
+ }
+
+ with ThreadPoolExecutor(max_workers=3) as executor:
+ futures = {
+ executor.submit(self.process_bili): 'bili',
+ executor.submit(self.process_douyin): 'douyin',
+ executor.submit(self.process_douyu): 'douyu'
+ }
+
+ for future in futures:
+ platform, filter_data = future.result()
+ if filter_data:
+ filters[platform] = filter_data
+
+ for k in cateManual:
+ classes.append({
+ 'type_name': k,
+ 'type_id': cateManual[k]
+ })
+
+ result['class'] = classes
+ result['filters'] = filters
+ return result
+
+ def homeVideoContent(self):
+ pass
+
+ def categoryContent(self, tid, pg, filter, extend):
+ vdata = []
+ result = {}
+ pagecount = 9999
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ if tid == 'wangyi':
+ vdata, pagecount = self.wyccContent(tid, pg, filter, extend, vdata)
+ elif 'bili' in tid:
+ vdata, pagecount = self.biliContent(tid, pg, filter, extend, vdata)
+ elif 'huya' in tid:
+ vdata, pagecount = self.huyaContent(tid, pg, filter, extend, vdata)
+ elif 'douyin' in tid:
+ vdata, pagecount = self.douyinContent(tid, pg, filter, extend, vdata)
+ elif 'douyu' in tid:
+ vdata, pagecount = self.douyuContent(tid, pg, filter, extend, vdata)
+ result['list'] = vdata
+ result['pagecount'] = pagecount
+ return result
+
+ def wyccContent(self, tid, pg, filter, extend, vdata):
+ params = {
+ 'format': 'json',
+ 'start': (int(pg) - 1) * 20,
+ 'size': '20',
+ }
+ response = self.fetch(f'{self.hosts[tid]}/api/category/live/', params=params, headers=self.headers[0]).json()
+ for i in response['lives']:
+ if i.get('cuteid'):
+ bvdata = self.buildvod(
+ vod_id=f"{tid}@@{i['cuteid']}",
+ vod_name=i.get('title'),
+ vod_pic=i.get('cover'),
+ vod_remarks=i.get('nickname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(bvdata)
+ return vdata, 9999
+
+ def biliContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ for i in self.blfdata['data']:
+ if str(i['id']) == extend['cate']:
+ for j in i['list']:
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{i['id']}@@{j['id']}",
+ vod_name=j.get('name'),
+ vod_pic=j.get('pic'),
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ path = f'/xlive/web-interface/v1/second/getListByArea?platform=web&sort=online&page_size=30&page={pg}'
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ path = f'/xlive/web-interface/v1/second/getList?platform=web&parent_area_id={ids[1]}&area_id={ids[-1]}&sort_type=&page={pg}'
+ data = self.fetch(f'{self.hosts[tid][0]}{path}', headers=self.gethr(0, tid)).json()
+ for i in data['data']['list']:
+ if i.get('roomid'):
+ data = self.buildvod(
+ f"{tid}@@{i['roomid']}",
+ i.get('title'),
+ i.get('cover'),
+ i.get('watched_show', {}).get('text_large'),
+ 0,
+ i.get('uname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(data)
+ return vdata, 9999
+
+ def huyaContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ id = extend.get('cate')
+ data = self.fetch(f'{self.referers[tid]}/liveconfig/game/bussLive?bussType={id}',
+ headers=self.headers[1]).json()
+ for i in data['data']:
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{int(i['gid'])}",
+ vod_name=i.get('gameFullName'),
+ vod_pic=f'https://huyaimg.msstatic.com/cdnimage/game/{int(i["gid"])}-MS.jpg',
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ gid = ''
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ gid = f'&gameId={ids[1]}'
+ data = self.fetch(f'{self.hosts[tid][0]}/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0{gid}&page={pg}',
+ headers=self.headers[1]).json()
+ for i in data['data']['datas']:
+ if i.get('profileRoom'):
+ v = self.buildvod(
+ f"{tid}@@{i['profileRoom']}",
+ i.get('introduction'),
+ i.get('screenshot'),
+ str(int(i.get('totalCount', '1')) / 10000) + '万',
+ 0,
+ i.get('nick'),
+ style={"type": "rect", "ratio": 1.33}
+
+ )
+ vdata.append(v)
+ return vdata, 9999
+
+ def douyinContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ ids = extend.get('cate').split('@@')
+ for i in self.dyifdata['categoryData']:
+ c = i['partition']
+ if c['id_str'] == ids[0] and c['title'] == ids[1]:
+ vlist = i['sub_partition'].copy()
+ vlist.insert(0, {'partition': c})
+ for j in vlist:
+ j = j['partition']
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{j['id_str']}@@{j['type']}",
+ vod_name=j.get('title'),
+ vod_pic='https://p3-pc-weboff.byteimg.com/tos-cn-i-9r5gewecjs/pwa_v3/512x512-1.png',
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition=720&partition_type=1'
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition={ids[1]}&partition_type={ids[-1]}&req_from=2'
+ data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.dyheaders).json()
+ for i in data['data']['data']:
+ v = self.buildvod(
+ vod_id=f"{tid}@@{i['web_rid']}",
+ vod_name=i['room'].get('title'),
+ vod_pic=i['room']['cover'].get('url_list')[0],
+ vod_year=i.get('user_count_str'),
+ vod_remarks=i['room']['owner'].get('nickname'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(v)
+ return vdata, 9999
+
+ def douyuContent(self, tid, pg, filter, extend, vdata):
+ if extend.get('cate') and pg == '1' and 'click' not in tid:
+ for i in self.dyufdata['data']['cate2Info']:
+ if str(i['cate1Id']) == extend['cate']:
+ v = self.buildvod(
+ vod_id=f"click_{tid}@@{i['cate2Id']}",
+ vod_name=i.get('cate2Name'),
+ vod_pic=i.get('icon'),
+ vod_remarks=i.get('count'),
+ vod_tag=1,
+ style={"type": "oval", "ratio": 1}
+ )
+ vdata.append(v)
+ return vdata, 1
+ else:
+ path = f'/japi/weblist/apinc/allpage/6/{pg}'
+ if 'click' in tid:
+ ids = tid.split('_')[1].split('@@')
+ tid = ids[0]
+ path = f'/gapi/rkc/directory/mixList/2_{ids[1]}/{pg}'
+ url = f'{self.hosts[tid]}{path}'
+ data = self.fetch(url, headers=self.headers[1]).json()
+ for i in data['data']['rl']:
+ v = self.buildvod(
+ vod_id=f"{tid}@@{i['rid']}",
+ vod_name=i.get('rn'),
+ vod_pic=i.get('rs16'),
+ vod_year=str(int(i.get('ol', 1)) / 10000) + '万',
+ vod_remarks=i.get('nn'),
+ style={"type": "rect", "ratio": 1.33}
+ )
+ vdata.append(v)
+ return vdata, 9999
+
+ def detailContent(self, ids):
+ ids = ids[0].split('@@')
+ if ids[0] == 'wangyi':
+ vod = self.wyccDetail(ids)
+ elif ids[0] == 'bili':
+ vod = self.biliDetail(ids)
+ elif ids[0] == 'huya':
+ vod = self.huyaDetail(ids)
+ elif ids[0] == 'douyin':
+ vod = self.douyinDetail(ids)
+ elif ids[0] == 'douyu':
+ vod = self.douyuDetail(ids)
+ return {'list': [vod]}
+
+ def wyccDetail(self, ids):
+ try:
+ vdata = self.getpq(f'{self.hosts[ids[0]]}/{ids[1]}', self.headers[0])('script').eq(-1).text()
+
+ def get_quality_name(vbr):
+ if vbr <= 600:
+ return "标清"
+ elif vbr <= 1000:
+ return "高清"
+ elif vbr <= 2000:
+ return "超清"
+ else:
+ return "蓝光"
+
+ data = json.loads(vdata)['props']['pageProps']['roomInfoInitData']
+ name = data['live'].get('title', ids[0])
+ vod = self.buildvod(vod_name=data.get('keywords_suffix'), vod_remarks=data['live'].get('title'),
+ vod_content=data.get('description_suffix'))
+ resolution_data = data['live']['quickplay']['resolution']
+ all_streams = {}
+ sorted_qualities = sorted(resolution_data.items(),
+ key=lambda x: x[1]['vbr'],
+ reverse=True)
+ for quality, data in sorted_qualities:
+ vbr = data['vbr']
+ quality_name = get_quality_name(vbr)
+ for cdn_name, url in data['cdn'].items():
+ if cdn_name not in all_streams and type(url) == str and url.startswith('http'):
+ all_streams[cdn_name] = []
+ if isinstance(url, str) and url.startswith('http'):
+ all_streams[cdn_name].extend([quality_name, url])
+ plists = []
+ names = []
+ for i, (cdn_name, stream_list) in enumerate(all_streams.items(), 1):
+ names.append(f'线路{i}')
+ pstr = f"{name}${ids[0]}@@{self.e64(json.dumps(stream_list))}"
+ plists.append(pstr)
+ vod['vod_play_from'] = "$$$".join(names)
+ vod['vod_play_url'] = "$$$".join(plists)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
+ def biliDetail(self, ids):
+ try:
+ vdata = self.fetch(
+ f'{self.hosts[ids[0]][0]}/xlive/web-room/v1/index/getInfoByRoom?room_id={ids[1]}&wts={int(time.time())}',
+ headers=self.gethr(0, ids[0])).json()
+ v = vdata['data']['room_info']
+ vod = self.buildvod(
+ vod_name=v.get('title'),
+ type_name=v.get('parent_area_name') + '/' + v.get('area_name'),
+ vod_remarks=v.get('tags'),
+ vod_play_from=v.get('title'),
+ )
+ data = self.fetch(
+ f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0%2C1&format=0%2C1%2C2&codec=0%2C1&platform=web',
+ headers=self.gethr(0, ids[0])).json()
+ vdnams = data['data']['playurl_info']['playurl']['g_qn_desc']
+ all_accept_qns = []
+ streams = data['data']['playurl_info']['playurl']['stream']
+ for stream in streams:
+ for format_item in stream['format']:
+ for codec in format_item['codec']:
+ if 'accept_qn' in codec:
+ all_accept_qns.append(codec['accept_qn'])
+ max_accept_qn = max(all_accept_qns, key=len) if all_accept_qns else []
+ quality_map = {
+ item['qn']: item['desc']
+ for item in vdnams
+ }
+ quality_names = [f"{quality_map.get(qn)}${ids[0]}@@{ids[1]}@@{qn}" for qn in max_accept_qn]
+ vod['vod_play_url'] = "#".join(quality_names)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
+ def huyaDetail(self, ids):
+ try:
+ vdata = self.fetch(f'{self.hosts[ids[0]][1]}/cache.php?m=Live&do=profileRoom&roomid={ids[1]}',
+ headers=self.headers[0]).json()
+ v = vdata['data']['liveData']
+ vod = self.buildvod(
+ vod_name=v.get('introduction'),
+ type_name=v.get('gameFullName'),
+ vod_director=v.get('nick'),
+ vod_remarks=v.get('contentIntro'),
+ )
+ data = dict(reversed(list(vdata['data']['stream'].items())))
+ names = []
+ plist = []
+
+ for stream_type, stream_data in data.items():
+ if isinstance(stream_data, dict) and 'multiLine' in stream_data and 'rateArray' in stream_data:
+ names.append(f"线路{len(names) + 1}")
+ qualities = sorted(
+ stream_data['rateArray'],
+ key=lambda x: (x['iBitRate'], x['sDisplayName']),
+ reverse=True
+ )
+ cdn_urls = []
+ for cdn in stream_data['multiLine']:
+ quality_urls = []
+ for quality in qualities:
+ quality_name = quality['sDisplayName']
+ bit_rate = quality['iBitRate']
+ base_url = cdn['url']
+ if bit_rate > 0:
+ if '.m3u8' in base_url:
+ new_url = base_url.replace(
+ 'ratio=2000',
+ f'ratio={bit_rate}'
+ )
+ else:
+ new_url = base_url.replace(
+ 'imgplus.flv',
+ f'imgplus_{bit_rate}.flv'
+ )
+ else:
+ new_url = base_url
+ quality_urls.extend([quality_name, new_url])
+ encoded_urls = self.e64(json.dumps(quality_urls))
+ cdn_urls.append(f"{cdn['cdnType']}${ids[0]}@@{encoded_urls}")
+
+ if cdn_urls:
+ plist.append('#'.join(cdn_urls))
+ vod['vod_play_from'] = "$$$".join(names)
+ vod['vod_play_url'] = "$$$".join(plist)
+ return vod
+ except Exception as e:
+ return self.handle_exception(e)
+
    def douyinDetail(self, ids):
        """Detail page for a Douyin live room.

        ids: [site_key, web_rid].  Queries the web room-enter endpoint, then
        unpacks the stream data into per-quality flv/hls/lls url lists,
        exposed as the '线路1'/'线路2' play groups (main / backup CDN lines).
        """
        url = f'{self.hosts[ids[0]]}/webcast/room/web/enter/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&enter_from=web_live&web_rid={ids[1]}&room_id_str=&enter_source=&Room-Enter-User-Login-Ab=0&is_need_double_stream=false&cookie_enabled=true&screen_width=1980&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=125.0.0.0'
        data = self.fetch(url, headers=self.dyheaders).json()
        try:
            vdata = data['data']['data'][0]
            vod = self.buildvod(
                vod_name=vdata['title'],
                vod_remarks=vdata['user_count_str'],
            )
            # Quality metadata and the actual urls live in two parallel
            # structures; stream_data is itself a JSON-encoded string.
            resolution_data = vdata['stream_url']['live_core_sdk_data']['pull_data']['options']['qualities']
            stream_json = vdata['stream_url']['live_core_sdk_data']['pull_data']['stream_data']
            stream_json = json.loads(stream_json)
            # A line ('main'/'backup') is offered only if some quality has it.
            available_types = []
            if any(sdk_key in stream_json['data'] and 'main' in stream_json['data'][sdk_key] for sdk_key in
                   stream_json['data']):
                available_types.append('main')
            if any(sdk_key in stream_json['data'] and 'backup' in stream_json['data'][sdk_key] for sdk_key in
                   stream_json['data']):
                available_types.append('backup')
            plist = []
            for line_type in available_types:
                format_arrays = {'flv': [], 'hls': [], 'lls': []}
                # Highest level (best quality) first.
                qualities = sorted(resolution_data, key=lambda x: x['level'], reverse=True)
                for quality in qualities:
                    sdk_key = quality['sdk_key']
                    if sdk_key in stream_json['data'] and line_type in stream_json['data'][sdk_key]:
                        stream_info = stream_json['data'][sdk_key][line_type]
                        # Flat [name, url, name, url, ...] arrays per container format.
                        if stream_info.get('flv'):
                            format_arrays['flv'].extend([quality['name'], stream_info['flv']])
                        if stream_info.get('hls'):
                            format_arrays['hls'].extend([quality['name'], stream_info['hls']])
                        if stream_info.get('lls'):
                            format_arrays['lls'].extend([quality['name'], stream_info['lls']])
                format_urls = []
                for format_name, url_array in format_arrays.items():
                    if url_array:
                        # Payload is base64-encoded JSON, decoded again in playerContent.
                        encoded_urls = self.e64(json.dumps(url_array))
                        format_urls.append(f"{format_name}${ids[0]}@@{encoded_urls}")

                if format_urls:
                    plist.append('#'.join(format_urls))

            names = ['线路1', '线路2'][:len(plist)]
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod

        except Exception as e:
            return self.handle_exception(e)
+
    def douyuDetail(self, ids):
        """Detail page for a Douyu live room.

        Fetches room info, derives the stream signature via an external
        signing service, then lists one play line per CDN.  The rate list is
        carried along base64-encoded for douyuplay to resolve later.
        """
        headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{ids[1]}')
        try:
            data = self.fetch(f'{self.hosts[ids[0]]}/betard/{ids[1]}', headers=headers).json()
            vname = data['room']['room_name']
            vod = self.buildvod(
                vod_name=vname,
                vod_remarks=data['room'].get('second_lvl_name'),
                vod_director=data['room'].get('nickname'),
            )
            # homeH5Enc returns an obfuscated JS blob; douyu_text trims it and
            # the nsapps.cn service computes the actual request signature.
            vdata = self.fetch(f'{self.hosts[ids[0]]}/swf_api/homeH5Enc?rids={ids[1]}', headers=headers).json()
            json_body = vdata['data']
            json_body = {"html": self.douyu_text(json_body[f'room{ids[1]}']), "rid": ids[1]}
            sign = self.post('http://alive.nsapps.cn/api/AllLive/DouyuSign', json=json_body, headers=self.headers[1]).json()['data']
            # rate=-1 asks for the CDN/rate catalogue rather than a stream url.
            body = f'{sign}&cdn=&rate=-1&ver=Douyu_223061205&iar=1&ive=1&hevc=0&fa=0'
            body=self.params_to_json(body)
            nubdata = self.post(f'{self.hosts[ids[0]]}/lapi/live/getH5Play/{ids[1]}', data=body, headers=headers).json()
            plist = []
            names = []
            for i,x in enumerate(nubdata['data']['cdnsWithName']):
                names.append(f'线路{i+1}')
                d = {'sign': sign, 'cdn': x['cdn'], 'id': ids[1]}
                # Play id: name$site@@b64(sign/cdn/id)@@b64(rate list).
                plist.append(
                    f'{vname}${ids[0]}@@{self.e64(json.dumps(d))}@@{self.e64(json.dumps(nubdata["data"]["multirates"]))}')
            vod['vod_play_from'] = "$$$".join(names)
            vod['vod_play_url'] = "$$$".join(plist)
            return vod
        except Exception as e:
            return self.handle_exception(e)
+
+ def douyu_text(self, text):
+ function_positions = [m.start() for m in re.finditer('function', text)]
+ total_functions = len(function_positions)
+ if total_functions % 2 == 0:
+ target_index = total_functions // 2 + 1
+ else:
+ target_index = (total_functions - 1) // 2 + 1
+ if total_functions >= target_index:
+ cut_position = function_positions[target_index - 1]
+ ctext = text[4:cut_position]
+ return re.sub(r'eval\(strc\)\([\w\d,]+\)', 'strc', ctext)
+ return text
+
    def searchContent(self, key, quick, pg="1"):
        # Live rooms are not searchable through this spider; intentionally a no-op.
        pass
+
+ def playerContent(self, flag, id, vipFlags):
+ try:
+ ids = id.split('@@')
+ p = 1
+ if ids[0] in ['wangyi', 'douyin','huya']:
+ p, url = 0, json.loads(self.d64(ids[1]))
+ elif ids[0] == 'bili':
+ p, url = self.biliplay(ids)
+ elif ids[0] == 'huya':
+ p, url = 0, json.loads(self.d64(ids[1]))
+ elif ids[0] == 'douyu':
+ p, url = self.douyuplay(ids)
+ return {'parse': p, 'url': url, 'header': self.playheaders[ids[0]]}
+ except Exception as e:
+ return {'parse': 1, 'url': self.excepturl, 'header': self.headers[0]}
+
    def biliplay(self, ids):
        """Resolve Bilibili live play urls.

        ids: [site_key, room_id, qn].  Flattens every stream/format/codec/url
        combination into a flat [label, url, ...] list.
        Returns (0, urls) on success or (1, fallback_url) on error.
        """
        try:
            data = self.fetch(
                f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0,1&format=0,2&codec=0&platform=web&qn={ids[2]}',
                headers=self.gethr(0, ids[0])).json()
            urls = []
            line_index = 1
            for stream in data['data']['playurl_info']['playurl']['stream']:
                for format_item in stream['format']:
                    for codec in format_item['codec']:
                        for url_info in codec['url_info']:
                            # host + base_url + extra query make one full url.
                            full_url = f"{url_info['host']}/{codec['base_url'].lstrip('/')}{url_info['extra']}"
                            urls.extend([f"线路{line_index}", full_url])
                            line_index += 1
            return 0, urls
        except Exception as e:
            return 1, self.excepturl
+
    def douyuplay(self, ids):
        """Resolve all Douyu play urls for one CDN line.

        ids: [site_key, b64(sign/cdn/id), b64(rate list)].  Fetches every
        rate in parallel via douyufp, then flattens the results sorted by
        bitrate, highest first.  Returns (0, [name, url, ...]) on success
        or (1, fallback_url).
        """
        try:
            sdata = json.loads(self.d64(ids[1]))
            headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{sdata["id"]}')
            ldata = json.loads(self.d64(ids[2]))
            result_obj = {}
            with ThreadPoolExecutor(max_workers=len(ldata)) as executor:
                futures = [
                    executor.submit(
                        self.douyufp,
                        sdata,
                        quality,
                        headers,
                        self.hosts[ids[0]],
                        result_obj
                    ) for quality in ldata
                ]
                # Wait for every worker (result() also re-raises failures).
                for future in futures:
                    future.result()

            result = []
            for bit in sorted(result_obj.keys(), reverse=True):
                result.extend(result_obj[bit])

            if result:
                return 0, result
            return 1, self.excepturl

        except Exception as e:
            return 1, self.excepturl

    def douyufp(self, sdata, quality, headers, host, result_obj):
        """Worker for douyuplay: fetch one rate's url into result_obj[bit].

        result_obj is shared across threads; failures are logged and the
        entry is simply skipped.
        """
        try:
            body = f'{sdata["sign"]}&cdn={sdata["cdn"]}&rate={quality["rate"]}'
            body=self.params_to_json(body)
            data = self.post(f'{host}/lapi/live/getH5Play/{sdata["id"]}',
                             data=body, headers=headers).json()
            if data.get('data'):
                play_url = data['data']['rtmp_url'] + '/' + data['data']['rtmp_live']
                bit = quality.get('bit', 0)
                if bit not in result_obj:
                    result_obj[bit] = []
                result_obj[bit].extend([quality['name'], play_url])
        except Exception as e:
            print(f"Error fetching {quality['name']}: {str(e)}")
+
    def localProxy(self, param):
        # Local proxy hook not used by this spider.
        pass
+
+ def e64(self, text):
+ try:
+ text_bytes = text.encode('utf-8')
+ encoded_bytes = b64encode(text_bytes)
+ return encoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64编码错误: {str(e)}")
+ return ""
+
+ def d64(self, encoded_text):
+ try:
+ encoded_bytes = encoded_text.encode('utf-8')
+ decoded_bytes = b64decode(encoded_bytes)
+ return decoded_bytes.decode('utf-8')
+ except Exception as e:
+ print(f"Base64解码错误: {str(e)}")
+ return ""
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+ def params_to_json(self, query_string):
+ parsed_data = parse_qs(query_string)
+ result = {key: value[0] for key, value in parsed_data.items()}
+ return result
+
+ def buildvod(self, vod_id='', vod_name='', vod_pic='', vod_year='', vod_tag='', vod_remarks='', style='',
+ type_name='', vod_area='', vod_actor='', vod_director='',
+ vod_content='', vod_play_from='', vod_play_url=''):
+ vod = {
+ 'vod_id': vod_id,
+ 'vod_name': vod_name,
+ 'vod_pic': vod_pic,
+ 'vod_year': vod_year,
+ 'vod_tag': 'folder' if vod_tag else '',
+ 'vod_remarks': vod_remarks,
+ 'style': style,
+ 'type_name': type_name,
+ 'vod_area': vod_area,
+ 'vod_actor': vod_actor,
+ 'vod_director': vod_director,
+ 'vod_content': vod_content,
+ 'vod_play_from': vod_play_from,
+ 'vod_play_url': vod_play_url
+ }
+ vod = {key: value for key, value in vod.items() if value}
+ return vod
+
+ def getpq(self, url, headers=None, cookies=None):
+ data = self.fetch(url, headers=headers, cookies=cookies).text
+ try:
+ return pq(data)
+ except Exception as e:
+ print(f"解析页面错误: {str(e)}")
+ return pq(data.encode('utf-8'))
+
+ def gethr(self, index, rf='', zr=''):
+ headers = self.headers[index]
+ if zr:
+ headers['referer'] = zr
+ else:
+ headers['referer'] = f"{self.referers[rf]}/"
+ return headers
+
+ def handle_exception(self, e):
+ print(f"报错: {str(e)}")
+ return {'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'翻车啦${self.excepturl}'}
+
diff --git a/xiaosa/py/腾讯视频.py b/xiaosa/py/腾讯视频.py
new file mode 100644
index 00000000..7a5218f0
--- /dev/null
+++ b/xiaosa/py/腾讯视频.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import uuid
+import copy
+sys.path.append('..')
+from base.spider import Spider
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ self.dbody = {
+ "page_params": {
+ "channel_id": "",
+ "filter_params": "sort=75",
+ "page_type": "channel_operation",
+ "page_id": "channel_list_second_page"
+ }
+ }
+ self.body = self.dbody
+ pass
+
    def getName(self):
        # Spider interface hook; unused.
        pass

    def isVideoFormat(self, url):
        # Spider interface hook; unused.
        pass

    def manualVideoCheck(self):
        # Spider interface hook; unused.
        pass

    def destroy(self):
        # Spider interface hook; unused.
        pass

    # Site root; also serves as origin/referer and the jx play-page base.
    host = 'https://v.qq.com'

    # Backend gateway for page/list/search APIs.
    apihost = 'https://pbaccess.video.qq.com'

    # Browser-like headers shared by every request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
        'origin': host,
        'referer': f'{host}/'
    }
+
    def homeContent(self, filter):
        """Home categories plus per-channel filter definitions.

        Channel ids are hard-coded; each channel's filter options are scraped
        concurrently from its list page via get_filter_data.
        """
        cdata = {
            "电视剧": "100113",
            "电影": "100173",
            "综艺": "100109",
            "纪录片": "100105",
            "动漫": "100119",
            "少儿": "100150",
            "短剧": "110755"
        }
        result = {}
        classes = []
        filters = {}
        for k in cdata:
            classes.append({
                'type_name': k,
                'type_id': cdata[k]
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
            for future in futures:
                cid, data = future.result()
                if not data.get('data', {}).get('module_list_datas'):
                    continue
                filter_dict = {}
                try:
                    # The last module of the last list holds the filter-bar items.
                    items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
                    for item in items:
                        if not item.get('item_params', {}).get('index_item_key'):
                            continue
                        params = item['item_params']
                        filter_key = params['index_item_key']
                        if filter_key not in filter_dict:
                            filter_dict[filter_key] = {
                                'key': filter_key,
                                'name': params['index_name'],
                                'value': []
                            }
                        filter_dict[filter_key]['value'].append({
                            'n': params['option_name'],
                            'v': params['option_value']
                        })
                except (IndexError, KeyError):
                    continue
                filters[cid] = list(filter_dict.values())
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Recommended cards from the portal home channel (page_id 100101)."""
        json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
        data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
        vlist = []
        for it in data['data']['CardList'][0]['children_list']['list']['cards']:
            if it.get('params'):
                p = it['params']
                tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
                id = it.get('id') or p.get('cid')
                name = p.get('mz_title') or p.get('title')
                # NOTE(review): assumes id is always a string here; 'http' ids
                # look like ad/link cards and are skipped — confirm.
                if name and 'http' not in id:
                    vlist.append({
                        'vod_id': id,
                        'vod_name': name,
                        'vod_pic': p.get('image_url'),
                        'vod_year': tag.get('tag_2', {}).get('text'),
                        'vod_remarks': tag.get('tag_4', {}).get('text')
                    })
        return {'list': vlist}
+
+ def categoryContent(self, tid, pg, filter, extend):
+ result = {}
+ params = {
+ "sort": extend.get('sort', '75'),
+ "attr": extend.get('attr', '-1'),
+ "itype": extend.get('itype', '-1'),
+ "ipay": extend.get('ipay', '-1'),
+ "iarea": extend.get('iarea', '-1'),
+ "iyear": extend.get('iyear', '-1'),
+ "theater": extend.get('theater', '-1'),
+ "award": extend.get('award', '-1'),
+ "recommend": extend.get('recommend', '-1')
+ }
+ if pg == '1':
+ self.body = self.dbody.copy()
+ self.body['page_params']['channel_id'] = tid
+ self.body['page_params']['filter_params'] = self.josn_to_params(params)
+ data = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
+ json=self.body, headers=self.headers).json()
+ ndata = data['data']
+ if ndata['has_next_page']:
+ result['pagecount'] = 9999
+ self.body['page_context'] = ndata['next_page_context']
+ else:
+ result['pagecount'] = int(pg)
+ vlist = []
+ for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
+ id = its.get('item_params', {}).get('cid')
+ if id:
+ p = its['item_params']
+ tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
+ name = p.get('mz_title') or p.get('title')
+ pic = p.get('new_pic_hz') or p.get('new_pic_vt')
+ vlist.append({
+ 'vod_id': id,
+ 'vod_name': name,
+ 'vod_pic': pic,
+ 'vod_year': tag.get('tag_2', {}).get('text'),
+ 'vod_remarks': tag.get('tag_4', {}).get('text')
+ })
+ result['list'] = vlist
+ result['page'] = pg
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
    def detailContent(self, ids):
        """Detail + episode list for cover id ids[0].

        Intro and episode pages are fetched concurrently; extra episode tabs
        are expanded by process_tabs.  Regular episodes and trailers become
        the '腾讯视频' / '预告片' play groups respectively.
        """
        vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
        body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
        with ThreadPoolExecutor(max_workers=2) as executor:
            future_detail = executor.submit(self.get_vdata, vbody)
            future_episodes = executor.submit(self.get_vdata, body)
            vdata = future_detail.result()
            data = future_episodes.result()

        pdata = self.process_tabs(data, body, ids)
        if not pdata:
            return self.handle_exception(None, "No pdata available")

        try:
            star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
                0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
            actors = [star['item_params']['name'] for star in star_list]
            names = ['腾讯视频', '预告片']
            plist, ylist = self.process_pdata(pdata, ids)
            # Drop the group label when its list is empty (ylist first would
            # shift indices, so delete in this order).
            if not plist:
                del names[0]
            if not ylist:
                del names[1]
            vod = self.build_vod(vdata, actors, plist, ylist, names)
            return {'list': [vod]}
        except Exception as e:
            return self.handle_exception(e, "Error processing detail")
+
+ def searchContent(self, key, quick, pg="1"):
+ headers = self.headers.copy()
+ headers.update({'Content-Type': 'application/json'})
+ body = {'version':'25021101','clientType':1,'filterValue':'','uuid':str(uuid.uuid4()),'retry':0,'query':key,'pagenum':int(pg)-1,'pagesize':30,'queryFrom':0,'searchDatakey':'','transInfo':'','isneedQc':True,'preQid':'','adClientInfo':'','extraInfo':{'isNewMarkLabel':'1','multi_terminal_pc':'1','themeType':'1',},}
+ data = self.post(f'{self.apihost}/trpc.videosearch.mobile_search.MultiTerminalSearch/MbSearch?vplatform=2',
+ json=body, headers=headers).json()
+ vlist = []
+ vname=["电视剧", "电影", "综艺", "纪录片", "动漫", "少儿", "短剧"]
+ v=data['data']['normalList']['itemList']
+ d=data['data']['areaBoxList'][0]['itemList']
+ q=v+d
+ if v[0].get('doc') and v[0]['doc'].get('id') =='MainNeed':q=d+v
+ for k in q:
+ if k.get('doc') and k.get('videoInfo') and k['doc'].get('id') and '外站' not in k['videoInfo'].get('subTitle') and k['videoInfo'].get('title') and k['videoInfo'].get('typeName') in vname:
+ img_tag = k.get('videoInfo', {}).get('imgTag')
+ if img_tag is not None and isinstance(img_tag, str):
+ try:
+ tag = json.loads(img_tag)
+ except json.JSONDecodeError as e:
+ tag = {}
+ else:
+ tag = {}
+ pic = k.get('videoInfo', {}).get('imgUrl')
+ vlist.append({
+ 'vod_id': k['doc']['id'],
+ 'vod_name': self.removeHtmlTags(k['videoInfo']['title']),
+ 'vod_pic': pic,
+ 'vod_year': k['videoInfo'].get('typeName') +' '+ tag.get('tag_2', {}).get('text', ''),
+ 'vod_remarks': tag.get('tag_4', {}).get('text', '')
+ })
+ return {'list': vlist, 'page': pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ ids = id.split('@')
+ url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
+ return {'jx':1,'parse': 1, 'url': url, 'header': ''}
+
    def localProxy(self, param):
        # Local proxy hook not used by this spider.
        pass
+
+ def get_filter_data(self, cid):
+ hbody = self.dbody.copy()
+ hbody['page_params']['channel_id'] = cid
+ data = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
+ json=hbody, headers=self.headers).json()
+ return cid, data
+
+ def get_vdata(self, body):
+ try:
+ vdata = self.post(
+ f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96',
+ json=body, headers=self.headers
+ ).json()
+ return vdata
+ except Exception as e:
+ print(f"Error in get_vdata: {str(e)}")
+ return {'data': {'module_list_datas': []}}
+
+ def process_pdata(self, pdata, ids):
+ plist = []
+ ylist = []
+ for k in pdata:
+ if k.get('item_id'):
+ pid = f"{k['item_params']['union_title']}${ids[0]}@{k['item_id']}"
+ if '预告' in k['item_params']['union_title']:
+ ylist.append(pid)
+ else:
+ plist.append(pid)
+ return plist, ylist
+
+ def build_vod(self, vdata, actors, plist, ylist, names):
+ d = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][0]['item_params']
+ urls = []
+ if plist:
+ urls.append('#'.join(plist))
+ if ylist:
+ urls.append('#'.join(ylist))
+ vod = {
+ 'type_name': d.get('sub_genre', ''),
+ 'vod_name': d.get('title', ''),
+ 'vod_year': d.get('year', ''),
+ 'vod_area': d.get('area_name', ''),
+ 'vod_remarks': d.get('holly_online_time', '') or d.get('hotval', ''),
+ 'vod_actor': ','.join(actors),
+ 'vod_content': d.get('cover_description', ''),
+ 'vod_play_from': '$$$'.join(names),
+ 'vod_play_url': '$$$'.join(urls)
+ }
+ return vod
+
+ def handle_exception(self, e, message):
+ print(f"{message}: {str(e)}")
+ return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}]}
+
    def process_tabs(self, data, body, ids):
        """Collect every episode item, expanding extra tab pages in parallel.

        The first tab's items are already in *data*; the remaining tabs are
        fetched concurrently and appended in tab order.  Returns [] on error.
        """
        try:
            pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
            tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
            if tabs and len(json.loads(tabs)):
                tabs = json.loads(tabs)
                remaining_tabs = tabs[1:]
                task_queue = []
                for tab in remaining_tabs:
                    # deepcopy: each request needs its own nested page_params.
                    nbody = copy.deepcopy(body)
                    nbody['page_params']['page_context'] = tab['page_context']
                    task_queue.append(nbody)
                with ThreadPoolExecutor(max_workers=10) as executor:
                    future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
                    results = [None] * len(task_queue)
                    # Preserve tab order regardless of completion order.
                    for future in as_completed(future_map.keys()):
                        idx = future_map[future]
                        results[idx] = future.result()
                for result in results:
                    if result:
                        page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
                            'item_datas']
                        pdata.extend(page_data)
            return pdata
        except Exception as e:
            print(f"Error processing episodes: {str(e)}")
            return []
+
+ def josn_to_params(self, params, skip_empty=False):
+ query = []
+ for k, v in params.items():
+ if skip_empty and not v:
+ continue
+ query.append(f"{k}={v}")
+ return "&".join(query)
+
+
diff --git a/xiaosa/py/芒果视频.py b/xiaosa/py/芒果视频.py
new file mode 100644
index 00000000..6ba8e343
--- /dev/null
+++ b/xiaosa/py/芒果视频.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
    def init(self, extend=""):
        # No per-site configuration required.
        pass

    def getName(self):
        # Spider interface hook; unused.
        pass

    def isVideoFormat(self, url):
        # Spider interface hook; unused.
        pass

    def manualVideoCheck(self):
        # Spider interface hook; unused.
        pass

    def destroy(self):
        # Spider interface hook; unused.
        pass

    # Web portal root (origin/referer and the playerContent url base).
    rhost='https://www.mgtv.com'

    # Channel/list ("pianku") API.
    host='https://pianku.api.mgtv.com'

    # Video info / episode list API.
    vhost='https://pcweb.api.mgtv.com'

    # Home recommendation feed API.
    mhost='https://dc.bz.mgtv.com'

    # Mobile search API.
    shost='https://mobileso.bz.mgtv.com'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
        'origin': rhost,
        'referer': f'{rhost}/'
    }
+
    def homeContent(self, filter):
        """Hard-coded channel categories; filters fetched concurrently via getf."""
        result = {}
        cateManual = {
            "电影": "3",
            "电视剧": "2",
            "综艺": "1",
            "动画": "50",
            "少儿": "10",
            "纪录片": "51",
            "教育": "115"
        }
        classes = []
        filters = {}
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
            # Only channels that actually returned filter groups are kept.
            for id, ft in results:
                if len(ft):filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        return result
+
    def homeVideoContent(self):
        """Home feed: flatten the channel-index DSL modules into vod cards."""
        data=self.fetch(f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000', headers=self.headers).json()
        videoList = []
        for i in data['data']:
            if i.get('DSLList') and len(i['DSLList']):
                for j in i['DSLList']:
                    if j.get('data') and j['data'].get('items') and len(j['data']['items']):
                        for k in j['data']['items']:
                            videoList.append({
                                'vod_id': k["videoId"],
                                'vod_name': k['videoName'],
                                'vod_pic': k['img'],
                                'vod_year': k.get('cornerTitle'),
                                'vod_remarks': k.get('time') or k.get('desc'),
                            })
        return {'list':videoList}

    def categoryContent(self, tid, pg, filter, extend):
        """Channel listing via the rider/list API; *extend* overrides defaults."""
        body={
            'allowedRC': '1',
            'platform': 'pcweb',
            'channelId': tid,
            'pn': pg,
            'pc': '80',
            'hudong': '1',
            '_support': '10000000'
        }
        body.update(extend)
        data=self.fetch(f'{self.host}/rider/list/pcweb/v3', params=body, headers=self.headers).json()
        videoList = []
        for i in data['data']['hitDocs']:
            videoList.append({
                'vod_id': i["playPartId"],
                'vod_name': i['title'],
                'vod_pic': i['img'],
                'vod_year': (i.get('rightCorner',{}) or {}).get('text') or i.get('year'),
                'vod_remarks': i['updateInfo']
            })
        result = {}
        result['list'] = videoList
        result['page'] = pg
        # Open-ended paging; the API exposes no reliable total here.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
+
    def detailContent(self, ids):
        """Video info plus the full episode list (extra pages in parallel)."""
        vbody={'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
        vdata=self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
        d=vdata['data']['info']['detail']
        vod = {
            'vod_name': vdata['data']['info']['title'],
            'type_name': d.get('kind'),
            'vod_year': d.get('releaseTime'),
            'vod_area': d.get('area'),
            'vod_lang': d.get('language'),
            'vod_remarks': d.get('updateInfo'),
            'vod_actor': d.get('leader'),
            'vod_director': d.get('director'),
            'vod_content': d.get('story'),
            'vod_play_from': '芒果TV',
            'vod_play_url': ''
        }
        # Page 1 also reports total_page; any remaining pages are parallelised.
        data,pdata=self.fetch_page_data('1', ids[0],True)
        pagecount=data['data'].get('total_page') or 1
        if int(pagecount)>1:
            pages = list(range(2, pagecount+1))
            page_results = {}
            with ThreadPoolExecutor(max_workers=10) as executor:
                future_to_page = {
                    executor.submit(self.fetch_page_data, page, ids[0]): page
                    for page in pages
                }
                for future in as_completed(future_to_page):
                    page = future_to_page[future]
                    try:
                        result = future.result()
                        page_results[page] = result
                    except Exception as e:
                        print(f"Error fetching page {page}: {e}")
            # Reassemble in page order regardless of completion order.
            for page in sorted(page_results.keys()):
                pdata.extend(page_results[page])
        vod['vod_play_url'] = '#'.join(pdata)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Mobile search; only hits carrying both vid and img become cards."""
        data=self.fetch(f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000', headers=self.headers).json()
        videoList = []
        for i in data['data']['contents']:
            if i.get('data') and len(i['data']):
                k = i['data'][0]
                if k.get('vid') and k.get('img'):
                    try:
                        videoList.append({
                            'vod_id': k['vid'],
                            'vod_name': k['title'],
                            'vod_pic': k['img'],
                            # NOTE(review): year/desc are read from the outer
                            # group `i`, not the hit `k` — confirm intended.
                            'vod_year': (i.get('rightTopCorner',{}) or {}).get('text') or i.get('year'),
                            'vod_remarks': '/'.join(i.get('desc',[])),
                        })
                    except:
                        print(k)
        return {'list':videoList,'page':pg}
+
+ def playerContent(self, flag, id, vipFlags):
+ id=f'{self.rhost}{id}'
+ return {'jx':1,'parse': 1, 'url': id, 'header': ''}
+
    def localProxy(self, param):
        # Local proxy hook not used by this spider.
        pass
+
    def getf(self, body):
        """Fetch one channel's filter definitions; returns (type_id, filters)."""
        params = {
            'allowedRC': '1',
            'channelId': body['type_id'],
            'platform': 'pcweb',
            '_support': '10000000',
        }
        data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
        ft = []
        for i in data['data']['listItems']:
            try:
                value_array = [{"n": value['tagName'], "v": value['tagId']} for value in i['items'] if
                               value.get('tagName')]
                ft.append({"key": i['eName'], "name": i['typeName'], "value": value_array})
            except:
                # Malformed filter group: log it and keep the rest.
                print(i)
        return body['type_id'], ft

    def fetch_page_data(self, page, id, b=False):
        """Fetch one episode-list page as 'title$url' entries.

        When *b* is true, also return the raw response (detailContent reads
        total_page from it).
        """
        body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
                'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
        data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
        ldata = [f'{i["t3"]}${i["url"]}' for i in data['data']['list']]
        if b:
            return data, ldata
        else:
            return ldata
diff --git a/xiaosa/py/金牌影视.py b/xiaosa/py/金牌影视.py
new file mode 100644
index 00000000..815951a4
--- /dev/null
+++ b/xiaosa/py/金牌影视.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import json
+import sys
+import threading
+import uuid
+import requests
+sys.path.append('..')
+from base.spider import Spider
+import time
+from Crypto.Hash import MD5, SHA1
+
+class Spider(Spider):
+ '''
+ 配置示例:
+ {
+ "key": "xxxx",
+ "name": "xxxx",
+ "type": 3,
+ "api": ".所在路径/金牌.py",
+ "searchable": 1,
+ "quickSearch": 1,
+ "filterable": 1,
+ "changeable": 1,
+ "ext": {
+ "site": "https://www.jiabaide.cn,域名2,域名3"
+ }
+ },
+ '''
    def init(self, extend=""):
        # ext["site"] is a comma-separated host list; pick the fastest mirror.
        # NOTE(review): when extend is empty, self.host is never set and later
        # requests raise AttributeError — confirm the config always passes ext.
        if extend:
            hosts=json.loads(extend)['site']
            self.host = self.host_late(hosts)
        pass

    def getName(self):
        # Spider interface hook; unused.
        pass

    def isVideoFormat(self, url):
        # Spider interface hook; unused.
        pass

    def manualVideoCheck(self):
        # Spider interface hook; unused.
        pass

    def destroy(self):
        # Spider interface hook; unused.
        pass
+
    def homeContent(self, filter):
        """Categories from the type API; per-type filters from the filter API."""
        cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
        fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
        result = {}
        classes = []
        filters={}
        for k in cdata['data']:
            classes.append({
                'type_name': k['typeName'],
                'type_id': str(k['typeId']),
            })
        sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
        for tid, d in fdata['data'].items():
            current_sort_values = sort_values.copy()
            if tid == '1':
                # Channel '1' does not offer the "最近更新" sort.
                del current_sort_values[0]
            filters[tid] = [
                {"key": "type", "name": "类型",
                 "value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},

                *([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
                                                "value": [{"n": i["itemText"], "v": i["itemText"]}
                                                          for i in d["plotList"]]}]),

                {"key": "area", "name": "地区",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},

                {"key": "year", "name": "年份",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},

                {"key": "lang", "name": "语言",
                 "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},

                {"key": "sort", "name": "排序", "value": current_sort_values}
            ]
        result['class'] = classes
        result['filters'] = filters
        return result
+
    def homeVideoContent(self):
        """Home page: merge the "all list" sections with the hot-search strip."""
        data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
        data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
        data=[]
        for i in data1['data'].values():
            data.extend(i['list'])
        data.extend(data2['data'])
        vods=self.getvod(data)
        return {'list':vods}

    def categoryContent(self, tid, pg, filter, extend):
        """Filtered listing; the query string is signed via getheaders."""

        params = {
            "area": extend.get('area', ''),
            "filterStatus": "1",
            "lang": extend.get('lang', ''),
            "pageNum": pg,
            "pageSize": "30",
            "sort": extend.get('sort', '1'),
            "sortBy": "1",
            "type": extend.get('type', ''),
            "type1": tid,
            "v_class": extend.get('v_class', ''),
            "year": extend.get('year', '')
        }
        # self.js(params) is evaluated before getheaders mutates params, so the
        # url deliberately lacks key/t while the signature covers them.
        data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
        result = {}
        result['list'] = self.getvod(data['data']['list'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Detail by id; flattens episodelist into '名称$id@@nid' entries."""
        data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
        vod=self.getvod([data['data']])[0]
        vod['vod_play_from']='金牌'
        # Single-episode covers reuse the vod title instead of the episode name.
        vod['vod_play_url'] = '#'.join(
            f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
            vod['episodelist'])
        vod.pop('episodelist', None)
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Signed keyword search (8 results per page)."""
        params = {
            "keyword": key,
            "pageNum": pg,
            "pageSize": "8",
            "sourceCode": "1"
        }
        data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
        vods=self.getvod(data['data']['result']['list'])
        return {'list':vods,'page':pg}
+
    def playerContent(self, flag, id, vipFlags):
        """Fetch the per-episode resolution list; returns direct-play urls."""
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
            'sec-ch-ua-platform': '"Windows"',
            'DNT': '1',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
            'sec-ch-ua-mobile': '?0',
            'Origin': self.host,
            'Referer': f'{self.host}/'
        }
        ids=id.split('@@')
        pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
        vlist=[]
        # Flat [resolution, url, resolution, url, ...] list.
        for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
        return {'parse':0,'url':vlist,'header':self.header}

    def localProxy(self, param):
        # Local proxy hook not used by this spider.
        pass
+
+ def host_late(self, url_list):
+ if isinstance(url_list, str):
+ urls = [u.strip() for u in url_list.split(',')]
+ else:
+ urls = url_list
+ if len(urls) <= 1:
+ return urls[0] if urls else ''
+
+ results = {}
+ threads = []
+
+ def test_host(url):
+ try:
+ start_time = time.time()
+ response = requests.head(url, timeout=1.0, allow_redirects=False)
+ delay = (time.time() - start_time) * 1000
+ results[url] = delay
+ except Exception as e:
+ results[url] = float('inf')
+ for url in urls:
+ t = threading.Thread(target=test_host, args=(url,))
+ threads.append(t)
+ t.start()
+ for t in threads:
+ t.join()
+ return min(results.items(), key=lambda x: x[1])[0]
+
+ def md5(self, sign_key):
+ md5_hash = MD5.new()
+ md5_hash.update(sign_key.encode('utf-8'))
+ md5_result = md5_hash.hexdigest()
+ return md5_result
+
+ def js(self, param):
+ return '&'.join(f"{k}={v}" for k, v in param.items())
+
+ def getheaders(self, param=None):
+ if param is None:param = {}
+ t=str(int(time.time()*1000))
+ param['key']='cb808529bae6b6be45ecfab29a4889bc'
+ param['t']=t
+ sha1_hash = SHA1.new()
+ sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
+ sign = sha1_hash.hexdigest()
+ deviceid = str(uuid.uuid4())
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
+ 'Accept': 'application/json, text/plain, */*',
+ 'sign': sign,
+ 't': t,
+ 'deviceid':deviceid
+ }
+ return headers
+
+ def convert_field_name(self, field):
+ field = field.lower()
+ if field.startswith('vod') and len(field) > 3:
+ field = field.replace('vod', 'vod_')
+ if field.startswith('type') and len(field) > 4:
+ field = field.replace('type', 'type_')
+ return field
+
+ def getvod(self, array):
+ return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]
+
diff --git a/xiaosa/spider.jar b/xiaosa/spider.jar
index 8327ba98..2696c4a9 100644
Binary files a/xiaosa/spider.jar and b/xiaosa/spider.jar differ