@@ -204,35 +204,51 @@ class Spider(Spider):
        if '导演:' in info_text:
            director_match = re.search(r'导演:([^/]+)', info_text)
            if director_match: vod['vod_director'] = director_match.group(1).strip()
        playFrom = []
        playList = []
        h3_elements = doc('h3')
        play_links = doc('a[href*="/play/"]')
        for i in range(h3_elements.length):
            header_elem = h3_elements.eq(i)
            header_text = header_elem.text().strip()
            if any(keyword in header_text for keyword in ['播放', '下载', 'BD5', 'UC', '夸克']):
                playFrom.append(header_text)
                vodItems = []
                for j in range(play_links.length):
                    try:
                        link = play_links.eq(j)
                        href = link.attr('href')
                        name = link.text().strip()
                        if not href or not name: continue
                        tId_match = re.search(r'/play/([^.]+)\.html', href)
                        if not tId_match: continue
                        tId = tId_match.group(1)
                        if 'BD5' in header_text and '-1-' in tId: vodItems.append(name + "$" + tId)
                        elif 'UC' in header_text and '-2-' in tId: vodItems.append(name + "$" + tId)
                        elif '夸克' in header_text and '-3-' in tId: vodItems.append(name + "$" + tId)
                    except: continue
                if vodItems: playList.append('#'.join(vodItems))
                else: playList.append("")
        # Improved play-source extraction logic
        vodlist_heads = doc('.stui-vodlist__head')
        for i in range(vodlist_heads.length):
            head = vodlist_heads.eq(i)
            h3_elem = head.find('h3')
            if h3_elem.length == 0:
                continue
            header_text = h3_elem.text().strip()
            if not any(keyword in header_text for keyword in ['播放', '下载', 'BD5', 'UC', '夸克']):
                continue
            playFrom.append(header_text)
            vodItems = []
            # Collect every play link under the current source block
            play_links = head.find('a[href*="/play/"]')
            for j in range(play_links.length):
                try:
                    link = play_links.eq(j)
                    href = link.attr('href')
                    name = link.text().strip()
                    if not href or not name:
                        continue
                    tId_match = re.search(r'/play/([^.]+)\.html', href)
                    if not tId_match:
                        continue
                    tId = tId_match.group(1)
                    vodItems.append(name + "$" + tId)
                except:
                    continue
            playList.append('#'.join(vodItems) if vodItems else "")
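        # Format built here: play sources are joined with '$$$', episodes within a source
        # with '#', and each episode entry is 'name$id' (id being the /play/<id>.html slug).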
        vod['vod_play_from'] = '$$$'.join(playFrom) if playFrom else ""
        vod['vod_play_url'] = '$$$'.join(playList) if playList else ""
        result = {'list': [vod]}
        return result
    def searchContent(self, key, quick):
        url = 'https://www.libvio.site/index.php/ajax/suggest?mid=1&wd={0}'.format(key)
        rsp = self._fetch_with_cache(url, headers=self.header)
@@ -251,7 +267,7 @@ class Spider(Spider):
        years = [{"n": "全部", "v": ""}]
        for year in range(2025, 1999, -1):
            years.append({"n": str(year), "v": str(year)})
@@ -350,7 +366,7 @@ class Spider(Spider):
{ " n " : " 泰国 " , " v " : " 泰国 " }
]
} ,
{ " key " : " year " , " name " : " 年份 " , " value " : years [ : 25 ] }
{ " key " : " year " , " name " : " 年份 " , " value " : years [ : 25 ] }
]
@@ -371,7 +387,7 @@ class Spider(Spider):
{ " n " : " 加拿大 " , " v " : " 加拿大 " } , { " n " : " 其他 " , " v " : " 其他 " }
]
} ,
{ " key " : " year " , " name " : " 年份 " , " value " : years [ : 25 ] }
{ " key " : " year " , " name " : " 年份 " , " value " : years [ : 25 ] }
]
return {
@@ -383,52 +399,57 @@ class Spider(Spider):
}
header = { " Referer " : " https://www.libvio.site " , " User-Agent " : " Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 " }
    def playerContent(self, flag, id, vipFlags):
        # If the id is already a push:// link, return it directly
        if id.startswith('push://'):
            return {"parse": 0, "playUrl": "", "url": id, "header": ""}
        result = {}
        url = 'https://www.libvio.site/play/{0}.html'.format(id)
        try:
            rsp = self._fetch_with_cache(url, headers=self.header)
            if not rsp:
                return {"parse": 1, "playUrl": "", "url": url, "header": ujson.dumps(self.header)}
            if self._is_cloud_drive(id): return self._handle_cloud_drive(url, rsp, id)
            return self._handle_bd5_player(url, rsp, id)
            return self._handle_cloud_drive(url, rsp, id)
        except Exception as e:
            print(f"Player parse error: {e}")
            return {"parse": 1, "playUrl": "", "url": url, "header": ujson.dumps(self.header)}
    def _is_cloud_drive(self, id):
        parts = id.split('-')
        if len(parts) >= 2:
            source_type = parts[1]
            return source_type in ['2', '3']
        return False
    def _handle_cloud_drive(self, url, rsp, id):
        result = {}
        try:
            doc = self._parse_html_fast(rsp.text)
            iframe_src = doc('iframe').attr('src')
            if iframe_src:
                try:
                    iframe_content = self._fetch_with_cache(iframe_src, headers=self.header)
                    if not iframe_content: raise Exception("Iframe fetch failed")
                    iframe_doc = self._parse_html_fast(iframe_content.text)
                    uc_link = iframe_doc('a[href*="drive.uc.cn"]').attr('href')
                    if uc_link: return {"parse": 0, "playUrl": "", "url": uc_link, "header": ""}
                    quark_link = iframe_doc('a[href*="pan.quark.cn"]').attr('href')
                    if quark_link: return {"parse": 0, "playUrl": "", "url": quark_link, "header": ""}
                except Exception as e: print(f"iframe parse failed: {e}")
            page_text = rsp.text
            # Added \b for word boundaries to avoid partial matches
            uc_match = re.search(r'https://drive\.uc\.cn/s/[^"\s]+?\b', page_text)
            if uc_match: return {"parse": 0, "playUrl": "", "url": uc_match.group(0), "header": ""}
            quark_match = re.search(r'https://pan\.quark\.cn/s/[^"\s]+?\b', page_text)
            if quark_match: return {"parse": 0, "playUrl": "", "url": quark_match.group(0), "header": ""}
        except Exception as e: print(f"Cloud drive parse error: {e}")
        return {"parse": 1, "playUrl": "", "url": url, "header": ujson.dumps(self.header)}
            # First, try to extract the cloud-drive link from the JavaScript player variable
            script_pattern = r'var player_[^=]*=\s*({[^}]+})'
            matches = re.findall(script_pattern, page_text)
            for match in matches:
                try:
                    player_data = ujson.loads(match)
                    from_value = player_data.get('from', '')
                    url_value = player_data.get('url', '')
                    if from_value == 'kuake' and url_value:
                        # Quark cloud drive
                        drive_url = url_value.replace('\\/', '/')
                        return {"parse": 0, "playUrl": "", "url": f"push://{drive_url}", "header": ""}
                    elif from_value == 'uc' and url_value:
                        # UC cloud drive
                        drive_url = url_value.replace('\\/', '/')
                        return {"parse": 0, "playUrl": "", "url": f"push://{drive_url}", "header": ""}
                except:
                    continue
        except Exception as e:
            print(f"Cloud drive parse error: {e}")
        # If all cloud-drive parsing fails, fall back to the BD5 play source
        return self._handle_bd5_player(url, rsp, id)
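    # Illustrative only (field shape assumed from the regex and the 'from'/'url' reads above):
    # the play page is expected to embed something like
    #     var player_aaaa = {"url": "https:\/\/pan.quark.cn\/s\/abc123", "from": "kuake"}
    # with escaped slashes in 'url', hence the replace('\\/', '/') before wrapping in push://.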
    def _handle_bd5_player(self, url, rsp, id):
        try:
            doc = self._parse_html_fast(rsp.text)
            page_text = rsp.text
            api_match = re.search(r'https://www\.libvio\.site/vid/plyr/vr2\.php\?url=([^&"\s]+)', page_text)
            if api_match:
return { " parse " : 0 , " playUrl " : " " , " url " : api_match . group ( 1 ) , " header " : ujson . dumps ( { " User-Agent " : self . header [ " User-Agent " ] , " Referer " : " https://www.libvio.site/ " } ) }
            iframe_src = doc('iframe').attr('src')
            if iframe_src:
@@ -475,4 +496,4 @@ class Spider(Spider):
            action = resp.content
        except Exception as e:
            print(f"Local proxy error: {e}")
        return [200, "video/MP2T", action, param.get('header', '')]