Tutorial: Checking Whether Baidu Netdisk Resources Are Still Valid (with a Large Resource Collection)


I'll just paste the code directly; those who understand it will know what to do with it. In short, the script walks through a thread collection on 52pojie.cn, pulls the Baidu Netdisk link and its extraction code out of each thread, checks whether the share is still valid, and appends the valid entries to result.txt.

 

import requests
import re
from bs4 import BeautifulSoup
import time
import json
from requests import exceptions


class Baiduyun:
    '''
    Check whether a Baidu Netdisk share link is still valid.
    Only links that require an extraction code are handled; any other type is treated as expired.
    '''
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    }

    def __init__(self, url):
        self.url = url

    def get_link(self):
        # Extract the 22-character share id from the pan.baidu.com URL
        match = re.search('https://pan.baidu.com/s/1(.{22})', self.url, re.S)
        if match:
            return match.group(1)

    def verify(self):
        share_id = self.get_link()
        if not share_id:
            # Not a recognizable share URL, treat it as expired
            print('expired')
            return False
        init_url = 'https://pan.baidu.com/share/init?surl=' + share_id
        response = requests.get(init_url, headers=Baiduyun.headers)
        if response.status_code == 200:
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'lxml')
            # A valid share shows the extraction-code prompt box on the init page
            if soup.select('dl.pickpw.clearfix'):
                clearfix = soup.select('dl.pickpw.clearfix')[0]
                notice = clearfix.dt.string
                # '请输入提取码' means "please enter the extraction code"
                if '请输入提取码' in notice:
                    print('valid')
                    return True
            else:
                print('expired')
                return False
        else:
            print(response.status_code)
            print('expired')
            return False


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}


def get_index(url):
    # Collect the title and detail-page URL of every thread on one collection page
    index_list = []
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'lxml')
        bm_c = soup.select('#ct > div.mn > div.tl.bm > div.bm_c')[0]
        items = bm_c.select('th > a')
        for item in items:
            detail_info = {}
            href = item['href']
            title = item.string
            detail_url = 'https://www.52pojie.cn/' + href
            detail_info['detail_url'] = detail_url
            detail_info['title'] = title
            index_list.append(detail_info)
    return index_list


def get_detail(url):
    # Pull the Baidu Netdisk link and its 4-character extraction code out of a thread page
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        response.encoding = 'GB2312'
        # e.g. https://pan.baidu.com/s/1dOOudVQxeBpSHD8YMrmKTQ 提取码:p5N9
        # ('提取码' / '密码' are the labels the forum uses for the extraction code)
        baiduyun_link_match = re.search(
            '(https://pan.baidu.com/s/1.{22}).*?(?:提取码|密码).*?([A-Za-z0-9]{4})',
            response.text, re.S)
        if baiduyun_link_match:
            raw_url = baiduyun_link_match.group(1)
            password = baiduyun_link_match.group(2)
            return {
                'url': raw_url,
                'password': password
            }
    else:
        print('status_code', response.status_code)


def save_result(content):
    # Append each valid result to result.txt, one JSON object per line
    with open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content) + '\n')


if __name__ == "__main__":
    for i in range(1, 18):
        try:
            url = 'https://www.52pojie.cn/forum.php?mod=collection&action=view&ctid=1667&page={}'.format(str(i))
            print(url)
            index_list = get_index(url)
            for detail_info in index_list:
                detail_url = detail_info['detail_url']
                print(detail_info['title'])
                result = get_detail(detail_url)
                if result:
                    result['title'] = detail_info['title']
                    print(result)
                    test_valid = Baiduyun(result['url'])
                    isvalid = test_valid.verify()
                    if isvalid:
                        save_result(result)
                time.sleep(1)
        except exceptions.RequestException as e:
            time.sleep(10)
            print(e)
            continue
        except:
            time.sleep(10)
            continue

# url = 'https://pan.baidu.com/s/1YC_MJ_RzcmK3EmTAKSST6w'
# # url = 'https://pan.baidu.com/s/1CtUlgWRaI-bYcSwEbuAN3A'
# mybaiduyuan = Baiduyun(url)
# mybaiduyuan.verify()
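The commented-out lines at the end hint at how to check a single link without crawling anything. A minimal sketch of that usage, assuming the code above is saved as a module named check_pan.py (a hypothetical filename; if everything lives in one file, drop the import):

from check_pan import Baiduyun  # hypothetical module name for the script above

# The share URL below is the example from the commented-out lines; replace it with your own.
url = 'https://pan.baidu.com/s/1YC_MJ_RzcmK3EmTAKSST6w'
checker = Baiduyun(url)
if checker.verify():
    print('link is still valid')
else:
    print('link is expired or not a recognized share URL')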
