当前位置 博文首页 > 文章内容

    使用python刷访问量的示例代码

    作者:shunshunshun18 栏目:未分类 时间:2021-03-07 14:42:21

    本站于2023年9月4日,收到“大连君*****咨询有限公司”通知
    说我们IIS7站长博客,有一篇博文用了他们的图片。
    要求我们给他们一张图片6000元。要不然法院告我们

    为避免不必要的麻烦,IIS7站长博客,全站内容图片下架、并积极应诉
    博文内容全部不再显示,请需要相关资讯的站长朋友到必应搜索。谢谢!

    另祝:版权碰瓷诈骗团伙,早日弃暗投明。

    相关新闻:借版权之名、行诈骗之实,周某因犯诈骗罪被判处有期徒刑十一年六个月

    叹!百花齐放的时代,渐行渐远!



    python刷CSDN访问量

    import requests
    import re
    import time
    # Empty request body sent with every GET (requests allows data="" on GET).
    payload = ""
    # Request headers. NOTE(review): the Cookie value is a hard-coded sample
    # session copied from a browser and is almost certainly stale — replace it
    # with a fresh one if the target site requires authentication.
    headers = {
      "Accept": "*/*",
      "Accept-Encoding": "gzip, deflate, br",
      "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
      "Cookie": "l=AurqcPuigwQdnQv7WvAfCoR1OlrRQW7h; isg=BHp6mNB79CHqYXpVEiRteXyyyKNcg8YEwjgLqoRvCI3ddxqxbLtOFUBGwwOrZ3ad; thw=cn; cna=VsJQERAypn0CATrXFEIahcz8; t=0eed37629fe7ef5ec0b8ecb6cd3a3577; tracknick=tb830309_22; _cc_=UtASsssmfA%3D%3D; tg=0; ubn=p; ucn=unzbyun; x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0%26__ll%3D-1%26_ato%3D0; miid=981798063989731689; hng=CN%7Czh-CN%7CCNY%7C156; um=0712F33290AB8A6D01951C8161A2DF2CDC7C5278664EE3E02F8F6195B27229B88A7470FD7B89F7FACD43AD3E795C914CC2A8BEB1FA88729A3A74257D8EE4FBBC; enc=1UeyOeN0l7Fkx0yPu7l6BuiPkT%2BdSxE0EqUM26jcSMdi1LtYaZbjQCMj5dKU3P0qfGwJn8QqYXc6oJugH%2FhFRA%3D%3D; ali_ab=58.215.20.66.1516409089271.6; mt=ci%3D-1_1; cookie2=104f8fc9c13eb24c296768a50cabdd6e; _tb_token_=ee7e1e1e7dbe7; v=0",
      # Fixed: removed a stray backtick that had crept into "x64;` rv:47.0" —
      # it would have been sent verbatim to the server as part of the UA string.
      "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0"
    }
    # 获得文章列表urls
    def getUrls(url):
      """Fetch *url* and return the article ('details') links found in its HTML.

      Returns a list of unique URLs in first-seen order. Uses the module-level
      ``payload`` and ``headers``.
      """
      # Issue the GET request with the shared request body and headers.
      resp = requests.request("GET", url, data=payload, headers=headers)
      # Decode using the encoding sniffed from the body, not the HTTP header —
      # more reliable for pages that misdeclare their charset.
      resp.encoding = resp.apparent_encoding
      html_source = resp.text
      # Pull every https:// link ending in a digit out of the page.
      # Raw string avoids the invalid-escape warning the old "\d" produced.
      candidates = re.findall(r"https://[^>\";\']*\d", html_source)
      new_urls = []
      seen = set()  # O(1) membership instead of scanning new_urls each time
      for link in candidates:  # renamed: no longer shadows the url parameter
        # Keep only article-detail pages, once each, preserving order.
        if 'details' in link and link not in seen:
          seen.add(link)
          new_urls.append(link)
      return new_urls
    
    # Gather the article links once, then revisit each of them forever,
    # pausing between requests.
    urls = getUrls("主页地址")
    while True:
      for target in urls:
        # One GET per article; the response body is discarded.
        requests.request("GET", target, data=payload, headers=headers)
        print(target, "Ok")
        time.sleep(3)
      time.sleep(3)
    
    

    python刷博客园访问量 

    import requests
    import time
    import re
    from lxml import etree
    # Headers sent with every list-page fetch.
    headers = {
        'referer': 'https://i.cnblogs.com/posts',
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36",
    }
    # Optional HTTP proxy — 'xxxx:xxx' is a placeholder; fill in a real
    # host:port if you want the view requests routed through a proxy.
    proxy = {
        'http': 'xxxx:xxx',
    }
    
    def get_urls(url):
      """Fetch one blog list page and return the post-id fragments linked from it.

      The regex is tied to one specific blog — change the blog name in the
      pattern to match your own address.
      """
      ret = requests.get(url=url, headers=headers)
      # Force UTF-8 decoding (cnblogs serves UTF-8).
      ret.encoding = 'utf-8'
      # Raw string for the regex. NOTE(review): the original pattern also
      # required a literal ' rel="external nofollow" ' after the href — almost
      # certainly markup injected when this code was republished as HTML, and
      # it would stop the pattern from matching the real page; removed.
      urls = re.findall(r'href="https://www.cnblogs.com/shiguanggege/p/(.*?).html"', ret.text)
      return urls
    
    # Collect the de-duplicated list of post URLs from list pages 1-9.
    # (Change the blog name in the URLs below to your own address.)
    urls = []
    for page in range(1, 10):
      page_url = f'https://www.cnblogs.com/shiguanggege/default.html?page={page}'
      for link in get_urls(page_url):
        post = f'https://www.cnblogs.com/shiguanggege/p/{link}.html'
        if post not in urls:
          urls.append(post)
    # Endlessly revisit every post through the (optional) proxy.
    while True:
      for post in urls:
        requests.get(url=post, proxies=proxy)
        print(post, 'ok')
        time.sleep(1)
      time.sleep(3)