
Python: crawling through an IP proxy

2024-12-10 Source: 要发发知识网
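
The script below starts from a seed page, uses BeautifulSoup to separate internal links from external ones, and recursively collects every external link it finds, routing all requests through an HTTP proxy configured with urllib's ProxyHandler.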
#__author__ = 'Administrator'
#coding=utf-8
from urllib.request import urlopen  
from urllib.parse import urlparse  
from bs4 import BeautifulSoup  
import re  
import datetime  
import random  
import io  
import os  
import sys  
from urllib  import request  
import urllib  
  
pages = set()  
random.seed(datetime.datetime.now())  
  
# Re-wrap stdout so non-ASCII output prints correctly on a GBK Windows console
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}  
  
  
# Return a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc
    internalLinks = []
    # Find all links that begin with "/" or that contain the current domain
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                if link.attrs['href'].startswith("/"):
                    internalLinks.append(includeUrl + link.attrs['href'])
                else:
                    internalLinks.append(link.attrs['href'])
    return internalLinks
  
# Return a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Find all links that start with "http" or "www" and do not
    # contain the current domain (negative lookahead)
    for link in bsObj.findAll("a", href=re.compile("^(http|www)((?!" + excludeUrl + ").)*$")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks
  
  
# Fetch a page and return one random external link from it; if the page
# has no external links, descend into a random internal link and retry
def getRandomExternalLink(startingPage):
    req = request.Request(startingPage, headers=headers)
    html = urlopen(req)
    bsObj = BeautifulSoup(html.read(), "html.parser")
    externalLinks = getExternalLinks(bsObj, urlparse(startingPage).netloc)
    if len(externalLinks) == 0:
        print("No external links found; traversing the whole site instead")
        domain = urlparse(startingPage).scheme + "://" + urlparse(startingPage).netloc
        internalLinks = getInternalLinks(bsObj, domain)
        return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]
  
def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("The random external link is: " + externalLink)
    followExternalOnly(externalLink)
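# A minimal usage sketch for the random walk above. The start URL is an
# arbitrary assumption -- any crawlable site would do. Note that
# followExternalOnly() recurses with no base case, so it runs until a
# request fails or Python's recursion limit is reached.
#followExternalOnly("http://oreilly.com")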
  
# Collect all external links found across the site
allExtLinks = set()  
allIntLinks = set()  
def getAllExternalLinks(siteUrl):
    # Route all requests through an HTTP proxy
    proxy_handler = urllib.request.ProxyHandler({'http': '183.77.250.45:3128'})
    # Only needed for proxies that require authentication; unused here
    proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
    #proxy_auth_handler.add_password('realm', '123.123.2123.123', 'user', 'password')
    opener = urllib.request.build_opener(urllib.request.HTTPHandler, proxy_handler)
    urllib.request.install_opener(opener)

    req = request.Request(siteUrl, headers=headers)
    html = urlopen(req)
    bsObj = BeautifulSoup(html.read(), "html.parser")
    domain = urlparse(siteUrl).scheme + "://" + urlparse(siteUrl).netloc
    internalLinks = getInternalLinks(bsObj, domain)
    externalLinks = getExternalLinks(bsObj, domain)

    # Collect external links
    for link in externalLinks:
        if link not in allExtLinks:
            allExtLinks.add(link)
            #print(link)
            print("External link about to be fetched: " + link)
    # Collect internal links and recurse into each one
    for link in internalLinks:
        if link not in allIntLinks:
            print("Internal link about to be fetched: " + link)
            allIntLinks.add(link)
            getAllExternalLinks(link)
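
# Entry point -- a sketch assuming "http://oreilly.com" as an arbitrary
# seed site; record the seed as visited, then start the recursive crawl
allIntLinks.add("http://oreilly.com")
getAllExternalLinks("http://oreilly.com")

Once install_opener() has run, every later urlopen() call in the process goes through the proxy. A quick way to confirm the proxy is actually being used (assuming the public echo service http://httpbin.org/ip is reachable) is to request your apparent origin address:

req = request.Request("http://httpbin.org/ip", headers=headers)
print(urlopen(req).read().decode("utf-8"))  # should report the proxy's IP, not your own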
  
  
  
  