Autosqlmap v2 python

The loop now uses while 1, which speeds it up, and the code is split into four parts so it is less error-prone.

Filtering now relies on BeautifulSoup instead of plain regular expressions; it is faster, the filtering is more effective, and only links that carry parameters are scanned (see the short sketch after these notes).

Each URL is visited only once, which reduces traffic and improves efficiency.

A Baidu search mode has been added, so you can search for a keyword directly.
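
To make the filtering step concrete, here is a condensed standalone sketch of the idea. The helper name parameterized_links is only illustrative; the full script below does the same thing inside inject().

# Sketch of the filtering step: parse the page with BeautifulSoup and keep only
# links that point to a dynamic page (asp/aspx/php/jsp) with query parameters.
import re
from bs4 import BeautifulSoup

def parameterized_links(html):  # illustrative helper, not part of the script below
    soup = BeautifulSoup(html, "html.parser")
    urls = set()
    for a in soup.findAll('a'):
        href = a.get('href')
        if href and re.match(r'^(http|https).*?(asp|php|aspx|jsp)\?', href):
            urls.add(href)
    return list(urls)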

#coding:utf-8
import urllib2
import random
import re
from bs4 import BeautifulSoup

def randHeader(): # build a request header with a randomly chosen User-Agent
    head_connection = ['Keep-Alive','close']
    head_accept = ['text/html, application/xhtml+xml, */*']
    head_accept_language = ['zh-CN,fr-FR;q=0.5','en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']
    head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                       'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
                       'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
                       'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
                       'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']
    header = {
        'Connection': head_connection[0],
        'Accept': head_accept[0],
        'Accept-Language': head_accept_language[1],
        'User-Agent': random.choice(head_user_agent)
    }
    return header

def url_user_agent(urls): # send the URL through the local proxy so Groudscan can scan it
	proxy = {'http':'127.0.0.1:8080'} # the passive scanner listens here
	proxy_support = urllib2.ProxyHandler(proxy)
	opener = urllib2.build_opener(proxy_support)
	i_headers = randHeader()
	req = urllib2.Request(urls, headers=i_headers)
	try:
		opener.open(req) # the response body is not needed, one request is enough
	except:
		print "agent error"

def inject(url): # collect every parameterized link on the page and pass each one to Groudscan
	sql_urls = []
	i_headers = randHeader()
	req = urllib2.Request(url, headers=i_headers)
	try:
		response = urllib2.urlopen(req)
	except:
		return
	doc = response.read()
	soup = BeautifulSoup(doc, "html.parser")
	links = soup.findAll('a')
	for link in links:
		_url = link.get('href')
		if _url is None:
			continue
		# keep only dynamic pages that carry query parameters
		if not re.match(r'^(http|https).*?(asp|php|aspx|jsp)\?.*?$', _url):
			continue
		sql_urls.append(_url)
	sql_urls = list(set(sql_urls)) # de-duplicate
	for sql_url in sql_urls:
		print sql_url
		url_user_agent(sql_url)

def spider(url): # crawl every link on the page and hand each one to inject()
	links = []
	i_headers = randHeader()
	req = urllib2.Request(url, headers=i_headers)
	try:
		response = urllib2.urlopen(req)
	except:
		return
	doc = response.read()
	soup = BeautifulSoup(doc, "html.parser")
	alink = soup.findAll('a')
	for link in alink:
		_url = link.get('href')
		if _url is None:
			continue
		# keep only site home pages such as http://example.com/
		if not re.match(r'^(http|https)://.*?(com|cn|org|net)/$', _url):
			continue
		links.append(_url)
	links = list(set(links)) # de-duplicate
	for spider_url in links:
		inject(spider_url)

def baidu(url): # search Baidu for the keyword and resolve the real URL behind each result link
	links = []
	i_headers = randHeader()
	req = urllib2.Request(url, headers=i_headers)
	try:
		response = urllib2.urlopen(req)
	except:
		return
	doc = response.read()
	soup = BeautifulSoup(doc, "html.parser")
	alink = soup.findAll('a', attrs={"class":"c-showurl"}) # Baidu result links
	for link in alink:
		_url = link.get('href')
		if _url is not None:
			links.append(_url)
	for baidu_url in links:
		req2 = urllib2.Request(baidu_url, headers=i_headers)
		try:
			response = urllib2.urlopen(req2)
		except:
			print "baidu error"
			continue
		realurl = response.geturl() # follow Baidu's redirect to the real site
		url_user_agent(realurl)

x = 0
while 1:
	# wd is the search keyword (inurl:php?id= here, replace it as needed),
	# rn is the number of results per page and pn is the result offset
	url = "https://www.baidu.com/s?wd=inurl:php?id=&rn=50&pn=" + str(x)
	baidu(url)
	x = x + 50
	#if x > 1000:  # break out once more than 1000 results have been paged through
		#break
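
The script above targets Python 2 (urllib2 and print statements). As a rough, hedged sketch only, the proxy hand-off in url_user_agent() could be written for Python 3 with the requests library along these lines, assuming requests is installed and randHeader() is reused unchanged; url_user_agent_py3 is just an illustrative name.

# Rough Python 3 / requests equivalent of url_user_agent() (a sketch, not part of
# the original script): send one GET through the local proxy on 127.0.0.1:8080 so
# the passive scanner (Groudscan) sees the parameterized URL.
import requests

def url_user_agent_py3(url):
    proxies = {'http': 'http://127.0.0.1:8080'}
    try:
        requests.get(url, headers=randHeader(), proxies=proxies, timeout=10)
    except requests.RequestException:
        print("agent error")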