Information Gathering in Vendor-Side Penetration Testing (乙方渗透测试之信息收集)


Author: wind_飘 | Published 2017-09-27 11:31

    Preface

    This post covers the information-gathering phase, which can take up roughly 60% of the total time of a penetration test, depending on skill and circumstances. The goal is to collect as much asset information as possible without disrupting the client's normal business operations; care and patience are key.

    Main Site

    Locate quickly, deliver quickly

    Start by browsing the main site. It gives a very direct sense of the target's business, nature, and operating model, which helps a great deal throughout the rest of the engagement.

    For the sites where the boss wants vulnerabilities produced quickly (it helps sales talk to customers), the main site can often be compromised directly: four-digit verification codes with no brute-force protection, SQL injection, exposed backup files, known vulnerabilities in frameworks and components, weak back-end admin passwords, and email harvesting / brute forcing / social engineering with tools such as theHarvester.
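    For example, a quick pass at harvesting email addresses might look like the following (assuming theHarvester is installed; the data source and result limit are just placeholders):

    theHarvester -d target.com -b bing -l 200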

    For vendors with no defined scope (the whole estate is testable), related assets can be gathered through the channels below, for example domains used only by development and operations, domains that resolve to internal addresses, and the ownership of host IPs.

    Search-engine hacking syntax: searching for target.com or the company name can also turn things up, but the results need cleaning.

    whois lookup / reverse lookup by registrant / reverse lookup by email / related assets

    站长之家

    爱站

    微步在线 (enterprise edition)

    Reverse IP lookup

    天眼查

    虎妈查

    Historical vulnerability lookup

    GitHub leaks

    The information collected at this stage prepares the ground for the next step: collecting and brute forcing subdomains.
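    As a small illustration of automating the whois step, here is a minimal sketch using the third-party python-whois package (an assumed dependency; the fields shown are the ones it commonly returns):

# whois_lookup.py -- minimal sketch, assumes "pip install python-whois"
import whois

def whois_info(domain):
    w = whois.whois(domain)              # raw whois record for the domain
    return {
        'registrar': w.registrar,        # registrar name
        'emails': w.emails,              # contact emails, useful for reverse lookup
        'name_servers': w.name_servers,  # NS records, useful for zone-transfer checks
    }

if __name__ == '__main__':
    print(whois_info('target.com'))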

    Subdomains

    There are many ways to collect subdomains: Layer子域名挖掘机 4.2纪念版, subDomainsBrute (李劼杰), wydomain (猪猪侠), Sublist3r, site:target.com, GitHub code repositories, packet capture and analysis of request/response values (redirects, file uploads, app/API endpoints, etc.), and online lookup sites such as 站长帮手 links. Some of these are affected by wildcard DNS resolution. The techniques I noted down are listed below.

    DNS zone transfer vulnerability

    Linux:

    [dig @ns.example.com example.com AXFR]

    Windows:

    [nslookup -> set type=ns -> target.com -> server ns.target.com -> ls target.com]
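    The same check can be scripted against every name server; a minimal sketch using the third-party dnspython package (an assumed dependency):

# axfr_check.py -- minimal zone-transfer check, assumes "pip install dnspython"
import dns.query
import dns.resolver
import dns.zone

def try_axfr(domain):
    for ns in dns.resolver.query(domain, 'NS'):      # enumerate the domain's name servers
        ns = str(ns).rstrip('.')
        try:
            zone = dns.zone.from_xfr(dns.query.xfr(ns, domain, timeout=5))
            print('[+] %s allows zone transfer:' % ns)
            for name in zone.nodes.keys():           # every record name leaked by AXFR
                print('    %s.%s' % (name, domain))
        except Exception:
            print('[-] %s refused AXFR' % ns)

if __name__ == '__main__':
    try_axfr('example.com')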

    GetDomainsBySSL

    censys.io certificates

    crt.sh certificate lookup (see the sketch after this list)

    shodan/fofa.so/zoomeye

    dnsdb.io

    api.hackertarget

    community.riskiq.com

    Brute forcing nth-level subdomains

    subdomain3

    FuzzDomain
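    As an example of the certificate-transparency route mentioned above, a minimal sketch that pulls subdomains from crt.sh's public JSON interface (the query format is an assumption based on the service's behaviour):

# crtsh_subs.py -- minimal sketch: subdomains from certificate-transparency logs
import requests

def crtsh_subdomains(domain):
    url = 'https://crt.sh/?q=%%.%s&output=json' % domain
    subs = set()
    try:
        for entry in requests.get(url, timeout=10).json():
            # name_value may hold several names separated by newlines
            for name in entry.get('name_value', '').split('\n'):
                if name.endswith(domain):
                    subs.add(name.lstrip('*.'))
    except Exception:
        pass
    return sorted(subs)

if __name__ == '__main__':
    for sub in crtsh_subdomains('target.com'):
        print(sub)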

    Ports and Services

    Assuming the subdomain list exported from Layer is saved as target.com.txt, scan the default ports with nmap:

    nmap -v -A -F -iL target.com.txt -oX target_f.xml

    Scan all ports:

    nmap -v -A -p1-65535 -iL target.com.txt -oX target_all.xml
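    The -oX XML output can then be post-processed. A minimal sketch, using only the standard library, that lists the open ports recorded in target_all.xml:

# nmap_ports.py -- minimal sketch: extract open ports from nmap -oX output
import xml.etree.ElementTree as ET

def open_ports(xml_file):
    tree = ET.parse(xml_file)
    for host in tree.getroot().findall('host'):
        addr = host.find('address').get('addr')
        for port in host.findall('./ports/port'):
            if port.find('state').get('state') == 'open':
                svc = port.find('service')
                name = svc.get('name') if svc is not None else '?'
                print('%s:%s  %s' % (addr, port.get('portid'), name))

if __name__ == '__main__':
    open_ports('target_all.xml')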

    Port scanning script

    Port exploitation summary

    Web Probing

    Probe the web information of each site. Assume all the subdomains are in target.com_domains.txt (domains exported directly from Layer).

    BSDR_Banners (download password: rzv0)

    Web fingerprinting

    Email naming-convention collection / brute forcing / social engineering

    For details, see: 渗透标准 (the penetration testing standard).
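    Once a naming convention is known (for example first name plus surname), candidate mailboxes can be generated for later brute forcing; a minimal sketch with made-up names and patterns:

# gen_emails.py -- minimal sketch: expand names into candidate mailboxes
# (the patterns and the staff list below are illustrative assumptions)
def gen_emails(names, domain):
    candidates = []
    for first, last in names:
        patterns = [
            first + last,        # zhangsan
            first[0] + last,     # zsan
            last + first[0],     # sanz
            first + '.' + last,  # zhang.san
        ]
        for p in patterns:
            candidates.append('%s@%s' % (p, domain))
    return candidates

if __name__ == '__main__':
    staff = [('zhang', 'san'), ('li', 'si')]
    for mail in gen_emails(staff, 'target.com'):
        print(mail)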

    QQ groups, WeChat official accounts, Baidu Wenku and other social channels can sometimes yield a page-long password list that follows a specific pattern. A small script is attached directly below.

    main.py

    Batch-scans the sites listed in target.com_domains.txt:

# -*- coding: UTF-8 -*-
import requests
import re
import os
from bs4 import BeautifulSoup
from multiprocessing.pool import Pool
import threading
import time
# from whatcms import *
from dirscan import *
from baidu_site import *
# from baidu_inurl import *
# from getalllink import *
import warnings
warnings.filterwarnings("ignore")
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

global cookie
cookie = '1'
# one report file per run, named after the input list plus the current date
output_file = sys.argv[1].split('.')[0] + time.strftime('%Y-%m-%d', time.localtime(time.time())) + '.html'  # -%H-%I-%S


def check(url):
    # probe one site: status code, Server / X-Powered-By headers and title,
    # plus the baidu_site() and dirscan() helpers, then append a line to the report
    try:
        print url
        header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Upgrade-Insecure-Requests': '1',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Cookie': cookie,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'}
        req = requests.get(url, headers=header, timeout=5, allow_redirects=False)
        status = req.status_code
        html = req.content
        soup = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
        server = '      '
        try:
            server = req.headers['Server']
        except: pass
        title = '  '
        try:
            title = soup.title.string
        except: pass
        X_Powered_By = '    '
        try:
            X_Powered_By = req.headers['X-Powered-By']
        except: pass
        output = open(output_file, "a")
        # url appears twice in the report line (display text and link target)
        str1 = '''
【%s】  %s  %s  %s  %s  %s  %s  %s
''' % (str(status), server, X_Powered_By, url, url, title, baidu_site(url), dirscan(url))  # whatcms(url), baidu_inurl(url), dirlink(url)
        output.write(str1)
        output.close()
        return 1
    except:
        # note: a finally clause returning 0 here would swallow the success return value
        return 0


def get_domain(adr):
    # read the Layer export line by line and expand every host into
    # http://host:port (for each port in web_port) plus https://host
    files = open(adr, 'r')
    regex = re.compile(r"(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", re.IGNORECASE)
    while True:
        line = files.readline()
        if line:
            urls = regex.findall(line)
            for i in urls:
                for port in web_port:
                    domain_http = 'http://' + i + ':' + port
                    if domain_http not in domains:
                        domains.append(domain_http)
            parts = line.split('\n')
            domain_https = 'https://' + parts[0]
            if domain_https not in domains:
                domains.append(domain_https)
        else:
            break


if __name__ == '__main__':
    domains = []
    web_port = ['80', ]  # ,'8080',
    in_domain = sys.argv[1]
    get_domain(in_domain)
    output = open(output_file, "w")  # create/empty the report file
    str1 = ""
    output.write(str1)
    output.close()
    pool = Pool(2)  # two worker processes
    pool.map(check, domains)
    pool.close()
    pool.join()
    os._exit(0)

    dirscan.py

    Implements a simple directory fuzz:

import requests
import sys
import urlparse
import random
import re


def list(url):
    # build the fuzz list from list.txt, substituting %flag% with the target hostname
    # (the name shadows the builtin list(), kept as in the original)
    keys = []
    f = open('list.txt', 'r')
    bak = urlparse.urlparse(url).hostname  # .netloc.replace(':8080','').replace(':80','')
    for i in f.readlines():
        key = i.strip().replace('%flag%', bak)
        if key not in keys:
            keys.append(key)
    return keys


def dirscan(url):
    flag = []
    keys = list(url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'}
    user_agent = ['Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0',
                  'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0',
                  'Mozilla/5.0 (Windows; U; Windows NT 5.1 ; x64; en-US; rv:1.9.1b2pre) Gecko/20081026 Firefox/3.1b2pre',
                  'Opera/10.60 (Windows NT 5.1; U; zh-cn) Presto/2.6.30 Version/10.60', 'Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4062; en; U; ssr)',
                  'Mozilla/5.0 (Windows; U; Windows NT 5.1; ; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14',
                  'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr; rv:1.9.2.4) Gecko/20100523 Firefox/3.6.4 ( .NET CLR 3.5.30729)',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5']
    # baseline request; fuzzed paths are compared against its content length
    check = requests.get(url=url, timeout=3, headers=headers)
    if check.url[-1:] != '/':
        check.url = check.url + '/'
    # print check.url
    # print url
    try:
        if check.headers['X-Frame-Options']:
            return 'Nginx 404/401'
    except: pass
    if check.url[:-1] != url:
        # the baseline request was redirected; report the real URL instead of fuzzing
        return check.url[:-1]
    # url.replace(':8080','').replace(':80','')
    # url = urlparse.urlsplit(check.url).scheme+'://'+urlparse.urlsplit(check.url).netloc
    for i in keys:
        urli = url + i
        UA = random.choice(user_agent)
        headers['User-Agent'] = UA
        try:
            r = requests.get(url=urli, timeout=3, headers=headers)
            # print r.status_code
            # print r.url
            # print len(r.content), len(check.content)
            if r.status_code == 200 and len(check.content) != len(r.content) and r.url == urli:
                flag.append(i)
        except: pass
    if len(flag) > 25:
        # too many hits usually means a catch-all page, so treat the result as noise
        return
    else:
        return flag
    '''
    if re.findall(r"\['/robots\.txt',  (.*?) '/tmp', '/file'\]", str(flag)):
        return
    else:
        return flag'''


if __name__ == '__main__':
    print dirscan(sys.argv[1])

# svn : text/plain
# Ds_stroe: 'application/octet-stream' == r.headers['Content-Type']
# if 'application' in r.headers['Content-Type']:
#     flag.append(i)

    baidu_site.py

    Adds a Baidu site: search for each subdomain:

import requests
import re
import sys
import urlparse
import random


def baidu_site(url):
    # search Baidu for "site:<hostname>" and report whether the host is indexed
    url = urlparse.urlparse(url).hostname
    baidu_url = 'https://www.baidu.com/s?ie=UTF-8&wd=site:{}'.format(url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'}
    user_agent = ['Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0',
                  'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0',
                  'Mozilla/5.0 (Windows; U; Windows NT 5.1 ; x64; en-US; rv:1.9.1b2pre) Gecko/20081026 Firefox/3.1b2pre',
                  'Opera/10.60 (Windows NT 5.1; U; zh-cn) Presto/2.6.30 Version/10.60', 'Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4062; en; U; ssr)',
                  'Mozilla/5.0 (Windows; U; Windows NT 5.1; ; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14',
                  'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr; rv:1.9.2.4) Gecko/20100523 Firefox/3.6.4 ( .NET CLR 3.5.30729)',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
                  'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5']
    UA = random.choice(user_agent)
    headers['User-Agent'] = UA
    try:
        r = requests.get(url=baidu_url, headers=headers, timeout=5).content
        if 'class="nors"' not in r:
            # "nors" marks Baidu's empty-result page; anything else means results exist
            return 'Baidu_site: %s' % baidu_url
        else:
            return ''
    except:
        pass
    return ''


if __name__ == '__main__':
    print baidu_site(sys.argv[1])

    list.txt

    Directory dictionary; %flag% is replaced with the target hostname:

    /robots.txt

    /.git/config

    /.svn/entries

    /.svn/wc.db

    /README.md

    /.viminfo

    /.bash_history

    /.bashrc

    /crossdomain.xml

    /nginx.conf

    /httpd.conf

    /user.txt

    /sitemap.xml

    /username.txt

    /pass.txt

    /passwd.txt

    /password.txt

    /.DS_Store

    /.htaccess

    /log

    /log.txt

    /phpinfo.php

    /info.php

    /www.7z

    /www.rar

    /www.zip

    /www.tar.gz

    /wwwroot.zip

    /wwwroot.rar

    /wwwroot.7z

    /wwwroot.tar.gz

    /%flag%.7z

    /%flag%.rar

    /%flag%.zip

    /%flag%.tar.gz

    /backup

    /backup.7z

    /backup.rar

    /backup.sql

    /backup.tar

    /backup.tar.gz

    /backup.zip

    /database.sql

    /index.7z

    /index.rar

    /index.sql

    /index.tar

    /index.tar.gz

    /index.zip

    /index.html

    /index.php

    /index.asp

    /index.aspx

    /index.jsp

    /index.action

    /users.sql

    /login

    /phpmyadmin

    /pma

    /SiteServer

    /admin

    /install

    /backup

    /test

    /tmp

    /file

    Result: (screenshot omitted)

    The auxiliary helper plugins it needs can be added as you see fit.

    For a more complete framework, datasploit is recommended.

    Reposted from: http://www.cnnetarmy.com/%e4%b9%99%e6%96%b9%e6%b8%97%e9%80%8f%e6%b5%8b%e8%af%95%e4%b9%8b%e4%bf%a1%e6%81%af%e6%94%b6%e9%9b%86/
