cgc new merge request #3

Closed
pwfog7nhz wants to merge 0 commits from Watchdog-cgc into main

@@ -1,144 +0,0 @@
import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Text, ForeignKey, Boolean
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
engine = create_engine('postgresql://postgres:687fb677c784ce2a0b273263bfe778be@127.0.0.1/src')
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
class SrcCustomer(Base):
    '''SRC customer management'''
    __tablename__ = 'src_customer'
    cus_name = Column(String(80), primary_key=True)  # Vendor name
    cus_home = Column(String(100))  # Vendor homepage
    cus_time = Column(String(30))  # Time added
    src_assets = relationship('SrcAssets', back_populates='src_customer', cascade='all, delete-orphan')
    src_task = relationship('SrcTask', back_populates='src_customer', cascade='all, delete-orphan')
    src_ports = relationship('SrcPorts', back_populates='src_customer', cascade='all, delete-orphan')
    def __init__(self, cus_name, cus_home):
        self.cus_name = cus_name
        self.cus_home = cus_home
        self.cus_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
class SrcTask(Base):
    '''SRC task management'''
    __tablename__ = 'src_task'
    id = Column(Integer, primary_key=True)
    task_name = Column(String(80), ForeignKey('src_customer.cus_name', ondelete='CASCADE'))  # Vendor name
    task_domain = Column(String(100), unique=True)  # Single task asset: subdomain/IP/main domain
    task_time = Column(String(30))  # Time added
    task_flag = Column(Boolean)  # Whether this task has been probed
    src_customer = relationship('SrcCustomer', back_populates='src_task')
    def __init__(self, task_name, task_domain, task_flag=False):
        self.task_name = task_name
        self.task_domain = task_domain
        self.task_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.task_flag = task_flag
class SrcAssets(Base):
    '''SRC asset management'''
    __tablename__ = 'src_assets'
    id = Column(Integer, primary_key=True)
    asset_name = Column(String(80), ForeignKey('src_customer.cus_name', ondelete='CASCADE'))  # Vendor name
    asset_host = Column(String(200), unique=True)  # Host/URL
    asset_subdomain = Column(String(200))  # Subdomain
    asset_title = Column(Text)  # Page title
    asset_ip = Column(String(16))  # IP address
    asset_area = Column(Text)  # Region
    asset_waf = Column(String(100))  # WAF
    asset_cdn = Column(Boolean)  # CDN
    asset_banner = Column(Text)  # Banner
    asset_info = Column(Text)  # Web fingerprint
    asset_whois = Column(Text)  # Whois info
    asset_time = Column(String(30))  # Time added
    asset_xray_flag = Column(Boolean)  # Whether crawled/passively scanned by xray
    asset_burp_flag = Column(Boolean)  # Whether scanned by Burp Suite
    asset_port_flag = Column(Boolean)  # Whether port scanned
    asset_info_flag = Column(Boolean)  # Whether web info collection has run
    src_customer = relationship('SrcCustomer', back_populates='src_assets')
def __init__(self, asset_name, asset_host, asset_subdomain, asset_title, asset_ip, asset_area, asset_waf, asset_cdn,
asset_banner, asset_info, asset_whois, asset_xray_flag=False, asset_burp_flag=False,
asset_port_flag=False, asset_info_flag=False):
self.asset_name = asset_name
self.asset_host = asset_host
self.asset_subdomain = asset_subdomain
self.asset_title = asset_title
self.asset_ip = asset_ip
self.asset_area = asset_area
self.asset_waf = asset_waf
self.asset_cdn = asset_cdn
self.asset_banner = asset_banner
self.asset_info = asset_info
self.asset_whois = asset_whois
self.asset_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.asset_xray_flag = asset_xray_flag
self.asset_burp_flag = asset_burp_flag
self.asset_port_flag = asset_port_flag
self.asset_info_flag = asset_info_flag
class SrcPorts(Base):
    '''SRC port management'''
    __tablename__ = 'src_ports'
    id = Column(Integer, primary_key=True)
    port_name = Column(String(80), ForeignKey('src_customer.cus_name', ondelete='CASCADE'))  # Vendor name
    port_host = Column(String(200))  # Host/subdomain/URL
    port_ip = Column(String(20))  # IP
    port_port = Column(String(20))  # Port
    port_service = Column(String(30))  # Protocol
    port_product = Column(String(100))  # Service on the port
    port_version = Column(String(100))  # Service version
    port_time = Column(String(30))  # Time added
    port_brute = Column(Boolean)  # Whether brute forced
    port_url_scan = Column(Boolean)  # Whether probed over HTTP
    src_customer = relationship('SrcCustomer', back_populates='src_ports')
def __init__(self, port_name, port_host, port_ip, port_port, port_service, port_product, port_version, port_brute=False,
port_url_scan=False):
self.port_name = port_name
self.port_host = port_host
self.port_ip = port_ip
self.port_port = port_port
self.port_service = port_service
self.port_product = port_product
self.port_version = port_version
self.port_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.port_brute = port_brute
self.port_url_scan = port_url_scan
class SrcVul(Base):
    '''SRC vulnerability table'''
    __tablename__ = 'src_vul'
    id = Column(Integer, primary_key=True)
    vul_subdomain = Column(String(150))  # Subdomain
    vul_plugin = Column(String(200))  # Plugin
    vul_url = Column(Text)  # URL
    vul_payload = Column(Text)
    vul_raw = Column(Text)
    vul_time = Column(String(30))
    vul_scan_name = Column(String(30))  # Scanner
    vul_flag = Column(Boolean)  # Marked as submitted
    vul_mail = Column(Boolean)  # Whether a notification email was sent
def __init__(self, vul_subdomain, vul_plugin, vul_url, vul_payload, vul_raw, vul_scan_name, vul_flag=False,
vul_mail=False):
self.vul_subdomain = vul_subdomain
self.vul_plugin = vul_plugin
self.vul_url = vul_url
self.vul_payload = vul_payload
self.vul_raw = vul_raw
self.vul_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.vul_scan_name = vul_scan_name
self.vul_flag = vul_flag
self.vul_mail = vul_mail
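A minimal usage sketch of these models (not part of the diff; it assumes the PostgreSQL instance above is reachable and uses standard SQLAlchemy calls; the vendor values are hypothetical):

Base.metadata.create_all(engine)  # create all tables declared on Base
customer = SrcCustomer(cus_name='ExampleCorp', cus_home='https://example.com')
session.add(customer)
session.commit()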

@@ -1,39 +0,0 @@
import nmap
from client.subdomain.oneforall.config import PortScan
def Nmap_Portscan(ip, port_info_list=None):
    print(f'[+]Port scan - starting nmap port scan of [{ip}]')
try:
nm = nmap.PortScanner(nmap_search_path=PortScan.nmap_search_path)
except Exception as e:
        print(f'[-]Port scan - nmap initialization failed for [{ip}]; {e}')
return None
if port_info_list:
ports = ','.join([str(tmp) for tmp in port_info_list])
nm.scan(hosts=ip, ports=ports, arguments='-Pn -T 4 -sV --version-intensity=5')
else:
nm.scan(hosts=ip, arguments='-Pn -T 4 -sV --version-intensity=5')
try:
port_list = nm[ip]['tcp'].keys()
except Exception as e:
        print(f'[-]Port scan - nmap scan error for [{ip}]; {e}')
return None
else:
port_dict = {}
for port in port_list:
if nm[ip].has_tcp(port):
port_info = nm[ip]['tcp'][port]
state = port_info.get('state', 'no')
if state == 'open':
name = port_info.get('name', '')
product = port_info.get('product', '')
version = port_info.get('version', '')
port_dict[port] = {'ip': ip, 'port': port, 'name': name, 'product': product, 'version': version}
                    print(f'[+]Port scan - nmap found {ip}:{port} {name} {product} {version}')
        print(f'[+]Port scan - nmap scan finished')
return port_dict
if __name__ == '__main__':
info = Nmap_Portscan('1.1.1.1')
print(info)

@@ -1,39 +0,0 @@
import shodan
import time
from client.subdomain.oneforall.config import PortScan
check = True
if not PortScan.shodan_api:
    print('[-]Port scan - no shodan API key configured')
    check = False
else:
    API = shodan.Shodan(PortScan.shodan_api)
    try:
        time.sleep(1)
        API.info()
    except shodan.exception.APIError as e:
        print(f'[-]Port scan - invalid shodan API key: {e}')
        check = False
    except Exception as e:
        print(f'[-]Port scan - shodan API error: {e}')
        check = False
def Scan(ip):
    print(f'[+]Port scan - starting shodan port scan')
    if not check:  # the key check above failed, so API was never created
        return None, None
    try:
        ipinfo = API.host(ip)
    except Exception as e:
        print(f'[-]Port scan - shodan lookup of {ip} failed: {e}')
        return None, None
port_list = ipinfo.get('ports', None)
vulns_list = ipinfo.get('vulns', None)
if port_list:
        print(f'[+]Port scan - shodan port scan of [{ip}] finished: {port_list}')
return port_list, vulns_list
else:
return None, None
if __name__ == '__main__':
port_list, vulns_list = Scan('123.147.194.210')
print(port_list, vulns_list)

@@ -1,64 +0,0 @@
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
import time
from client.portscan.ShodanScan import Scan
from client.portscan.NmapScan import Nmap_Portscan
from client.database import session, SrcAssets, SrcPorts
class PortScan:
def __init__(self, ip):
self.ip = ip
def run(self):
port_list, vulns_list = Scan(ip=self.ip)
port_dict = Nmap_Portscan(ip=self.ip, port_info_list=port_list)
return port_dict, vulns_list
def ReadAssets():
    '''Read asset data'''
assets_sql = session.query(SrcAssets).filter(SrcAssets.asset_port_flag == False).first()
session.commit()
if assets_sql:
ip = assets_sql.asset_ip
assets_sql1 = session.query(SrcAssets).filter(SrcAssets.asset_ip == ip).all()
for sql in assets_sql1:
sql.asset_port_flag = True
session.add(sql)
try:
session.commit()
except Exception as error:
            print(f'[-]Port scan - error updating the IP scan state: {error}')
session.rollback()
return assets_sql
def WritePosts(port_dict, assets_sql):
    '''Persist port scan results to the database'''
for info in port_dict:
port_sql = SrcPorts(port_name=assets_sql.asset_name, port_host=assets_sql.asset_host, port_ip=assets_sql.asset_ip,
port_port=port_dict[info]['port'], port_service=port_dict[info]['name'],
port_product=port_dict[info]['product'], port_version=port_dict[info]['version'])
session.add(port_sql)
try:
session.commit()
except Exception as error:
session.rollback()
            print(f'[-]Error saving port data: {error}')
    print(f'[+]Finished saving ports for [{assets_sql.asset_ip}]')
def main():
    print('[+]Port scan started')
while True:
assets_sql = ReadAssets()
if not assets_sql:
time.sleep(30)
else:
portscan = PortScan(assets_sql.asset_ip)
port_dict, vulns_list = portscan.run()
if port_dict:
WritePosts(port_dict, assets_sql)
if __name__ == '__main__':
main()

@@ -1,71 +0,0 @@
# Module API configuration
# Censys: free registration to get an API key: https://censys.io/api
censys_api_id = ''
censys_api_secret = ''
# Binaryedge: free registration to get an API key: https://app.binaryedge.io/account/api
# The free API key is valid for one month; it can be regenerated after expiry and allows 250 queries per month.
binaryedge_api = ''
# Chinaz: free registration to get an API key: http://api.chinaz.com/ApiDetails/Alexa
chinaz_api = ''
# Bing: free registration to get an API key: https://azure.microsoft.com/zh-cn/services/
# cognitive-services/bing-web-search-api/#web-json
bing_api_id = ''
bing_api_key = ''
# SecurityTrails: free registration to get an API key: https://securitytrails.com/corp/api
securitytrails_api = ''
# https://fofa.so/api
fofa_api_email = ''  # fofa account email
fofa_api_key = ''  # fofa account key
# Google: free registration to get an API key:
# https://developers.google.com/custom-search/v1/overview
# The free API only returns the first 100 results
google_api_key = ''  # Google API search key
google_api_cx = ''  # Google API search cx
# https://api.passivetotal.org/api/docs/
riskiq_api_username = ''
riskiq_api_key = ''
# Shodan: free registration to get an API key: https://account.shodan.io/register
# The free API is rate-limited to one query per second
shodan_api_key = ''
# ThreatBook API (subdomain queries are paid): https://x.threatbook.cn/nodev4/vb4/myAPI
threatbook_api_key = ''
# VirusTotal: free registration to get an API key: https://developers.virustotal.com/reference
virustotal_api_key = ''
# https://www.zoomeye.org/doc?channel=api
zoomeye_api_usermail = ''
zoomeye_api_password = ''
# Spyse: free registration to get an API token: https://spyse.com/
spyse_api_token = ''
# https://www.circl.lu/services/passive-dns/
circl_api_username = ''
circl_api_password = ''
# https://www.dnsdb.info/
dnsdb_api_key = ''
# ipv4info: free registration to get an API key: http://ipv4info.com/tools/api/
# The free API key is valid for only two days; it can be regenerated after expiry and allows 50 queries per day.
ipv4info_api_key = ''
# https://github.com/360netlab/flint
# passivedns_api_addr defaults to empty, which uses http://api.passivedns.cn
# passivedns_api_token may be left empty
passivedns_api_addr = ''
passivedns_api_token = ''
# A Github token can be generated at https://github.com/settings/tokens; user is the Github username
# Used for subdomain takeover checks and subdomain collection
github_api_user = ''
github_api_token = ''

@@ -1,636 +0,0 @@
#!/usr/bin/python3
# coding=utf-8
"""
OneForAll子域爆破模块
:copyright: Copyright (c) 2019, Jing Ling. All rights reserved.
:license: GNU General Public License v3.0, see LICENSE for more details.
"""
import gc
import json
import time
import random
import secrets
import exrex
import fire
import tenacity
from tenacity import RetryError
from dns.exception import Timeout
from dns.resolver import NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers
import client.subdomain.oneforall.config as config
import client.subdomain.oneforall.dbexport as dbexport
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.config import logger
@tenacity.retry(reraise=True, stop=tenacity.stop_after_attempt(3))
def do_query_a(domain, resolver):
    try:
        answer = resolver.query(domain, 'A')
    # If querying the A record of the random domain times out, probe again
    except Timeout as e:
        logger.log('ALERT', f'Probe timed out, retrying')
        logger.log('DEBUG', e.args)
        raise tenacity.TryAgain
    # If querying the A record of the random domain raises NXDOMAIN,
    # the random subdomain has no A record, i.e. wildcard resolution is off
    except (NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers) as e:
        logger.log('DEBUG', e.args)
        logger.log('INFOR', f'{domain} does not use wildcard resolution')
        return False
    except Exception as e:
        logger.log('ALERT', f'Error while probing whether {domain} uses wildcard resolution')
        logger.log('FATAL', e.args)
        exit(1)
    else:
        if answer.rrset is None:
            logger.log('ALERT', f'Result has no records, probing again')
            raise tenacity.TryAgain
        ttl = answer.ttl
        name = answer.name
        ips = {item.address for item in answer}
        logger.log('ALERT', f'{domain} uses wildcard resolution')
        logger.log('ALERT', f'{domain} resolved to name: {name} '
                            f'IP: {ips} TTL: {ttl}')
        return True
def detect_wildcard(domain, authoritative_ns):
    """
    Detect whether a domain uses wildcard resolution
    :param str domain: domain name
    :param list authoritative_ns: authoritative DNS servers
    :return: whether wildcard resolution is used
    """
    logger.log('INFOR', f'Detecting whether {domain} uses wildcard resolution')
    token = secrets.token_hex(4)
    random_subdomain = f'{token}.{domain}'
    resolver = utils.dns_resolver()
    resolver.nameservers = authoritative_ns
    resolver.rotate = True
    resolver.cache = None
    try:
        wildcard = do_query_a(random_subdomain, resolver)
    except Timeout as e:
        logger.log('DEBUG', e.args)
        logger.log('ALERT', f'Probe timed out repeatedly; tentatively assuming {domain} does not use wildcard resolution')
        return False
    else:
        return wildcard
def gen_fuzz_subdomains(expression, rule):
    """
    Generate subdomains to brute force in fuzz mode
    :param str expression: subdomain generation expression
    :param str rule: regex rule used to generate subdomains
    :return: subdomains for brute forcing
    """
    subdomains = list()
    fuzz_count = exrex.count(rule)
    if fuzz_count > 10000000:
        logger.log('ALERT', f'Note: the dictionary generated by this rule is too large: {fuzz_count} > 10000000')
    logger.log('DEBUG', f'Dictionary size generated in fuzz mode: {fuzz_count}')
    for fuzz_string in exrex.generate(rule):
        fuzz_string = fuzz_string.lower()
        if not fuzz_string.isalnum():
            continue
        fuzz_domain = expression.replace('*', fuzz_string)
        subdomains.append(fuzz_domain)
    random_domain = random.choice(subdomains)
    logger.log('ALERT', f'Please check that {random_domain}, generated in fuzz mode, looks correct')
    return subdomains
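A quick sketch of the expansion fuzz mode relies on (hypothetical expression and rule, not part of the diff; exrex, imported above, enumerates every string matching a regex):

expression = '*.m.example.com'  # '*' marks the fuzz position
rule = '[a-z]{2}'  # 26 * 26 = 676 candidate labels
print(exrex.count(rule))  # 676
candidates = [expression.replace('*', s) for s in exrex.generate(rule)]
print(candidates[:2])  # e.g. ['aa.m.example.com', 'ab.m.example.com']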
def gen_word_subdomains(expression, path):
    """
    Generate subdomains to brute force in word mode
    :param str expression: subdomain generation expression
    :param str path: dictionary path
    :return: subdomains for brute forcing
    """
    subdomains = list()
    with open(path, encoding='utf-8', errors='ignore') as fd:
        for line in fd:
            word = line.strip().lower()
            if not word.isalnum():
                continue
            if word.endswith('.'):
                word = word[:-1]
            subdomain = expression.replace('*', word)
            subdomains.append(subdomain)
    random_domain = random.choice(subdomains)
    logger.log('DEBUG', f'Dictionary size generated in word mode: {len(subdomains)}')
    logger.log('ALERT', f'Please check that {random_domain}, generated in word mode, looks correct')
    return subdomains
def query_domain_ns_a(ns_list):
    logger.log('INFOR', f'Querying A records of the authoritative DNS name servers {ns_list}')
    if not isinstance(ns_list, list):
        return list()
    ns_ip_list = []
    resolver = utils.dns_resolver()
    for ns in ns_list:
        try:
            answer = resolver.query(ns, 'A')
        except Exception as e:
            logger.log('ERROR', e.args)
            logger.log('ERROR', f'Error querying the A record of authoritative DNS name server {ns}')
            continue
        if answer:
            for item in answer:
                ns_ip_list.append(item.address)
    logger.log('INFOR', f'A records of the authoritative DNS name servers: {ns_ip_list}')
    return ns_ip_list
def query_domain_ns(domain):
    logger.log('INFOR', f'Querying the NS records of {domain}')
    domain = utils.get_maindomain(domain)
    resolver = utils.dns_resolver()
    try:
        answer = resolver.query(domain, 'NS')
    except Exception as e:
        logger.log('ERROR', e.args)
        logger.log('ERROR', f'Error querying the NS records of {domain}')
        return list()
    ns = [item.to_text() for item in answer]
    logger.log('INFOR', f'Authoritative DNS name servers of {domain}: {ns}')
    return ns
@tenacity.retry(reraise=True, stop=tenacity.stop_after_attempt(2))
def get_wildcard_record(domain, resolver):
    logger.log('INFOR', f'Querying the wildcard resolution record of {domain} on the authoritative DNS name server')
    try:
        answer = resolver.query(domain, 'A')
    # If querying the A record of the random domain times out, query again
    except Timeout as e:
        logger.log('ALERT', f'Query timed out, retrying')
        logger.log('DEBUG', e.args)
        raise tenacity.TryAgain
    except (NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers) as e:
        logger.log('DEBUG', e.args)
        logger.log('INFOR', f'{domain} has no A record on the authoritative DNS name server')
        return None, None
    except Exception as e:
        logger.log('ERROR', e.args)
        logger.log('ERROR', f'Error querying the wildcard record of {domain} on the authoritative DNS name server')
        exit(1)
    else:
        if answer.rrset is None:
            logger.log('DEBUG', f'Query returned no records')
            return None, None
        name = answer.name
        ip = {item.address for item in answer}
        ttl = answer.ttl
        logger.log('INFOR', f'{domain} resolved on the authoritative DNS to name: {name} '
                            f'IP: {ip} TTL: {ttl}')
        return ip, ttl
def collect_wildcard_record(domain, authoritative_ns):
    logger.log('INFOR', f'Collecting the wildcard resolution records of {domain}')
    if not authoritative_ns:
        return list(), int()
    resolver = utils.dns_resolver()
    resolver.nameservers = authoritative_ns
    resolver.rotate = True
    resolver.cache = None
    ips = set()
    ips_stat = dict()
    while True:
        token = secrets.token_hex(4)
        random_subdomain = f'{token}.{domain}'
        try:
            ip, ttl = get_wildcard_record(random_subdomain, resolver)
        except Timeout as e:
            logger.log('DEBUG', e.args)
            logger.log('ALERT', f'Query timed out repeatedly; will try a new random subdomain')
            continue
        if ip is None:
            continue
        ips = ips.union(ip)
        # Count how many times each wildcard IP appears
        for addr in ip:
            count = ips_stat.setdefault(addr, 0)
            ips_stat[addr] = count + 1
        # Select the IP addresses that appeared at least twice
        addrs = list()
        for addr, times in ips_stat.items():
            if times >= 2:
                addrs.append(addr)
        # Stop collecting wildcard IP records once most IPs have appeared more than once
        if len(addrs) / len(ips) >= 0.8:
            break
    logger.log('DEBUG', f'Collected the wildcard records of {domain}\n{ips}\n{ttl}')
    return ips, ttl
def get_nameservers_path(enable_wildcard, ns_ip_list):
path = config.brute_nameservers_path
if not enable_wildcard:
return path
if not ns_ip_list:
return path
path = config.authoritative_dns_path
ns_data = '\n'.join(ns_ip_list)
utils.save_data(path, ns_data)
return path
def check_dict():
    if not config.enable_check_dict:
        return
    sec = config.check_time
    logger.log('ALERT', f'You have {sec} seconds to check that the brute-force configuration is correct')
    logger.log('ALERT', f'Press `Ctrl+C` to abort the brute force')
    try:
        time.sleep(sec)
    except KeyboardInterrupt:
        logger.log('INFOR', 'Brute-force configuration is wrong, aborting')
        exit(0)
def gen_records(items, records, subdomains, ip_times, wc_ips, wc_ttl):
    qname = items.get('name')[:-1]  # strip the trailing `.`
    reason = items.get('status')
    resolver = items.get('resolver')
    data = items.get('data')
    answers = data.get('answers')
    record = dict()
    cname = list()
    ips = list()
    public = list()
    times = list()
    ttls = list()
    is_valid_flags = list()
    have_a_record = False
    for answer in answers:
        if answer.get('type') != 'A':
            logger.log('TRACE', f'The answer for {qname} has no A record\n{answer}')
            continue
        logger.log('TRACE', f'The answer for {qname} has an A record\n{answer}')
        have_a_record = True
        ttl = answer.get('ttl')
        ttls.append(ttl)
        cname.append(answer.get('name')[:-1])  # strip the trailing `.`
        ip = answer.get('data')
        ips.append(ip)
        public.append(utils.ip_is_public(ip))
        num = ip_times.get(ip)
        times.append(num)
        isvalid, reason = is_valid_subdomain(ip, ttl, num, wc_ips, wc_ttl)
        logger.log('TRACE', f'Is {ip} valid: {isvalid} reason: {reason}')
        is_valid_flags.append(isvalid)
    if not have_a_record:
        logger.log('TRACE', f'None of the answers for {qname} contain an A record {answers}')
    # To save memory, only subdomains that have an A record and pass the checks are added to the records
    if have_a_record and all(is_valid_flags):
        record['resolve'] = 1
        record['reason'] = reason
        record['ttl'] = ttls
        record['cname'] = cname
        record['content'] = ips
        record['public'] = public
        record['times'] = times
        record['resolver'] = resolver
        records[qname] = record
        subdomains.append(qname)
    return records, subdomains
def stat_ip_times(result_path):
    logger.log('INFOR', f'Counting IP occurrences')
    times = dict()
    with open(result_path) as fd:
        for line in fd:
            line = line.strip()
            try:
                items = json.loads(line)
            except Exception as e:
                logger.log('ERROR', e.args)
                logger.log('ERROR', f'Error parsing line {line}, skipping it')
                continue
            status = items.get('status')
            if status != 'NOERROR':
                continue
            data = items.get('data')
            if 'answers' not in data:
                continue
            answers = data.get('answers')
            for answer in answers:
                if answer.get('type') == 'A':
                    ip = answer.get('data')
                    # For an IP seen for the first time, initialize its count to 0
                    value = times.setdefault(ip, 0)
                    times[ip] = value + 1
    return times
def deal_output(output_path, ip_times, wildcard_ips, wildcard_ttl):
    logger.log('INFOR', f'Processing resolution results')
    records = dict()  # holds all domain resolution data
    subdomains = list()  # holds all subdomains that passed the validity checks
    with open(output_path) as fd:
        for line in fd:
            line = line.strip()
            try:
                items = json.loads(line)
            except Exception as e:
                logger.log('ERROR', e.args)
                logger.log('ERROR', f'Error parsing line {line}, skipping it')
                continue
            qname = items.get('name')[:-1]  # strip the trailing `.`
            status = items.get('status')
            if status != 'NOERROR':
                logger.log('TRACE', f'While processing {line}, the query for {qname} returned status {status}')
                continue
            data = items.get('data')
            if 'answers' not in data:
                logger.log('TRACE', f'While processing {line}, the result for {qname} had no answers')
                continue
            records, subdomains = gen_records(items, records, subdomains,
                                              ip_times, wildcard_ips,
                                              wildcard_ttl)
    return records, subdomains
def check_by_compare(ip, ttl, wc_ips, wc_ttl):
    """
    Compare against the wildcard resolution IP set and TTL to decide
    whether a discovered subdomain is a wildcard subdomain
    :param set ip: IPs from the subdomain's A record query
    :param int ttl: TTL from the subdomain's A record query
    :param set wc_ips: wildcard resolution IP set
    :param int wc_ttl: wildcard resolution TTL
    :return: check result
    """
    # See http://sh3ll.me/archives/201704041222.txt
    if ip not in wc_ips:
        return False  # not wildcard: the subdomain IP is not in the wildcard IP set
    if ttl != wc_ttl and ttl % 60 == 0 and wc_ttl % 60 == 0:
        return False
    return True
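A small illustration of this heuristic with hypothetical values (not part of the diff):

wc_ips = {'1.2.3.4', '1.2.3.5'}  # IPs that random subdomains resolved to
wc_ttl = 600
check_by_compare('8.8.8.8', 600, wc_ips, wc_ttl)  # False: IP outside the wildcard set
check_by_compare('1.2.3.4', 600, wc_ips, wc_ttl)  # True: same IP and TTL as the wildcard
check_by_compare('1.2.3.4', 120, wc_ips, wc_ttl)  # False: two differing round TTLs suggest a real record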
def check_ip_times(times):
    """
    Judge wildcard resolution by how often an IP appears
    :param times: number of times the subdomain IP appeared
    :return: check result
    """
    if times > config.ip_appear_maximum:
        return True
    return False
def is_valid_subdomain(ip, ttl, times, wc_ips, wc_ttl):
    ip_blacklist = config.brute_ip_blacklist
    if ip in ip_blacklist:  # a subdomain resolving to a blacklisted IP is invalid
        return 0, 'IP blacklist'
    if all([wc_ips, wc_ttl]):  # only compare when wildcard records exist
        if check_by_compare(ip, ttl, wc_ips, wc_ttl):
            return 0, 'IP wildcard'
    if check_ip_times(times):
        return 0, 'IP exceeded'
    return 1, 'OK'
def save_brute_dict(dict_path, dict_set):
    dict_data = '\n'.join(dict_set)
    if not utils.save_data(dict_path, dict_data):
        logger.log('FATAL', 'Error saving the generated dictionary')
        exit(1)
def delete_file(dict_path, output_path):
if config.delete_generated_dict:
dict_path.unlink()
if config.delete_massdns_result:
output_path.unlink()
class Brute(Module):
    """
    OneForAll subdomain brute-force module
    Example:
        brute.py --target domain.com --word True run
        brute.py --target ./domains.txt --word True run
        brute.py --target domain.com --word True --process 1 run
        brute.py --target domain.com --word True --wordlist subnames.txt run
        brute.py --target domain.com --word True --recursive True --depth 2 run
        brute.py --target d.com --fuzz True --place m.*.d.com --rule '[a-z]' run
    Note:
        The alive parameter accepts True or False, exporting only live or all subdomain results respectively
        The format parameter accepts: 'txt', 'rst', 'csv', 'tsv', 'json', 'yaml', 'html',
        'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods'
        The path parameter defaults to None, which auto-generates a path under the OneForAll results directory
    :param str target: a single domain or the path of a file with one domain per line
    :param int process: number of brute-force processes (default 1)
    :param int concurrent: brute-force concurrency (default 10000)
    :param bool word: whether to brute force in word mode (default False)
    :param str wordlist: dictionary path for word mode (defaults to the config.py setting)
    :param bool recursive: whether to brute force recursively (default False)
    :param int depth: recursion depth (default 2)
    :param str nextlist: dictionary path for recursive brute forcing (defaults to the config.py setting)
    :param bool fuzz: whether to brute force in fuzz mode (default False)
    :param str place: fuzz position (required when fuzz mode is enabled)
    :param str rule: regex rule for fuzz mode (required when fuzz mode is enabled)
    :param bool export: whether to export the brute-force results (default True)
    :param bool alive: export only live subdomain results (default True)
    :param str format: export format (default csv)
    :param str path: export path (default None)
    """
def __init__(self, target, process=None, concurrent=None, word=False,
wordlist=None, recursive=False, depth=None, nextlist=None,
fuzz=False, place=None, rule=None, export=True, alive=True,
format='csv', path=None):
Module.__init__(self)
self.module = 'Brute'
self.source = 'Brute'
self.target = target
self.process_num = process or utils.get_process_num()
self.concurrent_num = concurrent or config.brute_concurrent_num
self.word = word
self.wordlist = wordlist or config.brute_wordlist_path
self.recursive_brute = recursive or config.enable_recursive_brute
self.recursive_depth = depth or config.brute_recursive_depth
self.recursive_nextlist = nextlist or config.recursive_nextlist_path
self.fuzz = fuzz or config.enable_fuzz
self.place = place or config.fuzz_place
self.rule = rule or config.fuzz_rule
self.export = export
self.alive = alive
self.format = format
self.path = path
        self.bulk = False  # whether this is a bulk brute-force scenario
        self.domains = list()  # all domains awaiting brute force
        self.domain = str()  # the domain currently being brute forced
        self.ips_times = dict()  # occurrence counts of the IP set
        self.enable_wildcard = False  # whether the current domain uses wildcard resolution
        self.wildcard_check = config.enable_wildcard_check
        self.wildcard_deal = config.enable_wildcard_deal
        self.check_env = True
    def gen_brute_dict(self, domain):
        logger.log('INFOR', f'Generating a brute-force dictionary for {domain}')
        dict_set = set()
        # If domain is not self.subdomain but a subdomain of self.domain, generate a recursive brute-force dictionary
        if self.place is None:
            self.place = '*.' + domain
        wordlist = self.wordlist
        main_domain = self.register(domain)
        if domain != main_domain:
            wordlist = self.recursive_nextlist
        if self.word:
            word_subdomains = gen_word_subdomains(self.place, wordlist)
            # a set can be merged with a list
            dict_set = dict_set.union(word_subdomains)
        if self.fuzz:
            fuzz_subdomains = gen_fuzz_subdomains(self.place, self.rule)
            dict_set = dict_set.union(fuzz_subdomains)
        # logger.log('INFOR', f'Deduplicating the brute-force dictionary')
        # dict_set = utils.uniq_dict_list(dict_set)
        count = len(dict_set)
        logger.log('INFOR', f'Size of the generated brute-force dictionary: {count}')
        if count > 10000000:
            logger.log('ALERT', f'Note: the generated brute-force dictionary is too large: {count} > 10000000')
        return dict_set
    def check_brute_params(self):
        if not (self.word or self.fuzz):
            logger.log('FATAL', f'Please specify at least one brute-force mode')
            exit(1)
        if len(self.domains) > 1:
            self.bulk = True
        if self.fuzz:
            if self.place is None or self.rule is None:
                logger.log('FATAL', f'No fuzz position or rule specified')
                exit(1)
            if self.bulk:
                logger.log('FATAL', f'Fuzz mode cannot be used when brute forcing in bulk')
                exit(1)
            if self.recursive_brute:
                logger.log('FATAL', f'Recursive brute forcing cannot be used in fuzz mode')
                exit(1)
            fuzz_count = self.place.count('*')
            if fuzz_count < 1:
                logger.log('FATAL', f'No fuzz position specified')
                exit(1)
            if fuzz_count > 1:
                logger.log('FATAL', f'Only one fuzz position may be specified')
                exit(1)
            if self.domain not in self.place:
                logger.log('FATAL', f'The domain specified for fuzzing is wrong')
                exit(1)
    def main(self, domain):
        start = time.time()
        logger.log('INFOR', f'Brute forcing the domain {domain}')
massdns_dir = config.third_party_dir.joinpath('massdns')
result_dir = config.result_save_dir
temp_dir = result_dir.joinpath('temp')
utils.check_dir(temp_dir)
massdns_path = utils.get_massdns_path(massdns_dir)
timestring = utils.get_timestring()
        wildcard_ips = list()  # wildcard resolution IP list
        wildcard_ttl = int()  # wildcard resolution TTL integer
        ns_list = query_domain_ns(self.domain)
        ns_ip_list = query_domain_ns_a(ns_list)  # A-record list of the authoritative DNS name servers
self.enable_wildcard = detect_wildcard(domain, ns_ip_list)
if self.enable_wildcard:
wildcard_ips, wildcard_ttl = collect_wildcard_record(domain,
ns_ip_list)
ns_path = get_nameservers_path(self.enable_wildcard, ns_ip_list)
dict_set = self.gen_brute_dict(domain)
dict_len = len(dict_set)
dict_name = f'generated_subdomains_{domain}_{timestring}.txt'
dict_path = temp_dir.joinpath(dict_name)
save_brute_dict(dict_path, dict_set)
del dict_set
gc.collect()
output_name = f'resolved_result_{domain}_{timestring}.json'
output_path = temp_dir.joinpath(output_name)
log_path = result_dir.joinpath('massdns.log')
check_dict()
utils.call_massdns(massdns_path, dict_path, ns_path, output_path,
log_path, process_num=self.process_num,
concurrent_num=self.concurrent_num)
ip_times = stat_ip_times(output_path)
self.records, self.subdomains = deal_output(output_path, ip_times,
wildcard_ips, wildcard_ttl)
delete_file(dict_path, output_path)
end = time.time()
self.elapse = round(end - start, 1)
        logger.log('INFOR', f'The {self.source} module took {self.elapse} seconds and '
                            f'found {len(self.subdomains)} subdomains of {domain}')
        logger.log('DEBUG', f'Subdomains of {domain} found by the {self.source} module:\n'
                            f'{self.subdomains}')
self.gen_result(brute=dict_len, valid=len(self.subdomains))
self.save_db()
return self.subdomains
    def run(self):
        logger.log('INFOR', f'Start running the {self.source} module')
        if self.check_env:
            utils.check_env()
        self.domains = utils.get_domains(self.target)
        all_subdomains = list()
        for self.domain in self.domains:
            self.check_brute_params()
            if self.recursive_brute:
                logger.log('INFOR', f'Start recursive brute forcing of layer 1 subdomains of {self.domain}')
            valid_subdomains = self.main(self.domain)
            all_subdomains.extend(valid_subdomains)
            # Recursively brute force the next layer of subdomains
            # fuzz mode does not use recursive brute forcing
            if self.recursive_brute:
                for layer_num in range(1, self.recursive_depth):
                    # Layer 1 has already been brute forced, so the actual recursion layer is layer_num + 1
                    logger.log('INFOR', f'Start recursive brute forcing of layer '
                                        f'{layer_num + 1} subdomains of {self.domain}')
                    for subdomain in all_subdomains:
                        self.place = '*.' + subdomain
                        # Constraint for brute forcing the next layer of subdomains
                        num = subdomain.count('.') - self.domain.count('.')
                        if num == layer_num:
                            valid_subdomains = self.main(subdomain)
                            all_subdomains.extend(valid_subdomains)
            logger.log('INFOR', f'Finished running the {self.source} module to brute force the domain {self.domain}')
            if not self.path:
                name = f'{self.domain}_brute_result.{self.format}'
                self.path = config.result_save_dir.joinpath(name)
            # Database export
            if self.export:
                dbexport.export(self.domain,
                                alive=self.alive,
                                limit='resolve',
                                path=self.path,
                                format=self.format)
if __name__ == '__main__':
fire.Fire(Brute)

@@ -1,95 +0,0 @@
import time
import threading
import importlib
import client.subdomain.oneforall.config as config
import client.subdomain.oneforall.dbexport as dbexport
from client.subdomain.oneforall.config import logger
class Collect(object):
"""
收集子域名类
"""
def __init__(self, domain, export=True):
self.domain = domain
self.elapse = 0.0
self.modules = []
self.collect_funcs = []
self.path = None
self.export = export
self.format = 'csv'
    def get_mod(self):
        """
        Get the modules to run
        """
        if config.enable_all_module:
            # modules = ['brute', 'certificates', 'crawl',
            #            'datasets', 'intelligence', 'search']
            # the crawl module still has some issues
            modules = ['certificates', 'check', 'datasets',
                       'dnsquery', 'intelligence', 'search']
            # modules = ['intelligence']  # the crawl module still has some issues
            for module in modules:
                module_path = config.module_dir.joinpath(module)
                for path in module_path.rglob('*.py'):
                    # the class that needs to be imported
                    import_module = ('modules.' + module, path.stem)
                    self.modules.append(import_module)
        else:
            self.modules = config.enable_partial_module
def import_func(self):
"""
导入脚本的do函数
"""
for package, name in self.modules:
import_object = importlib.import_module('.' + name, package)
func = getattr(import_object, 'do')
self.collect_funcs.append([func, name])
    def run(self):
        """
        Class entry point
        """
        start = time.time()
        logger.log('INFOR', f'Start collecting subdomains of {self.domain}')
        self.get_mod()
        self.import_func()
        threads = []
        # Create one collection thread per module
        for collect_func in self.collect_funcs:
            func_obj, func_name = collect_func
            thread = threading.Thread(target=func_obj,
                                      name=func_name,
                                      args=(self.domain,),
                                      daemon=True)
            threads.append(thread)
        # Start all threads
        for thread in threads:
            thread.start()
        # Wait for all threads to finish
        for thread in threads:
            # Time out each thread in turn; worst-case main-thread blocking time = thread count * module_thread_timeout
            # Timed-out threads detach from the main thread; since they are daemons, they end together with it
            thread.join(config.module_thread_timeout)
        for thread in threads:
            if thread.is_alive():
                logger.log('ALERT', f'The {thread.name} module thread timed out')
        # Database export
        if self.export:
            if not self.path:
                name = f'{self.domain}.{self.format}'
                self.path = config.result_save_dir.joinpath(name)
            dbexport.export(self.domain, path=self.path, format=self.format)
        end = time.time()
        self.elapse = round(end - start, 1)
if __name__ == '__main__':
collect = Collect('example.com')
collect.run()

@@ -1,9 +0,0 @@
from .module import Module
class Crawl(Module):
"""
爬虫基类
"""
def __init__(self):
Module.__init__(self)

@@ -1,250 +0,0 @@
#!/usr/bin/env python3
# coding=utf-8
"""
SQLite数据库初始化和操作
"""
import records
import client.subdomain.oneforall.config as config
from records import Connection
from client.subdomain.oneforall.config import logger
class Database(object):
def __init__(self, db_path=None):
self.conn = self.get_conn(db_path)
@staticmethod
def get_conn(db_path):
"""
获取数据库对象
:param db_path: 数据库连接或路径
:return: SQLite数据库
"""
logger.log('TRACE', f'正在获取数据库连接')
if isinstance(db_path, Connection):
return db_path
protocol = 'sqlite:///'
if not db_path: # 数据库路径为空连接默认数据库
db_path = f'{protocol}{config.result_save_dir}/result.sqlite3'
else:
db_path = protocol + db_path
db = records.Database(db_path) # 不存在数据库时会新建一个数据库
logger.log('TRACE', f'使用数据库: {db_path}')
return db.get_connection()
def query(self, sql):
try:
results = self.conn.query(sql)
except Exception as e:
logger.log('ERROR', e.args)
else:
return results
def create_table(self, table_name):
"""
创建表结构
:param str table_name: 要创建的表名
"""
table_name = table_name.replace('.', '_')
if self.exist_table(table_name):
logger.log('TRACE', f'已经存在{table_name}')
return
logger.log('TRACE', f'正在创建{table_name}')
self.query(f'create table "{table_name}" ('
f'id integer primary key,'
f'type text,'
f'alive int,'
f'request int,'
f'resolve int,'
f'new int,'
f'url text,'
f'subdomain text,'
f'port int,'
f'level int,'
f'cname text,'
f'content text,'
f'public int,'
f'status int,'
f'reason text,'
f'title text,'
f'banner text,'
f'header text,'
f'response text,'
f'times text,'
f'ttl text,'
f'resolver text,'
f'module text,'
f'source text,'
f'elapse float,'
f'find int,'
f'brute int,'
f'valid int)')
def save_db(self, table_name, results, module_name=None):
"""
将各模块结果存入数据库
:param str table_name: 表名
:param list results: 结果列表
:param str module_name: 模块名
"""
logger.log('TRACE', f'正在将{module_name}模块发现{table_name}的子域'
'结果存入数据库')
table_name = table_name.replace('.', '_')
if results:
try:
self.conn.bulk_query(
f'insert into "{table_name}" ('
f'id, type, alive, resolve, request, new, url, subdomain,'
f'port, level, cname, content, public, status, reason,'
f'title, banner, header, response, times, ttl, resolver,'
f'module, source, elapse, find, brute, valid) '
f'values (:id, :type, :alive, :resolve, :request, :new,'
f':url, :subdomain, :port, :level, :cname, :content,'
f':public, :status, :reason, :title, :banner, :header,'
f':response, :times, :ttl, :resolver, :module, :source,'
f':elapse, :find, :brute, :valid)', results)
except Exception as e:
logger.log('ERROR', e)
def exist_table(self, table_name):
"""
判断是否存在某表
:param str table_name: 表名
:return: 是否存在某表
"""
table_name = table_name.replace('.', '_')
logger.log('TRACE', f'正在查询是否存在{table_name}')
results = self.query(f'select count() from sqlite_master '
f'where type = "table" and '
f'name = "{table_name}"')
if results.scalar() == 0:
return False
else:
return True
def copy_table(self, table_name, bak_table_name):
"""
复制表创建备份
:param str table_name: 表名
:param str bak_table_name: 新表名
"""
table_name = table_name.replace('.', '_')
bak_table_name = bak_table_name.replace('.', '_')
logger.log('TRACE', f'正在将{table_name}表复制到{bak_table_name}新表')
self.query(f'drop table if exists "{bak_table_name}"')
self.query(f'create table "{bak_table_name}" '
f'as select * from "{table_name}"')
def clear_table(self, table_name):
"""
清空表中数据
:param str table_name: 表名
"""
table_name = table_name.replace('.', '_')
logger.log('TRACE', f'正在清空{table_name}表中的数据')
self.query(f'delete from "{table_name}"')
def drop_table(self, table_name):
"""
删除表
:param str table_name: 表名
"""
table_name = table_name.replace('.', '_')
logger.log('TRACE', f'正在删除{table_name}')
self.query(f'drop table if exists "{table_name}"')
def rename_table(self, table_name, new_table_name):
"""
重命名表名
:param str table_name: 表名
:param str new_table_name: 新表名
"""
table_name = table_name.replace('.', '_')
new_table_name = new_table_name.replace('.', '_')
logger.log('TRACE', f'正在将{table_name}表重命名为{table_name}')
self.query(f'alter table "{table_name}" '
f'rename to "{new_table_name}"')
def deduplicate_subdomain(self, table_name):
"""
去重表中的子域
:param str table_name: 表名
"""
table_name = table_name.replace('.', '_')
logger.log('TRACE', f'正在去重{table_name}表中的子域')
self.query(f'delete from "{table_name}" where '
f'id not in (select min(id) '
f'from "{table_name}" group by subdomain)')
def remove_invalid(self, table_name):
"""
去除表中的空值或无效子域
:param str table_name: 表名
"""
table_name = table_name.replace('.', '_')
logger.log('TRACE', f'正在去除{table_name}表中的无效子域')
self.query(f'delete from "{table_name}" where '
f'subdomain is null or resolve == 0')
def deal_table(self, deal_table_name, backup_table_name):
"""
收集任务完成时对表进行处理
:param str deal_table_name: 待处理的表名
:param str backup_table_name: 备份的表名
"""
self.copy_table(deal_table_name, backup_table_name)
self.remove_invalid(deal_table_name)
self.deduplicate_subdomain(deal_table_name)
def get_data(self, table_name):
"""
获取表中的所有数据
:param str table_name: 表名
"""
table_name = table_name.replace('.', '_')
logger.log('TRACE', f'获取{table_name}表中的所有数据')
return self.query(f'select * from "{table_name}"')
def export_data(self, table_name, alive, limit):
"""
获取表中的部分数据
:param str table_name: 表名
:param any alive: 存活
:param str limit: 限制字段
"""
table_name = table_name.replace('.', '_')
query = f'select id, type, new, alive, request, resolve, url, ' \
f'subdomain, level, cname, content, public, port, status, ' \
f'reason, title, banner, times, ttl, resolver, module, ' \
f'source, elapse, find, brute, valid from "{table_name}"'
if alive and limit:
if limit in ['resolve', 'request']:
where = f' where {limit} = 1'
query += where
elif alive:
where = f' where alive = 1'
query += where
        logger.log('TRACE', f'Getting the data in the table {table_name}')
return self.query(query)
def close(self):
"""
关闭数据库连接
"""
self.conn.close()
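A minimal usage sketch of this wrapper (hypothetical table name, not part of the diff):

db = Database()  # opens result.sqlite3 under the configured results directory
db.deal_table('example_com', 'example_com_origin')  # back up, drop invalid rows, deduplicate
rows = db.get_data('example_com')
db.close()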

@@ -1,64 +0,0 @@
import re
import tldextract
import client.subdomain.oneforall.config as config
class Domain(object):
"""
域名处理类
:param str string: 传入的字符串
"""
def __init__(self, string):
self.string = str(string)
self.regexp = r'\b((?=[a-z0-9-]{1,63}\.)(xn--)?[a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,63}\b'
self.domain = None
def match(self):
"""
域名匹配
:return: 匹配结果
"""
result = re.search(self.regexp, self.string, re.I)
if result:
return result.group()
else:
return None
def extract(self):
"""
域名导出
>>> d = Domain('www.example.com')
<domain.Domain object>
>>> d.extract()
ExtractResult(subdomain='www', domain='example', suffix='com')
:return: 导出结果
"""
data_storage_dir = config.data_storage_dir
extract_cache_file = data_storage_dir.joinpath('public_suffix_list.dat')
tldext = tldextract.TLDExtract(extract_cache_file)
result = self.match()
if result:
return tldext(result)
else:
return None
def registered(self):
"""
获取注册域名
>>> d = Domain('www.example.com')
<domain.Domain object>
>>> d.registered()
example.com
:return: 注册域名
"""
result = self.extract()
if result:
return result.registered_domain
else:
return None

@@ -1,25 +0,0 @@
from .module import Module
from client.subdomain.oneforall.common import utils
class Lookup(Module):
"""
DNS查询基类
"""
def __init__(self):
Module.__init__(self)
def query(self):
"""
查询域名的TXT记录
:return: 查询结果
"""
answer = utils.dns_query(self.domain, self.type)
if answer is None:
return None
for item in answer:
record = item.to_text()
subdomains = utils.match_subdomain(self.domain, record)
self.subdomains = self.subdomains.union(subdomains)
self.gen_record(subdomains, record)
return self.subdomains

@@ -1,358 +0,0 @@
# coding=utf-8
"""
模块基类
"""
import json
import re
import threading
import time
import requests
import client.subdomain.oneforall.config as config
from client.subdomain.oneforall.config import logger
from . import utils
from .domain import Domain
from client.subdomain.oneforall.common.database import Database
lock = threading.Lock()
class Module(object):
def __init__(self):
self.module = 'Module'
self.source = 'BaseModule'
self.cookie = None
self.header = dict()
self.proxy = None
        self.delay = config.request_delay  # request sleep delay
        self.timeout = config.request_timeout  # request timeout
        self.verify = config.request_verify  # request SSL verification
        self.domain = str()  # the main domain whose subdomains are currently being collected
        self.type = 'A'  # DNS record type used when collecting subdomains (A record by default)
        self.subdomains = set()  # discovered subdomains
        self.records = dict()  # subdomain resolution records
        self.results = list()  # module results
        self.start = time.time()  # module start time
        self.end = None  # module end time
        self.elapse = None  # module elapsed time
def check(self, *apis):
"""
简单检查是否配置了api信息
:param apis: api信息元组
:return: 检查结果
"""
if not all(apis):
logger.log('ALERT', f'{self.source}模块没有配置API跳过执行')
return False
return True
def begin(self):
"""
输出模块开始信息
"""
logger.log('DEBUG', f'开始执行{self.source}模块收集{self.domain}的子域')
def finish(self):
"""
输出模块结束信息
"""
self.end = time.time()
self.elapse = round(self.end - self.start, 1)
logger.log('DEBUG', f'结束执行{self.source}模块收集{self.domain}的子域')
logger.log('INFOR', f'{self.source}模块耗时{self.elapse}秒发现子域'
f'{len(self.subdomains)}')
logger.log('DEBUG', f'{self.source}模块发现{self.domain}的子域\n'
f'{self.subdomains}')
def head(self, url, params=None, check=True, **kwargs):
"""
自定义head请求
:param str url: 请求地址
:param dict params: 请求参数
:param bool check: 检查响应
:param kwargs: 其他参数
:return: requests响应对象
"""
try:
resp = requests.head(url,
params=params,
cookies=self.cookie,
headers=self.header,
proxies=self.proxy,
timeout=self.timeout,
verify=self.verify,
**kwargs)
except Exception as e:
logger.log('ERROR', e.args)
return None
if not check:
return resp
if utils.check_response('HEAD', resp):
return resp
return None
def get(self, url, params=None, check=True, **kwargs):
"""
自定义get请求
:param str url: 请求地址
:param dict params: 请求参数
:param bool check: 检查响应
:param kwargs: 其他参数
:return: requests响应对象
"""
try:
resp = requests.get(url,
params=params,
cookies=self.cookie,
headers=self.header,
proxies=self.proxy,
timeout=self.timeout,
verify=self.verify,
**kwargs)
except Exception as e:
logger.log('ERROR', e.args)
return None
if not check:
return resp
if utils.check_response('GET', resp):
return resp
return None
def post(self, url, data=None, check=True, **kwargs):
"""
自定义post请求
:param str url: 请求地址
:param dict data: 请求数据
:param bool check: 检查响应
:param kwargs: 其他参数
:return: requests响应对象
"""
try:
resp = requests.post(url,
data=data,
cookies=self.cookie,
headers=self.header,
proxies=self.proxy,
timeout=self.timeout,
verify=self.verify,
**kwargs)
except Exception as e:
logger.log('ERROR', e.args)
return None
if not check:
return resp
if utils.check_response('POST', resp):
return resp
return None
def get_header(self):
"""
获取请求头
:return: 请求头
"""
# logger.log('DEBUG', f'获取请求头')
if config.enable_fake_header:
return utils.gen_fake_header()
else:
return self.header
def get_proxy(self, module):
"""
获取代理
:param str module: 模块名
:return: 代理字典
"""
if not config.enable_proxy:
logger.log('TRACE', f'所有模块不使用代理')
return self.proxy
if config.proxy_all_module:
logger.log('TRACE', f'{module}模块使用代理')
return utils.get_random_proxy()
if module in config.proxy_partial_module:
logger.log('TRACE', f'{module}模块使用代理')
return utils.get_random_proxy()
else:
logger.log('TRACE', f'{module}模块不使用代理')
return self.proxy
@staticmethod
def match(domain, html, distinct=True):
"""
正则匹配出子域
:param str domain: 域名
:param str html: 要匹配的html响应体
:param bool distinct: 匹配结果去除
:return: 匹配出的子域集合或列表
:rtype: set or list
"""
logger.log('TRACE', f'正则匹配响应体中的子域')
regexp = r'(?:\>|\"|\'|\=|\,)(?:http\:\/\/|https\:\/\/)?' \
r'(?:[a-z0-9](?:[a-z0-9\-]{0,61}[a-z0-9])?\.){0,}' \
+ domain.replace('.', r'\.')
result = re.findall(regexp, html, re.I)
if not result:
return set()
regexp = r'(?:http://|https://)'
deal = map(lambda s: re.sub(regexp, '', s[1:].lower()), result)
if distinct:
return set(deal)
else:
return list(deal)
@staticmethod
def register(domain):
"""
获取注册域名
:param str domain: 域名
:return: 注册域名
"""
return Domain(domain).registered()
def save_json(self):
"""
将各模块结果保存为json文件
:return 是否保存成功
"""
if not config.save_module_result:
return False
logger.log('TRACE', f'{self.source}模块发现的子域结果保存为json文件')
path = config.result_save_dir.joinpath(self.domain, self.module)
path.mkdir(parents=True, exist_ok=True)
name = self.source + '.json'
path = path.joinpath(name)
with open(path, mode='w', encoding='utf-8', errors='ignore') as file:
result = {'domain': self.domain,
'name': self.module,
'source': self.source,
'elapse': self.elapse,
'find': len(self.subdomains),
'subdomains': list(self.subdomains),
'records': self.records}
json.dump(result, file, ensure_ascii=False, indent=4)
return True
def gen_record(self, subdomains, record):
"""
生成记录字典
"""
item = dict()
item['content'] = record
for subdomain in subdomains:
self.records[subdomain] = item
def gen_result(self, find=0, brute=None, valid=0):
"""
生成结果
"""
logger.log('DEBUG', f'正在生成最终结果')
if not len(self.subdomains): # 该模块一个子域都没有发现的情况
result = {'id': None,
'type': self.type,
'alive': None,
'request': None,
'resolve': None,
'new': None,
'url': None,
'subdomain': None,
'level': None,
'cname': None,
'content': None,
'public': None,
'port': None,
'status': None,
'reason': None,
'title': None,
'banner': None,
'header': None,
'response': None,
'times': None,
'ttl': None,
'resolver': None,
'module': self.module,
'source': self.source,
'elapse': self.elapse,
'find': find,
'brute': brute,
'valid': valid}
self.results.append(result)
else:
for subdomain in self.subdomains:
url = 'http://' + subdomain
level = subdomain.count('.') - self.domain.count('.')
record = self.records.get(subdomain)
if record is None:
record = dict()
resolve = record.get('resolve')
request = record.get('request')
alive = record.get('alive')
                if self.type != 'A':  # subdomains collected via non-A record queries are treated as valid by default
resolve = 1
request = 1
alive = 1
reason = record.get('reason')
resolver = record.get('resolver')
cname = record.get('cname')
content = record.get('content')
times = record.get('times')
ttl = record.get('ttl')
public = record.get('public')
if isinstance(cname, list):
cname = ','.join(cname)
content = ','.join(content)
times = ','.join([str(num) for num in times])
ttl = ','.join([str(num) for num in ttl])
public = ','.join([str(num) for num in public])
result = {'id': None,
'type': self.type,
'alive': alive,
'request': request,
'resolve': resolve,
'new': None,
'url': url,
'subdomain': subdomain,
'level': level,
'cname': cname,
'content': content,
'public': public,
'port': 80,
'status': None,
'reason': reason,
'title': None,
'banner': None,
'header': None,
'response': None,
'times': times,
'ttl': ttl,
'resolver': resolver,
'module': self.module,
'source': self.source,
'elapse': self.elapse,
'find': find,
'brute': brute,
'valid': valid,
}
self.results.append(result)
def save_db(self):
"""
将模块结果存入数据库中
"""
logger.log('DEBUG', f'正在将结果存入到数据库')
lock.acquire()
db = Database()
db.create_table(self.domain)
db.save_db(self.domain, self.results, self.source)
db.close()
lock.release()

@@ -1,9 +0,0 @@
from .module import Module
class Query(Module):
"""
查询基类
"""
def __init__(self):
Module.__init__(self)

@@ -1,264 +0,0 @@
import asyncio
import functools
import aiohttp
import tqdm
from aiohttp import ClientSession
from bs4 import BeautifulSoup
import client.subdomain.oneforall.config as config
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.config import logger
from client.subdomain.oneforall.common.database import Database
def get_limit_conn():
    limit_open_conn = config.limit_open_conn
    if limit_open_conn is None:  # default case
        limit_open_conn = utils.get_semaphore()
    elif not isinstance(limit_open_conn, int):  # a non-numeric value was passed in
        limit_open_conn = utils.get_semaphore()
    return limit_open_conn
def get_ports(port):
    logger.log('DEBUG', f'Getting the port range to request')
    ports = set()
    if isinstance(port, (set, list, tuple)):
        ports = port
    elif isinstance(port, int):
        if 0 <= port <= 65535:
            ports = {port}
    elif port in {'default', 'small', 'large'}:
        logger.log('DEBUG', f'Requesting the {port} port range')
        ports = config.ports.get(port)
    if not ports:  # unexpected case
        logger.log('ERROR', f'The specified port range is invalid')
        ports = {80}
    logger.log('INFOR', f'Port range to request: {ports}')
    return set(ports)
def gen_req_data(data, ports):
    logger.log('INFOR', f'Generating request URLs')
    new_data = []
    for data in data:
        resolve = data.get('resolve')
        # subdomains that failed to resolve (0) are not probed over HTTP
        if resolve == 0:
            continue
        subdomain = data.get('subdomain')
        for port in ports:
            if str(port).endswith('443'):
                url = f'https://{subdomain}:{port}'
                if port == 443:
                    url = f'https://{subdomain}'
                data['id'] = None
                data['url'] = url
                data['port'] = port
                new_data.append(data)
                data = dict(data)  # a new dict object is needed
            else:
                url = f'http://{subdomain}:{port}'
                if port == 80:
                    url = f'http://{subdomain}'
                data['id'] = None
                data['url'] = url
                data['port'] = port
                new_data.append(data)
                data = dict(data)  # a new dict object is needed
    return new_data
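For illustration (hypothetical input, not part of the diff), one resolved subdomain expanded over ports 80 and 443 yields one HTTP and one HTTPS entry:

data = [{'subdomain': 'www.example.com', 'resolve': 1}]
urls = [d['url'] for d in gen_req_data(data, {80, 443})]
# ['http://www.example.com', 'https://www.example.com'] (order follows set iteration)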
async def fetch(session, url):
"""
请求
:param session: session对象
:param str url: url地址
:return: 响应对象和响应文本
"""
method = config.request_method.upper()
timeout = aiohttp.ClientTimeout(total=None,
connect=None,
sock_read=config.sockread_timeout,
sock_connect=config.sockconn_timeout)
try:
if method == 'HEAD':
async with session.head(url,
ssl=config.verify_ssl,
allow_redirects=config.allow_redirects,
timeout=timeout,
proxy=config.aiohttp_proxy) as resp:
text = await resp.text()
else:
async with session.get(url,
ssl=config.verify_ssl,
allow_redirects=config.allow_redirects,
timeout=timeout,
proxy=config.aiohttp_proxy) as resp:
try:
                    # first try decoding as utf-8
text = await resp.text(encoding='utf-8', errors='strict')
except UnicodeError:
try:
                        # then try decoding as gb18030
text = await resp.text(encoding='gb18030',
errors='strict')
except UnicodeError:
                        # finally fall back to automatic decoding
text = await resp.text(encoding=None,
errors='ignore')
return resp, text
except Exception as e:
return e
def get_title(markup):
"""
获取标题
:param markup: html标签
:return: 标题
"""
soup = BeautifulSoup(markup, 'html.parser')
title = soup.title
if title:
return title.text
h1 = soup.h1
if h1:
return h1.text
h2 = soup.h2
if h2:
return h2.text
    h3 = soup.h3
    if h3:
        return h3.text
desc = soup.find('meta', attrs={'name': 'description'})
if desc:
return desc['content']
word = soup.find('meta', attrs={'name': 'keywords'})
if word:
return word['content']
text = soup.text
if len(text) <= 200:
return text
return 'None'
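A quick sketch of the fallback order above (hypothetical markup, not part of the diff): with no <title>, the first heading wins.

markup = '<html><body><h1>Login Portal</h1><h2>v2</h2></body></html>'
print(get_title(markup))  # Login Portal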
def request_callback(future, index, datas):
result = future.result()
if isinstance(result, BaseException):
logger.log('TRACE', result.args)
name = utils.get_classname(result)
datas[index]['reason'] = name + ' ' + str(result)
datas[index]['request'] = 0
datas[index]['alive'] = 0
elif isinstance(result, tuple):
resp, text = result
datas[index]['reason'] = resp.reason
datas[index]['status'] = resp.status
if resp.status == 400 or resp.status >= 500:
datas[index]['request'] = 0
datas[index]['alive'] = 0
else:
datas[index]['request'] = 1
datas[index]['alive'] = 1
headers = resp.headers
datas[index]['banner'] = utils.get_sample_banner(headers)
datas[index]['header'] = str(dict(headers))[1:-1]
if isinstance(text, str):
title = get_title(text).strip()
datas[index]['title'] = utils.remove_invalid_string(title)
datas[index]['response'] = utils.remove_invalid_string(text)
def get_connector():
limit_open_conn = get_limit_conn()
return aiohttp.TCPConnector(ttl_dns_cache=300,
ssl=config.verify_ssl,
limit=limit_open_conn,
limit_per_host=config.limit_per_host)
def get_header():
header = None
if config.fake_header:
header = utils.gen_fake_header()
return header
async def bulk_request(data, port):
ports = get_ports(port)
no_req_data = utils.get_filtered_data(data)
to_req_data = gen_req_data(data, ports)
method = config.request_method
    logger.log('INFOR', f'Requests use the {method} method')
    logger.log('INFOR', f'Making asynchronous subdomain requests')
connector = get_connector()
header = get_header()
async with ClientSession(connector=connector, headers=header) as session:
tasks = []
for i, data in enumerate(to_req_data):
url = data.get('url')
task = asyncio.ensure_future(fetch(session, url))
task.add_done_callback(functools.partial(request_callback,
index=i,
datas=to_req_data))
tasks.append(task)
        # only proceed when the task list is not empty
        if tasks:
            # wait for all tasks to finish; errors are aggregated into the result list
futures = asyncio.as_completed(tasks)
for future in tqdm.tqdm(futures,
total=len(tasks),
desc='Request Progress',
ncols=80):
await future
return to_req_data + no_req_data
def run_request(domain, data, port):
"""
调用子域请求入口函数
:param str domain: 待请求的主域
:param list data: 待请求的子域数据
:param str port: 待请求的端口范围
:return: 请求后得到的结果列表
:rtype: list
"""
logger.log('INFOR', f'开始执行子域请求模块')
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
data = utils.set_id_none(data)
request_coroutine = bulk_request(data, port)
data = loop.run_until_complete(request_coroutine)
    # Sleep briefly before closing the event loop so underlying connections can finish closing
loop.run_until_complete(asyncio.sleep(0.25))
count = utils.count_alive(data)
    logger.log('INFOR', f'Verified {count} live subdomains of {domain}')
return data
def save_data(name, data):
"""
保存请求结果到数据库
:param str name: 保存表名
:param list data: 待保存的数据
"""
db = Database()
db.drop_table(name)
db.create_table(name)
db.save_db(name, data, 'request')
db.close()

@@ -1,166 +0,0 @@
import gc
import json
import client.subdomain.oneforall.config as config
from client.subdomain.oneforall.config import logger
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.database import Database
def filter_subdomain(data):
"""
过滤出无解析内容的子域到新的子域列表
:param list data: 待过滤的数据列表
:return: 符合条件的子域列表
"""
logger.log('DEBUG', f'正在过滤出待解析的子域')
subdomains = []
for data in data:
if not data.get('content'):
subdomain = data.get('subdomain')
subdomains.append(subdomain)
return subdomains
def update_data(data, records):
"""
更新解析结果
:param list data: 待更新的数据列表
:param dict records: 解析结果字典
:return: 更新后的数据列表
"""
logger.log('DEBUG', f'正在更新解析结果')
for index, items in enumerate(data):
if not items.get('content'):
subdomain = items.get('subdomain')
record = records.get(subdomain)
items.update(record)
data[index] = items
return data
def save_data(name, data):
"""
保存解析结果到数据库
:param str name: 保存表名
:param list data: 待保存的数据
"""
logger.log('INFOR', f'正在保存解析结果')
db = Database()
db.drop_table(name)
db.create_table(name)
db.save_db(name, data, 'resolve')
db.close()
def save_subdomains(save_path, subdomain_list):
    logger.log('DEBUG', f'Saving the subdomains awaiting resolution')
    subdomain_data = '\n'.join(subdomain_list)
    if not utils.save_data(save_path, subdomain_data):
        logger.log('FATAL', 'Error saving the subdomains awaiting resolution')
exit(1)
def deal_output(output_path):
    logger.log('INFOR', f'Processing resolution results')
    records = dict()  # holds all domain resolution data
with open(output_path) as fd:
for line in fd:
line = line.strip()
try:
items = json.loads(line)
except Exception as e:
logger.log('ERROR', e.args)
                logger.log('ERROR', f'Error parsing line {line}, skipping it')
continue
record = dict()
record['resolver'] = items.get('resolver')
            qname = items.get('name')[:-1]  # strip the trailing `.`
status = items.get('status')
if status != 'NOERROR':
record['alive'] = 0
record['resolve'] = 0
record['reason'] = status
records[qname] = record
continue
data = items.get('data')
if 'answers' not in data:
record['alive'] = 0
record['resolve'] = 0
record['reason'] = 'NOANSWER'
records[qname] = record
continue
flag = False
cname = list()
ips = list()
public = list()
ttls = list()
answers = data.get('answers')
for answer in answers:
if answer.get('type') == 'A':
flag = True
                    cname.append(answer.get('name')[:-1])  # strip the trailing `.`
ip = answer.get('data')
ips.append(ip)
ttl = answer.get('ttl')
ttls.append(str(ttl))
is_public = utils.ip_is_public(ip)
public.append(str(is_public))
record['resolve'] = 1
record['reason'] = status
record['cname'] = ','.join(cname)
record['content'] = ','.join(ips)
record['public'] = ','.join(public)
record['ttl'] = ','.join(ttls)
records[qname] = record
if not flag:
record['alive'] = 0
record['resolve'] = 0
record['reason'] = 'NOARECORD'
records[qname] = record
return records
def run_resolve(domain, data):
"""
调用子域解析入口函数
:param str domain: 待解析的主域
:param list data: 待解析的子域数据列表
:return: 解析得到的结果列表
:rtype: list
"""
logger.log('INFOR', f'开始解析{domain}的子域')
subdomains = filter_subdomain(data)
if not subdomains:
return data
massdns_dir = config.third_party_dir.joinpath('massdns')
result_dir = config.result_save_dir
temp_dir = result_dir.joinpath('temp')
utils.check_dir(temp_dir)
massdns_path = utils.get_massdns_path(massdns_dir)
timestring = utils.get_timestring()
save_name = f'collected_subdomains_{domain}_{timestring}.txt'
save_path = temp_dir.joinpath(save_name)
save_subdomains(save_path, subdomains)
del subdomains
gc.collect()
output_name = f'resolved_result_{domain}_{timestring}.json'
output_path = temp_dir.joinpath(output_name)
log_path = result_dir.joinpath('massdns.log')
ns_path = config.brute_nameservers_path
utils.call_massdns(massdns_path, save_path, ns_path,
output_path, log_path, quiet_mode=True)
records = deal_output(output_path)
data = update_data(data, records)
    logger.log('INFOR', f'Finished resolving subdomains of {domain}')
return data

@@ -1,54 +0,0 @@
import client.subdomain.oneforall.config as config
from .module import Module
from . import utils
class Search(Module):
"""
搜索基类
"""
def __init__(self):
Module.__init__(self)
        self.page_num = 0  # starting index of displayed search results
        self.per_page_num = 50  # number of search results displayed per page
self.recursive_search = config.enable_recursive_search
self.recursive_times = config.search_recursive_times
@staticmethod
def filter(domain, subdomain):
"""
生成搜索过滤语句
使用搜索引擎支持的-site:语法过滤掉搜索页面较多的子域以发现新域
:param str domain: 域名
:param set subdomain: 子域名集合
:return: 过滤语句
:rtype: str
"""
statements_list = []
subdomains_temp = set(map(lambda x: x + '.' + domain,
config.subdomains_common))
subdomains_temp = list(subdomain.intersection(subdomains_temp))
        for i in range(0, len(subdomains_temp), 2):  # exclude 2 subdomains at a time
statements_list.append(''.join(set(map(lambda s: ' -site:' + s,
subdomains_temp[i:i + 2]))))
return statements_list
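    # For illustration (hypothetical values): with domain 'example.com' and a
    # subdomain set containing 'www.example.com' and 'mail.example.com', filter()
    # yields statements such as ' -site:www.example.com -site:mail.example.com',
    # which are appended to the search query so already-indexed subdomains are
    # suppressed and new ones can surface.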
def match_location(self, domain, url):
"""
匹配跳转之后的url
针对部分搜索引擎(如百度搜索)搜索展示url时有显示不全的情况
此函数会向每条结果的链接发送head请求获取响应头的location值并做子域匹配
:param str domain: 域名
:param str url: 展示结果的url链接
:return: 匹配的子域
:rtype set
"""
resp = self.head(url, check=False, allow_redirects=False)
if not resp:
return set()
location = resp.headers.get('location')
if not location:
return set()
return set(utils.match_subdomain(domain, location))

@@ -1,579 +0,0 @@
import os
import re
import sys
import time
import random
import platform
import subprocess
from ipaddress import IPv4Address, ip_address
from stat import S_IXUSR
import psutil
import tenacity
import requests
from pathlib import Path
from records import Record, RecordCollection
from dns.resolver import Resolver
import client.subdomain.oneforall.config as config
from client.subdomain.oneforall.common.domain import Domain
from client.subdomain.oneforall.config import logger
user_agents = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/68.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) '
'Gecko/20100101 Firefox/68.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/68.0']
def match_subdomain(domain, text, distinct=True):
"""
    Match subdomains of domain in the text.
    :param str domain: registered domain
    :param str text: response text
    :param bool distinct: deduplicate the results
    :return: matched results
    :rtype: set or list
"""
regexp = r'(?:[a-z0-9](?:[a-z0-9\-]{0,61}[a-z0-9])?\.){0,}' \
+ domain.replace('.', r'\.')
result = re.findall(regexp, text, re.I)
if not result:
return set()
deal = map(lambda s: s.lower(), result)
if distinct:
return set(deal)
else:
return list(deal)
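# --- illustrative sketch, not in the original file ---
# match_subdomain() mines every *.domain occurrence out of arbitrary text,
# case-insensitively, and lowercases the hits:
#   match_subdomain('example.com', 'see https://a.example.com and B.EXAMPLE.COM')
#   -> {'a.example.com', 'b.example.com'}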
def gen_random_ip():
"""
    Generate a random public IP as a dotted-decimal string
"""
while True:
ip = IPv4Address(random.randint(0, 2 ** 32 - 1))
if ip.is_global:
return ip.exploded
def gen_fake_header():
"""
    Generate a fake request header
"""
ua = random.choice(user_agents)
ip = gen_random_ip()
headers = {
'Accept': 'text/html,application/xhtml+xml,'
'application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cache-Control': 'max-age=0',
'Connection': 'close',
'DNT': '1',
'Referer': 'https://www.google.com/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': ua,
'X-Forwarded-For': ip,
'X-Real-IP': ip
}
return headers
def get_random_proxy():
"""
    Pick a random proxy from the pool
"""
try:
return random.choice(config.proxy_pool)
except IndexError:
return None
def split_list(ls, size):
"""
    Split the list ls into chunks of the given size and return them.
    :param list ls: list to split
    :param int size: chunk size
    :return: list of chunks
>>> split_list([1, 2, 3, 4], 3)
[[1, 2, 3], [4]]
"""
if size == 0:
return ls
return [ls[i:i + size] for i in range(0, len(ls), size)]
def get_domains(target):
"""
    Load target domains.
    :param set or str target: domain collection, a single domain, or a file path
    :return: list of domains
"""
domains = list()
logger.log('DEBUG', f'正在获取域名')
if isinstance(target, (set, tuple)):
domains = list(target)
elif isinstance(target, list):
domains = target
elif isinstance(target, str):
path = Path(target)
if path.exists() and path.is_file():
with open(target, encoding='utf-8', errors='ignore') as file:
for line in file:
line = line.lower().strip()
domain = Domain(line).match()
if domain:
domains.append(domain)
else:
target = target.lower().strip()
domain = Domain(target).match()
if domain:
domains.append(domain)
count = len(domains)
if count == 0:
logger.log('FATAL', f'获取到{count}个域名')
exit(1)
logger.log('INFOR', f'获取到{count}个域名')
return domains
def get_semaphore():
"""
    Get the concurrency value for queries.
    :return: concurrency as an integer
    """
    # the value is identical on Windows, Linux and Darwin; a single default
    # also avoids falling through to None on other platforms
    return 800
def check_dir(dir_path):
if not dir_path.exists():
logger.log('INFOR', f'不存在{dir_path}目录将会新建')
dir_path.mkdir(parents=True, exist_ok=True)
def check_path(path, name, format):
"""
    Check and normalize the result output path.
    :param path: save path
    :param name: export name
    :param format: save format
    :return: save path
"""
filename = f'{name}.{format}'
default_path = config.result_save_dir.joinpath(filename)
if isinstance(path, str):
        path = repr(path).replace('\\', '/')  # replace backslashes in the path with forward slashes
        path = path.replace('\'', '')  # strip the quotes introduced by repr()
    else:
        path = default_path
    path = Path(path)
    if not path.suffix:  # the input is a directory
path = path.joinpath(filename)
parent_dir = path.parent
if not parent_dir.exists():
logger.log('ALERT', f'不存在{parent_dir}目录将会新建')
parent_dir.mkdir(parents=True, exist_ok=True)
if path.exists():
logger.log('ALERT', f'存在{path}文件将会覆盖')
return path
def check_format(format, count):
"""
    Check the export format.
    :param format: requested export format
    :param count: number of rows
    :return: usable export format
"""
formats = ['rst', 'csv', 'tsv', 'json', 'yaml', 'html',
'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods']
if format == 'xls' and count > 65000:
logger.log('ALERT', 'xls文件限制为最多65000行')
logger.log('ALERT', '使用xlsx格式导出')
return 'xlsx'
if format in formats:
return format
else:
logger.log('ALERT', f'不支持{format}格式导出')
logger.log('ALERT', '默认使用csv格式导出')
return 'csv'
def save_data(path, data):
"""
    Save data to a file.
    :param path: save path
    :param data: data to save
    :return: whether the save succeeded
"""
try:
with open(path, 'w', encoding="utf-8",
errors='ignore', newline='') as file:
file.write(data)
return True
except TypeError:
with open(path, 'wb') as file:
file.write(data)
return True
except Exception as e:
logger.log('ERROR', e.args)
return False
def check_response(method, resp):
"""
    Check the response and log details (including any JSON body) of abnormal responses.
    :param method: request method
    :param resp: response object
    :return: whether the response is normal
"""
if resp.status_code == 200 and resp.content:
return True
logger.log('ALERT', f'{method} {resp.url} {resp.status_code} - '
f'{resp.reason} {len(resp.content)}')
content_type = resp.headers.get('Content-Type')
if content_type and 'json' in content_type and resp.content:
try:
msg = resp.json()
except Exception as e:
logger.log('DEBUG', e.args)
else:
logger.log('ALERT', msg)
return False
def mark_subdomain(old_data, now_data):
"""
    Mark newly added subdomains and return the new dataset.
    :param list old_data: subdomain data from the previous run
    :param list now_data: subdomain data from the current run
    :return: marked subdomain data
    :rtype: list
"""
    # first collection run: everything is new
mark_data = now_data.copy()
if not old_data:
for index, item in enumerate(mark_data):
item['new'] = 1
mark_data[index] = item
return mark_data
    # later runs: compare against previously collected subdomains
old_subdomains = {item.get('subdomain') for item in old_data}
for index, item in enumerate(mark_data):
subdomain = item.get('subdomain')
if subdomain in old_subdomains:
item['new'] = 0
else:
item['new'] = 1
mark_data[index] = item
return mark_data
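# --- illustrative sketch, not in the original file ---
# mark_subdomain() in one picture:
#   old = [{'subdomain': 'www.example.com'}]
#   now = [{'subdomain': 'www.example.com'}, {'subdomain': 'new.example.com'}]
#   mark_subdomain(old, now)
#   -> [{'subdomain': 'www.example.com', 'new': 0},
#       {'subdomain': 'new.example.com', 'new': 1}]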
def remove_invalid_string(string):
    # Excel cells cannot store the following illegal control characters
return re.sub(r'[\000-\010]|[\013-\014]|[\016-\037]', r'', string)
def check_value(values):
if not isinstance(values, dict):
return values
for key, value in values.items():
if value is None:
continue
if isinstance(value, str) and len(value) > 32767:
            # Excel cell values are limited to 32767 characters
values[key] = value[:32767]
return values
def export_all(format, path, datas):
"""
    Export all result data into one file.
    :param str format: export file format
    :param str path: export file path
    :param list datas: result data to export
"""
format = check_format(format, len(datas))
timestamp = get_timestring()
name = f'all_subdomain_result_{timestamp}'
path = check_path(path, name, format)
logger.log('INFOR', f'所有主域的子域结果 {path}')
row_list = list()
for row in datas:
if 'header' in row:
row.pop('header')
if 'response' in row:
row.pop('response')
keys = row.keys()
values = row.values()
if format in {'xls', 'xlsx'}:
values = check_value(values)
row_list.append(Record(keys, values))
rows = RecordCollection(iter(row_list))
content = rows.export(format)
save_data(path, content)
def dns_resolver():
"""
    Build a DNS resolver
"""
resolver = Resolver()
resolver.nameservers = config.resolver_nameservers
resolver.timeout = config.resolver_timeout
resolver.lifetime = config.resolver_lifetime
return resolver
def dns_query(qname, qtype):
"""
    Query a domain's DNS records.
    :param str qname: domain to query
    :param str qtype: query type
    :return: query result
"""
logger.log('TRACE', f'尝试查询{qname}{qtype}记录')
resolver = dns_resolver()
try:
answer = resolver.query(qname, qtype)
except Exception as e:
logger.log('TRACE', e.args)
logger.log('TRACE', f'查询{qname}{qtype}记录失败')
return None
else:
logger.log('TRACE', f'查询{qname}{qtype}记录成功')
return answer
def get_timestamp():
return int(time.time())
def get_timestring():
return time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
def get_classname(classobj):
return classobj.__class__.__name__
def python_version():
return sys.version
def count_alive(data):
return len(list(filter(lambda item: item.get('alive') == 1, data)))
def get_subdomains(data):
return set(map(lambda item: item.get('subdomain'), data))
def set_id_none(data):
new_data = []
for item in data:
item['id'] = None
new_data.append(item)
return new_data
def get_filtered_data(data):
filtered_data = []
for item in data:
valid = item.get('resolve')
if valid == 0:
filtered_data.append(item)
return filtered_data
def get_sample_banner(headers):
temp_list = []
server = headers.get('Server')
if server:
temp_list.append(server)
via = headers.get('Via')
if via:
temp_list.append(via)
power = headers.get('X-Powered-By')
if power:
temp_list.append(power)
banner = ','.join(temp_list)
return banner
def check_ip_public(ip_list):
for ip_str in ip_list:
ip = ip_address(ip_str)
if not ip.is_global:
return 0
return 1
def ip_is_public(ip_str):
ip = ip_address(ip_str)
if not ip.is_global:
return 0
return 1
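# --- illustrative sketch, not in the original file ---
# both helpers above lean on ipaddress.is_global, which is False for private,
# loopback and reserved ranges alike:
#   ip_is_public('8.8.8.8')                   -> 1
#   ip_is_public('192.168.1.1')               -> 0
#   check_ip_public(['8.8.8.8', '10.0.0.1'])  -> 0 (any private IP fails the whole list)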
def get_process_num():
process_num = config.brute_process_num
if isinstance(process_num, int):
return min(os.cpu_count(), process_num)
else:
return 1
def get_coroutine_num():
coroutine_num = config.resolve_coroutine_num
if isinstance(coroutine_num, int):
return max(64, coroutine_num)
elif coroutine_num is None:
mem = psutil.virtual_memory()
total_mem = mem.total
g_size = 1024 * 1024 * 1024
if total_mem <= 1 * g_size:
return 64
elif total_mem <= 2 * g_size:
return 128
elif total_mem <= 4 * g_size:
return 256
elif total_mem <= 8 * g_size:
return 512
elif total_mem <= 16 * g_size:
return 1024
else:
return 2048
else:
return 64
def uniq_dict_list(dict_list):
    # keep only records that appear exactly once (duplicated records are dropped entirely)
    return list(filter(lambda name: dict_list.count(name) == 1, dict_list))
def delete_file(*paths):
for path in paths:
try:
path.unlink()
except Exception as e:
logger.log('ERROR', e.args)
@tenacity.retry(stop=tenacity.stop_after_attempt(2))
def check_net():
logger.log('INFOR', '正在检查网络环境')
url = 'http://www.example.com/'
logger.log('INFOR', f'访问地址 {url}')
try:
rsp = requests.get(url)
except Exception as e:
logger.log('ERROR', e.args)
logger.log('ALERT', '访问外网出错 重新检查中')
raise tenacity.TryAgain
if rsp.status_code != 200:
logger.log('ALERT', f'{rsp.request.method} {rsp.request.url} '
f'{rsp.status_code} {rsp.reason}')
logger.log('ALERT', '不能正常访问外网 重新检查中')
raise tenacity.TryAgain
logger.log('INFOR', '能正常访问外网')
def check_pre():
logger.log('INFOR', '正在检查依赖环境')
system = platform.system()
implementation = platform.python_implementation()
version = platform.python_version()
if implementation != 'CPython':
logger.log('FATAL', f'OneForAll只在CPython下测试通过')
exit(1)
if version < '3.6':
logger.log('FATAL', 'OneForAll需要Python 3.6以上版本')
exit(1)
if system == 'Windows' and implementation == 'CPython':
if version < '3.8':
logger.log('FATAL', 'OneForAll在Windows系统运行时需要Python 3.8以上版本')
exit(1)
def check_env():
logger.log('INFOR', '正在检查运行环境')
try:
check_net()
except Exception as e:
logger.log('DEBUG', e.args)
logger.log('FATAL', '不能正常访问外网')
exit(1)
check_pre()
def get_maindomain(domain):
return Domain(domain).registered()
def call_massdns(massdns_path, dict_path, ns_path, output_path, log_path,
query_type='A', process_num=1, concurrent_num=10000,
quiet_mode=False):
logger.log('INFOR', f'开始执行massdns')
quiet = ''
if quiet_mode:
quiet = '--quiet'
status_format = config.brute_status_format
socket_num = config.brute_socket_num
resolve_num = config.brute_resolve_num
cmd = f'{massdns_path} {quiet} --status-format {status_format} ' \
f'--processes {process_num} --socket-count {socket_num} ' \
f'--hashmap-size {concurrent_num} --resolvers {ns_path} ' \
f'--resolve-count {resolve_num} --type {query_type} ' \
f'--flush --output J --outfile {output_path} ' \
f'--error-log {log_path} {dict_path}'
logger.log('INFOR', f'执行命令 {cmd}')
subprocess.run(args=cmd, shell=True)
logger.log('INFOR', f'结束执行massdns')
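# --- illustrative sketch, not in the original file ---
# with the config defaults above, the assembled command looks roughly like
# (paths are placeholders):
#   ./massdns --quiet --status-format ansi --processes 1 --socket-count 1 \
#       --hashmap-size 10000 --resolvers ns.txt --resolve-count 50 --type A \
#       --flush --output J --outfile result.json --error-log massdns.log subs.txt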
def get_massdns_path(massdns_dir):
path = config.brute_massdns_path
if path:
return path
system = platform.system().lower()
machine = platform.machine().lower()
name = f'massdns_{system}_{machine}'
if system == 'windows':
name = name + '.exe'
if machine == 'amd64':
massdns_dir = massdns_dir.joinpath('windows', 'x64')
else:
            massdns_dir = massdns_dir.joinpath('windows', 'x86')
    path = massdns_dir.joinpath(name)
    if not path.exists():
        logger.log('FATAL', '暂无该系统平台及架构的massdns')
        logger.log('INFOR', '请尝试自行编译massdns并在配置里指定路径')
        exit(0)
    path.chmod(S_IXUSR)  # mark the bundled binary executable (only after confirming it exists)
    return path

@ -1,214 +0,0 @@
# coding=utf-8
"""
OneForAll配置
"""
import os
import sys
import pathlib
import urllib3
from loguru import logger
class PortScan:
    '''Port scan configuration'''
    shodan_api = 'xxxxxx'
    # nmap binary location: give an explicit path or rely on the environment PATH
    nmap_search_path = ('nmap', '/usr/bin/nmap', '/usr/local/bin/nmap', '/sw/bin/nmap', '/opt/local/bin/nmap')
class crawlergo:
    max_tab_count = '5'  # maximum number of tabs the crawler opens at once
    filter_mode = 'smart'  # filter mode: simple, smart or strict
    max_crawled_count = '200'  # maximum number of crawler tasks
if sys.platform.startswith('darwin'):
crawlergo_path = 'crawlergo_mac'
xray_path = 'xray_mac'
chromium_path = '/Users/miss/opt/scantools/Chromium/Chromium.app/Contents/MacOS/Chromium'
elif sys.platform.startswith('linux'):
crawlergo_path = 'crawlergo'
xray_path = 'xray'
chromium_path = '/usr/lib/chromium-browser/chromium-browser'
else:
crawlergo_path = 'crawlergo_mac'
xray_path = 'xray_mac'
chromium_path = '/Users/miss/opt/scantools/Chromium/Chromium.app/Contents/MacOS/Chromium'
# Path settings
relative_directory = pathlib.Path(__file__).parent  # base directory of the OneForAll code
module_dir = relative_directory.joinpath('modules')  # OneForAll modules directory
third_party_dir = relative_directory.joinpath('thirdparty')  # third-party tools directory
data_storage_dir = relative_directory.joinpath('data')  # data directory
result_save_dir = relative_directory.joinpath('results')  # results directory
# OneForAll entry parameters
enable_dns_resolve = True  # resolve subdomains via DNS (default True)
enable_http_request = True  # probe subdomains via HTTP requests (default True)
enable_takeover_check = False  # check for subdomain takeover risk (default False)
# the port parameter accepts 'default', 'small' or 'large'
http_request_port = 'default'  # HTTP probe ports (default 'default', i.e. port 80)
# the alive parameter accepts True or False: export only alive or all subdomain results
result_export_alive = False  # export only alive subdomains (default False)
# the format parameter accepts 'rst', 'csv', 'tsv', 'json', 'yaml', 'html',
# 'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods'
result_save_format = 'json'  # file format for subdomain results (json here, csv by default)
# path defaults to None, which auto-generates a path under the OneForAll results directory
result_save_path = None  # save path for subdomain results (default None)
# Collection module settings
save_module_result = False  # save each module's findings as a json file (default False)
enable_all_module = True  # enable all modules (default True)
enable_partial_module = []  # enable selected modules; only effective when enable_all_module is disabled
# example: collect subdomains with only the baidu search engine
# enable_partial_module = [('modules.search', 'baidu')]
module_thread_timeout = 180.0  # per collection-module thread timeout (default 3 minutes)
# Brute-force module settings
enable_brute_module = False  # use the brute module (default False)
enable_wildcard_check = True  # enable wildcard-DNS detection (default True)
enable_wildcard_deal = True  # enable wildcard-DNS handling (default True)
brute_massdns_path = None  # None selects automatically; otherwise give an absolute path
brute_status_format = 'ansi'  # status output format while bruting, default 'ansi', alternatively 'json'
# number of processes used while bruting (set per CPU count; should not exceed the number of logical CPUs)
brute_process_num = 1  # default 1
brute_concurrent_num = 10000  # number of concurrent queries (default 10000)
brute_socket_num = 1  # sockets per process while bruting
brute_resolve_num = 50  # retries with a different nameserver after a failed resolution
# wordlist used for bruting, default data/subnames.txt
brute_wordlist_path = data_storage_dir.joinpath('subnames.txt')
brute_nameservers_path = data_storage_dir.joinpath('cn_nameservers.txt')
# save path of the domain's authoritative DNS nameservers; they are used for
# A-record queries when the domain has wildcard resolution enabled
authoritative_dns_path = data_storage_dir.joinpath('authoritative_dns.txt')
enable_recursive_brute = False  # use recursive bruting (default False)
brute_recursive_depth = 2  # recursion depth (default 2 levels)
# wordlist used to brute the next subdomain level, default data/next_subnames.txt
recursive_nextlist_path = data_storage_dir.joinpath('next_subnames.txt')
enable_check_dict = False  # prompt to review the wordlist configuration (default False)
delete_generated_dict = True  # delete wordlists generated temporarily while bruting (default True)
# whether to delete the resolution results massdns writes while bruting (default True)
# the massdns output contains more detailed resolution results
# note: with a large wordlist, recursive bruting, or a wildcard-DNS target the file can get very large
delete_massdns_result = True
only_save_valid = True  # keep only successfully resolved subdomains when processing brute results
check_time = 10  # how long to pause for wordlist-configuration review (default 10 seconds)
enable_fuzz = False  # enumerate domains in fuzz mode
fuzz_place = None  # position to brute, marked with `@`, e.g. www.@.example.com
fuzz_rule = None  # regex for fuzzed names, e.g. '[a-z][0-9]' (first char a letter, second a digit)
brute_ip_blacklist = {'0.0.0.0', '0.0.0.1'}  # subdomains resolving to a blacklisted IP are marked invalid
ip_appear_maximum = 100  # over 100 subdomains resolving to one IP marks them as invalid (wildcard) subdomains
# Proxy settings
enable_proxy = False  # use a proxy (global switch)
proxy_all_module = False  # proxy every module
proxy_partial_module = ['GoogleQuery', 'AskSearch', 'DuckDuckGoSearch',
                        'GoogleAPISearch', 'GoogleSearch', 'YahooSearch',
                        'YandexSearch', 'CrossDomainXml',
                        'ContentSecurityPolicy']  # modules to proxy
proxy_pool = [{'http': 'http://127.0.0.1:1080',
               'https': 'https://127.0.0.1:1080'}]  # proxy pool
# proxy_pool = [{'http': 'socks5h://127.0.0.1:10808',
#                'https': 'socks5h://127.0.0.1:10808'}]  # proxy pool
# Network request settings
enable_fake_header = True  # enable fake request headers
request_delay = 1  # request delay
request_timeout = 60  # request timeout
request_verify = False  # verify SSL certificates on requests
# disable security warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Search module settings
enable_recursive_search = False  # search for subdomains recursively
search_recursive_times = 2  # recursive search depth
# DNS resolution settings
resolve_coroutine_num = 64
resolver_nameservers = [
    '223.5.5.5',  # AliDNS
    '119.29.29.29',  # DNSPod
    '114.114.114.114',  # 114DNS
    '8.8.8.8',  # Google DNS
    '1.1.1.1'  # CloudFlare DNS
]  # DNS nameservers used for queries
resolver_timeout = 5.0  # resolution timeout
resolver_lifetime = 60.0  # resolution lifetime
limit_resolve_conn = 500  # cap on simultaneous resolutions (default 500)
# Port probing settings
# custom ports can be added to these lists
default_ports = [80]  # used by default
small_ports = [80, 443, 8000, 8080, 8443]
# note: avoid the large port range for big vendors: they have so many subdomains that,
# combined with a large port range, the generated requests can reach hundreds of thousands
# to millions, which may exhaust memory and crash the program, and waiting on that many
# requests takes very long anyway. OneForAll is not a port scanner; use nmap, zmap or
# similar tools for port scanning.
large_ports = [80, 81, 280, 300, 443, 591, 593, 832, 888, 901, 981, 1010, 1080,
1100, 1241, 1311, 1352, 1434, 1521, 1527, 1582, 1583, 1944, 2082,
2082, 2086, 2087, 2095, 2096, 2222, 2301, 2480, 3000, 3128, 3333,
4000, 4001, 4002, 4100, 4125, 4243, 4443, 4444, 4567, 4711, 4712,
4848, 4849, 4993, 5000, 5104, 5108, 5432, 5555, 5800, 5801, 5802,
5984, 5985, 5986, 6082, 6225, 6346, 6347, 6443, 6480, 6543, 6789,
7000, 7001, 7002, 7396, 7474, 7674, 7675, 7777, 7778, 8000, 8001,
8002, 8003, 8004, 8005, 8006, 8008, 8009, 8010, 8014, 8042, 8069,
8075, 8080, 8081, 8083, 8088, 8090, 8091, 8092, 8093, 8016, 8118,
8123, 8172, 8181, 8200, 8222, 8243, 8280, 8281, 8333, 8384, 8403,
8443, 8500, 8530, 8531, 8800, 8806, 8834, 8880, 8887, 8888, 8910,
8983, 8989, 8990, 8991, 9000, 9043, 9060, 9080, 9090, 9091, 9200,
9294, 9295, 9443, 9444, 9800, 9981, 9988, 9990, 9999, 10000,
10880, 11371, 12043, 12046, 12443, 15672, 16225, 16080, 18091,
18092, 20000, 20720, 24465, 28017, 28080, 30821, 43110, 61600]
ports = {'default': default_ports, 'small': small_ports, 'large': large_ports}
# aiohttp settings
verify_ssl = False
# aiohttp supports HTTP/HTTPS proxies
aiohttp_proxy = None  # proxy="http://user:pass@some.proxy.com"
allow_redirects = True  # follow redirects
fake_header = True  # use fake request headers
# change the following settings carefully to keep request quality
# request_method must be HEAD or GET; HEAD is faster but cannot fetch the
# response body to extract subdomains from
request_method = 'GET'  # request method to use (default GET)
sockread_timeout = 10  # socket read timeout per request (10 seconds)
sockconn_timeout = 10  # socket connect timeout per request (10 seconds)
# cap on the total number of connections open at once
limit_open_conn = 100  # default 100
# cap on connections open at once to a single endpoint (same (host, port, is_ssl) triple)
limit_per_host = 10  # 0 means unlimited, default 10
subdomains_common = {'i', 'w', 'm', 'en', 'us', 'zh', 'w3', 'app', 'bbs',
'web', 'www', 'job', 'docs', 'news', 'blog', 'data',
'help', 'live', 'mall', 'blogs', 'files', 'forum',
'store', 'mobile', 'admin'}
# Logging configuration
# terminal log output format
stdout_fmt = '<cyan>{time:HH:mm:ss,SSS}</cyan> ' \
'[<level>{level: <5}</level>] ' \
'<blue>{module}</blue>:<cyan>{line}</cyan> - ' \
'<level>{message}</level>'
# log file record format
logfile_fmt = '<light-green>{time:YYYY-MM-DD HH:mm:ss,SSS}</light-green> ' \
'[<level>{level: <5}</level>] ' \
'<cyan>{process.name}({process.id})</cyan>:' \
'<cyan>{thread.name: <18}({thread.id: <5})</cyan> | ' \
'<blue>{module}</blue>.<blue>{function}</blue>:' \
'<blue>{line}</blue> - <level>{message}</level>'
log_path = result_save_dir.joinpath('oneforall.log')
logger.remove()
logger.level(name='TRACE', no=5, color='<cyan><bold>', icon='✏️')
logger.level(name='DEBUG', no=10, color='<blue><bold>', icon='🐞 ')
logger.level(name='INFOR', no=20, color='<green><bold>', icon='')
logger.level(name='ALERT', no=30, color='<yellow><bold>', icon='⚠️')
logger.level(name='ERROR', no=40, color='<red><bold>', icon='❌️')
logger.level(name='FATAL', no=50, color='<RED><bold>', icon='☠️')
if not os.environ.get('PYTHONIOENCODING'):  # set the encoding
os.environ['PYTHONIOENCODING'] = 'utf-8'
logger.add(sys.stderr, level='INFOR', format=stdout_fmt, enqueue=True)
logger.add(log_path, level='DEBUG', format=logfile_fmt, enqueue=True,
encoding='utf-8')
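# --- illustrative sketch, not in the original file ---
# the custom levels registered above are addressed by name through logger.log:
#   from client.subdomain.oneforall.config import logger
#   logger.log('INFOR', 'collection started')  # stderr (level INFOR) and log file
#   logger.log('DEBUG', 'verbose detail')      # log file only (stderr sink is INFOR)
#   logger.log('TRACE', 'finest detail')       # dropped: below both sinks' levels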

File diff suppressed because it is too large

@ -1,7 +0,0 @@
223.5.5.5
223.6.6.6
114.114.114.114
114.114.115.115
180.76.76.76
119.29.29.29
182.254.116.116

@ -1,249 +0,0 @@
[
{
"name":"github",
"cname":["github.io", "github.map.fastly.net"],
"response":["There isn't a GitHub Pages site here.", "For root URLs (like http://example.com/) you must provide an index.html file"]
},
{
"name":"heroku",
"cname":["herokudns.com", "herokussl.com", "herokuapp.com"],
"response":["There's nothing here, yet.", "herokucdn.com/error-pages/no-such-app.html", "<title>No such app</title>"]
},
{
"name":"unbounce",
"cname":["unbouncepages.com"],
"response":["Sorry, the page you were looking for doesnt exist.", "The requested URL was not found on this server"]
},
{
"name":"tumblr",
"cname":["tumblr.com"],
"response":["There's nothing here.", "Whatever you were looking for doesn't currently exist at this address."]
},
{
"name":"shopify",
"cname":["myshopify.com"],
"response":["Sorry, this shop is currently unavailable.", "Only one step left!"]
},
{
"name":"instapage",
"cname":["pageserve.co", "secure.pageserve.co", "https://instapage.com/"],
"response":["Looks Like You're Lost","The page you're looking for is no longer available."]
},
{
"name":"desk",
"cname":["desk.com"],
"response":["Please try again or try Desk.com free for 14 days.", "Sorry, We Couldn't Find That Page"]
},
{
"name":"campaignmonitor",
"cname":["createsend.com", "name.createsend.com"],
"response":["Double check the URL", "<strong>Trying to access your account?</strong>"]
},
{
"name":"cargocollective",
"cname":["cargocollective.com"],
"response":["404 Not Found"]
},
{
"name":"statuspage",
"cname":["statuspage.io"],
"response":["Better Status Communication", "You are being <a href=\"https://www.statuspage.io\">redirected"]
},
{
"name":"amazonaws",
"cname":["amazonaws.com"],
"response":["NoSuchBucket", "The specified bucket does not exist"]
},
{
"name":"bitbucket",
"cname":["bitbucket.org"],
"response":["The page you have requested does not exist","Repository not found"]
},
{
"name":"smartling",
"cname":["smartling.com"],
"response":["Domain is not configured"]
},
{
"name":"acquia",
"cname":["acquia.com"],
"response":["If you are an Acquia Cloud customer and expect to see your site at this address","The site you are looking for could not be found."]
},
{
"name":"fastly",
"cname":["fastly.net"],
"response":["Please check that this domain has been added to a service", "Fastly error: unknown domain"]
},
{
"name":"pantheon",
"cname":["pantheonsite.io"],
"response":["The gods are wise", "The gods are wise, but do not know of the site which you seek."]
},
{
"name":"zendesk",
"cname":["zendesk.com"],
"response":["Help Center Closed"]
},
{
"name":"uservoice",
"cname":["uservoice.com"],
"response":["This UserVoice subdomain is currently available!"]
},
{
"name":"ghost",
"cname":["ghost.io"],
"response":["The thing you were looking for is no longer here", "The thing you were looking for is no longer here, or never was"]
},
{
"name":"pingdom",
"cname":["stats.pingdom.com"],
"response":["pingdom"]
},
{
"name":"tilda",
"cname":["tilda.ws"],
"response":["Domain has been assigned"]
},
{
"name":"wordpress",
"cname":["wordpress.com"],
"response":["Do you want to register"]
},
{
"name":"teamwork",
"cname":["teamwork.com"],
"response":["Oops - We didn't find your site."]
},
{
"name":"helpjuice",
"cname":["helpjuice.com"],
"response":["We could not find what you're looking for."]
},
{
"name":"helpscout",
"cname":["helpscoutdocs.com"],
"response":["No settings were found for this company:"]
},
{
"name":"cargo",
"cname":["cargocollective.com"],
"response":["If you're moving your domain away from Cargo you must make this configuration through your registrar's DNS control panel."]
},
{
"name":"feedpress",
"cname":["redirect.feedpress.me"],
"response":["The feed has not been found."]
},
{
"name":"surge",
"cname":["surge.sh"],
"response":["project not found"]
},
{
"name":"surveygizmo",
"cname":["privatedomain.sgizmo.com", "privatedomain.surveygizmo.eu", "privatedomain.sgizmoca.com"],
"response":["data-html-name"]
},
{
"name":"mashery",
"cname":["mashery.com"],
"response":["Unrecognized domain <strong>"]
},
{
"name":"intercom",
"cname":["custom.intercom.help"],
"response":["This page is reserved for artistic dogs.","<h1 class=\"headline\">Uh oh. That page doesnt exist.</h1>"]
},
{
"name":"webflow",
"cname":["proxy.webflow.io"],
"response":["<p class=\"description\">The page you are looking for doesn't exist or has been moved.</p>"]
},
{
"name":"kajabi",
"cname":["endpoint.mykajabi.com"],
"response":["<h1>The page you were looking for doesn't exist.</h1>"]
},
{
"name":"thinkific",
"cname":["thinkific.com"],
"response":["You may have mistyped the address or the page may have moved."]
},
{
"name":"tave",
"cname":["clientaccess.tave.com"],
"response":["<h1>Error 404: Page Not Found</h1>"]
},
{
"name":"wishpond",
"cname":["wishpond.com"],
"response":["https://www.wishpond.com/404?campaign=true"]
},
{
"name":"aftership",
"cname":["aftership.com"],
"response":["Oops.</h2><p class=\"text-muted text-tight\">The page you're looking for doesn't exist."]
},
{
"name":"aha",
"cname":["ideas.aha.io"],
"response":["There is no portal here ... sending you back to Aha!"]
},
{
"name":"brightcove",
"cname":["brightcovegallery.com", "gallery.video", "bcvp0rtal.com"],
"response":["<p class=\"bc-gallery-error-code\">Error Code: 404</p>"]
},
{
"name":"bigcartel",
"cname":["bigcartel.com"],
"response":["<h1>Oops! We couldn&#8217;t find that page.</h1>"]
},
{
"name":"activecompaign",
"cname":["activehosted.com"],
"response":["alt=\"LIGHTTPD - fly light.\""]
},
{
"name":"compaignmonitor",
"cname":["createsend.com"],
"response":["Double check the URL or <a href=\"mailto:help@createsend.com"]
},
{
"name":"simplebooklet",
"cname":["simplebooklet.com"],
"response":["We can't find this <a href=\"https://simplebooklet.com"]
},
{
"name":"getresponse",
"cname":[".gr8.com"],
"response":["With GetResponse Landing Pages, lead generation has never been easier"]
},
{
"name":"vend",
"cname":["vendecommerce.com"],
"response":["Looks like you've traveled too far into cyberspace."]
},
{
"name":"jetbrains",
"cname":["myjetbrains.com"],
"response":["is not a registered InCloud YouTrack.","is not a registered InCloud YouTrack."]
},
{
"name":"azure",
"cname":["azurewebsites.net",
".cloudapp.net",
".cloudapp.azure.com",
".trafficmanager.net",
".blob.core.windows.net",
".azure-api.net",
".azurehdinsight.net",
".azureedge.net"],
"response":["404 Web Site not found"]
},
{
"name":"readme",
"cname":["readme.io"],
"response":["Project doesnt exist... yet!"]
}
]
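# --- illustrative sketch, not in the original file ---
# how a takeover check can consume the fingerprint list above, assuming it is
# loaded from a local JSON file (the project wires this up elsewhere):
#
#   import json
#
#   def match_fingerprint(cname, body, fingerprints):
#       # return the service name when a cname marker and a response marker both match
#       for item in fingerprints:
#           if any(c in cname for c in item['cname']) and \
#                   any(m in body for m in item['response']):
#               return item['name']
#       return None
#
#   fingerprints = json.load(open('takeover_fingerprints.json'))  # hypothetical path
#   match_fingerprint('foo.github.io', "There isn't a GitHub Pages site here.",
#                     fingerprints)  # -> 'github'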

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

@ -1,102 +0,0 @@
[
"_afpovertcp._tcp.",
"_aix._tcp.",
"_autodiscover._tcp.",
"_caldav._tcp.",
"_certificates._tcp.",
"_client._smtp.",
"_cmp._tcp.",
"_crls._tcp.",
"_crl._tcp.",
"_finger._tcp.",
"_ftp._tcp.",
"_gc._tcp.",
"_h323be._tcp.",
"_h323be._udp.",
"_h323cs._tcp.",
"_h323cs._udp.",
"_h323ls._tcp.",
"_h323ls._udp.",
"_h323rs._tcp.",
"_hkps._tcp.",
"_hkp._tcp.",
"_http._tcp.",
"_iax.udp.",
"_imaps._tcp.",
"_imap._tcp.",
"_jabber-client._tcp.",
"_jabber-client._udp.",
"_jabber._tcp.",
"_jabber._udp.",
"_kerberos-adm._tcp.",
"_kerberos._tcp.",
"_kerberos._tcp.dc._msdcs.",
"_kerberos._udp.",
"_kpasswd._tcp.",
"_kpasswd._udp.",
"_ldap._tcp.",
"_ldap._tcp.dc._msdcs.",
"_ldap._tcp.gc._msdcs.",
"_ldap._tcp.pdc._msdcs.",
"_msdcs.",
"_mysqlsrv._tcp.",
"_nntp._tcp.",
"_ntp._udp.",
"_ocsp._tcp.",
"_pgpkeys._tcp.",
"_pgprevokations._tcp.",
"_PKIXREP._tcp.",
"_pop3s._tcp.",
"_pop3._tcp.",
"_sipfederationtls._tcp.",
"_sipinternal._tcp.",
"_sipinternaltls._tcp.",
"_sips._tcp.",
"_sip._tcp.",
"_sip._tls.",
"_sip._udp.",
"_smtp._tcp.",
"_ssh._tcp.",
"_stun._tcp.",
"_stun._udp.",
"_svcp._tcp.",
"_tcp.",
"_telnet._tcp.",
"_test._tcp.",
"_tls.",
"_udp.",
"_vlmcs._tcp.",
"_vlmcs._udp.",
"_whois._tcp.",
"_wpad._tcp.",
"_xmpp-client._tcp.",
"_xmpp-client._udp.",
"_xmpp-server._tcp.",
"_xmpp-server._udp.",
"_https._tcp.",
"_imap.tcp.",
"_kerberos.tcp.dc._msdcs.",
"_ldap._tcp.ForestDNSZones.",
"_submission._tcp.",
"_caldavs._tcp.",
"_carddav._tcp.",
"_carddavs._tcp.",
"_x-puppet._tcp.",
"_x-puppet-ca._tcp.",
"_domainkey.",
"_pkixrep._tcp.",
"_cisco-phone-http.",
"_cisco-phone-tftp.",
"_cisco-uds._tcp.",
"_ciscowtp._tcp.",
"_collab-edge._tls.",
"_cuplogin._tcp.",
"_client._smtp._tcp.",
"_sftp._tcp.",
"_h323rs._udp.",
"_sql._tcp.",
"_sip._tcp.internal.",
"_snmp._udp.",
"_rdp._tcp.",
"_xmpp-server._udp."
]
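# --- illustrative sketch, not in the original file ---
# each entry above is an SRV service prefix; query names are formed by
# appending the target domain and asking for SRV records, e.g.:
#   '_https._tcp.' + 'example.com' -> '_https._tcp.example.com'
#   '_ldap._tcp.dc._msdcs.' + 'corp.example' -> '_ldap._tcp.dc._msdcs.corp.example'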

File diff suppressed because it is too large

@ -1,57 +0,0 @@
#!/usr/bin/python3
# coding=utf-8
"""
OneForAll database export module
:copyright: Copyright (c) 2019, Jing Ling. All rights reserved.
:license: GNU General Public License v3.0, see LICENSE for more details.
"""
import fire
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.database import Database
from client.subdomain.oneforall.config import logger
def export(table, db=None, alive=False, limit=None, path=None, format='csv', show=False):
"""
    OneForAll database export module
    Example:
        python3 dbexport.py --table name --format csv --dir= ./result.csv
        python3 dbexport.py --db result.db --table name --show False
    Note:
        the alive parameter accepts True or False: export only alive or all subdomain results
        the format parameter accepts 'txt', 'rst', 'csv', 'tsv', 'json', 'yaml', 'html',
        'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods'
        path defaults to None, which auto-generates a path under the OneForAll results directory
    :param str table: table to export
    :param str db: path of the database to export (default results/result.sqlite3)
    :param bool alive: export only alive subdomains (default False)
    :param str limit: export limit condition (default None)
    :param str format: export file format (default csv)
    :param str path: export file path (default None)
    :param bool show: print exported data in the terminal (default False)
"""
database = Database(db)
rows = database.export_data(table, alive, limit)
format = utils.check_format(format, len(rows))
path = utils.check_path(path, table, format)
if show:
print(rows.dataset)
data = rows.export(format)
database.close()
utils.save_data(path, data)
logger.log('INFOR', f'{table}主域的子域结果 {path}')
data_dict = rows.as_dict()
return data_dict
if __name__ == '__main__':
fire.Fire(export)
# save('example_com_last', format='txt')

@ -1,2 +0,0 @@
example.com
hackfun.org

@ -1,16 +0,0 @@
#!/usr/bin/env python3
# coding=utf-8
"""
示例
"""
import client.subdomain.oneforall.oneforall as oneforall
if __name__ == '__main__':
test = oneforall.OneForAll(target='github.com')
test.brute = True
test.req = False
test.takeover = True
test.run()
result = test.datas

@ -1,102 +0,0 @@
#!/usr/bin/env python3
# coding=utf-8
"""
github自动接管
"""
import json
import base64
import requests
import api
HEADERS = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.9",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
}
def github_takeover(url):
# 读取config配置文件
repo_name = url
print('[*]正在读取配置文件...')
user = api.github_api_user
token = api.github_api_token
CHECK_HEADERS = {
"Authorization": 'token ' + token,
"Accept": "application/vnd.github.switcheroo-preview+json"
}
repos_url = 'https://api.github.com/repos/' + user + '/' + repo_name
repos_r = requests.get(url=repos_url, headers=CHECK_HEADERS)
# 验证token是否正确
if 'message' in repos_r.json():
if repos_r.json()['message'] == 'Bad credentials':
print('[*]请检查Token是否正确')
elif repos_r.json()['message'] == 'Not Found':
print('[*]正在生成接管库...') # 生成接管库
creat_repo_dict = {
"name": repo_name,
"description": "This is a subdomain takeover Repository",
}
creat_repo_url = 'https://api.github.com/user/repos'
creat_repo_r = requests.post(url=creat_repo_url,
headers=CHECK_HEADERS,
data=json.dumps(creat_repo_dict))
creat_repo_status = creat_repo_r.status_code
if creat_repo_status == 201:
print('[*]创建接管库' + repo_name + '成功,正在进行自动接管...')
                # generate the takeover files
                # index.html
                html = b'''
                <html>
                <p>Subdomain Takeover Test!</p>
                </html>
                '''
html64 = base64.b64encode(html).decode('utf-8')
html_dict = {
"message": "my commit message",
"committer": {
"name": "user", # 提交id非必改项
"email": "user@163.com" # 同上
},
"content": html64
}
# CNAME文件
cname_url = bytes(url, encoding='utf-8')
cname_url64 = base64.b64encode(cname_url).decode('utf-8')
url_dict = {
"message": "my commit message",
"committer": {
"name": "user",
"email": "user@163.com"
},
"content": cname_url64
}
html_url = 'https://api.github.com/repos/' + user + '/' + repo_name + '/contents/index.html'
url_url = 'https://api.github.com/repos/' + user + '/' + repo_name + '/contents/CNAME'
html_r = requests.put(url=html_url, data=json.dumps(html_dict),
headers=CHECK_HEADERS) # 上传index.html
cname_r = requests.put(url=url_url, data=json.dumps(url_dict),
headers=CHECK_HEADERS) # 上传CNAME
rs = cname_r.status_code
if rs == 201:
print('[*]生成接管库成功正在开启Github pages...')
page_url = "https://api.github.com/repos/" + user + "/" + url + "/pages"
page_dict = {
"source": {
"branch": "master"
}
}
page_r = requests.post(url=page_url,
data=json.dumps(page_dict),
headers=CHECK_HEADERS) # 开启page
if page_r.status_code == 201:
print('[+]自动接管成功请稍后访问http://' + str(url) + '查看结果')
else:
print('[+]开启Github pages失败请检查网络或稍后重试...')
else:
print('[+]生成接管库失败,请检查网络或稍后重试...')
elif url in repos_r.json()['name']:
print('[*]生成接管库失败请检查https://github.com/' + user +
'?tab=repositories是否存在同名接管库...')
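# --- illustrative sketch, not in the original file ---
# intended call flow, assuming api.github_api_user / api.github_api_token are
# set and the dangling subdomain CNAMEs to *.github.io: the function creates a
# repo named after the subdomain, commits index.html plus a CNAME file, then
# enables GitHub Pages on the master branch:
#   github_takeover('sub.victim.example')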

@ -1,72 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
from client.subdomain.oneforall.config import logger
class CensysAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Certificate'
self.source = "CensysAPIQuery"
self.addr = 'https://www.censys.io/api/v1/search/certificates'
self.id = api.censys_api_id
self.secret = api.censys_api_secret
        self.delay = 3.0  # Censys rate limit: at most one query every 2.5 seconds
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
data = {
'query': f'parsed.names: {self.domain}',
'page': 1,
'fields': ['parsed.subject_dn', 'parsed.names'],
'flatten': True}
resp = self.post(self.addr, json=data, auth=(self.id, self.secret))
if not resp:
return
json = resp.json()
status = json.get('status')
if status != 'ok':
logger.log('ALERT', status)
return
subdomains = self.match(self.domain, str(json))
self.subdomains = self.subdomains.union(subdomains)
pages = json.get('metadata').get('pages')
for page in range(2, pages + 1):
data['page'] = page
resp = self.post(self.addr, json=data, auth=(self.id, self.secret))
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
if not self.check(self.id, self.secret):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = CensysAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,53 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class CertSpotter(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = domain
self.module = 'Certificate'
self.source = 'CertSpotterQuery'
self.addr = 'https://api.certspotter.com/v1/issuances'
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'domain': self.domain,
'include_subdomains': 'true',
'expand': 'dns_names'}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, str(resp.json()))
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = CertSpotter(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,49 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class Crtsh(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Certificate'
self.source = 'CrtshQuery'
self.addr = 'https://crt.sh/'
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'q': f'%.{self.domain}', 'output': 'json'}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, str(resp.json()))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = Crtsh(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,51 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class Entrust(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Certificate'
self.source = 'EntrustQuery'
self.addr = 'https://ctsearch.entrust.com/api/v1/certificates'
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'fields': 'subjectDN',
'domain': self.domain,
'includeExpired': 'true'}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, str(resp.json()))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = Entrust(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,53 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class Google(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Certificate'
self.source = 'GoogleQuery'
self.addr = 'https://transparencyreport.google.com/' \
'transparencyreport/api/v3/httpsreport/ct/certsearch'
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'include_expired': 'true',
'include_subdomains': 'true',
'domain': self.domain}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, resp.text)
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = Google(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,65 +0,0 @@
import api
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class SpyseAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = domain
self.module = 'Certificate'
self.source = 'CertDBAPIQuery'
self.addr = 'https://api.spyse.com/v1/subdomains'
self.token = api.spyse_api_token
def query(self):
"""
向接口查询子域并做子域匹配
"""
page_num = 1
while True:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'domain': self.domain,
'api_token': self.token,
'page': page_num}
resp = self.get(self.addr, params)
if not resp:
return
json = resp.json()
subdomains = utils.match_subdomain(self.domain, str(json))
            if not subdomains:  # stop when a page yields no new subdomains
                break
            # merge subdomain search results
            self.subdomains = self.subdomains.union(subdomains)
            page_num += 1
            # each query returns at most 30 entries; fewer than 30 means the last page
if json.get('count') < 30:
break
def run(self):
"""
类执行入口
"""
if not self.check(self.token):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = SpyseAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,97 +0,0 @@
"""
Query the domain's NS records (the nameservers authoritative for the domain)
and check whether they allow DNS zone transfers; if a transfer is enabled
without access control or authentication, exploit it to fetch all records of
the zone.
A DNS zone transfer (AXFR) is how a secondary nameserver refreshes its zone
database from the primary, as redundancy against primary failure. When the
primary allows transfers but neither restricts nor authenticates requesting
servers, this flaw yields every record of a zone.
"""
import dns.query
import dns.resolver
import dns.zone
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.config import logger
class CheckAXFR(Module):
"""
    DNS zone-transfer vulnerability check
"""
def __init__(self, domain: str):
Module.__init__(self)
self.domain = self.register(domain)
self.module = 'Check'
self.source = 'AXFRCheck'
self.results = []
def axfr(self, server):
"""
        Perform a zone transfer.
        :param server: nameserver
"""
logger.log('DEBUG', f'尝试对{self.domain}的域名服务器{server}进行域传送')
try:
xfr = dns.query.xfr(where=server, zone=self.domain,
timeout=5.0, lifetime=10.0)
zone = dns.zone.from_xfr(xfr)
except Exception as e:
logger.log('DEBUG', e.args)
logger.log('DEBUG', f'{self.domain}的域名服务器{server}进行域传送失败')
return
names = zone.nodes.keys()
for name in names:
full_domain = str(name) + '.' + self.domain
subdomain = utils.match_subdomain(self.domain, full_domain)
self.subdomains = self.subdomains.union(subdomain)
record = zone[name].to_text(name)
self.results.append(record)
if self.results:
logger.log('DEBUG', f'发现{self.domain}{server}上的域传送记录')
logger.log('DEBUG', '\n'.join(self.results))
self.results = []
def check(self):
"""
        Query the domain's NS records and attempt a zone transfer against each nameserver
"""
resolver = utils.dns_resolver()
try:
answers = resolver.query(self.domain, "NS")
except Exception as e:
logger.log('ERROR', e.args)
return
nsservers = [str(answer) for answer in answers]
if not len(nsservers):
logger.log('ALERT', f'没有找到{self.domain}的NS域名服务器记录')
return
for nsserver in nsservers:
self.axfr(nsserver)
def run(self):
"""
类执行入口
"""
self.begin()
self.check()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
check = CheckAXFR(domain)
check.run()
if __name__ == '__main__':
do('ZoneTransfer.me')
# do('example.com')

@ -1,60 +0,0 @@
"""
Collect subdomains from the crossdomain.xml file
"""
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.common import utils
class CheckCDX(Module):
"""
检查crossdomain.xml文件收集子域名
"""
def __init__(self, domain: str):
Module.__init__(self)
self.domain = self.register(domain)
self.module = 'Check'
self.source = "CrossDomainXml"
def check(self):
"""
检查crossdomain.xml收集子域名
"""
urls = [f'http://{self.domain}/crossdomain.xml',
f'https://{self.domain}/crossdomain.xml',
f'http://www.{self.domain}/crossdomain.xml',
f'https://www.{self.domain}/crossdomain.xml']
for url in urls:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
response = self.get(url, check=False)
if not response:
return
if response and len(response.content):
self.subdomains = utils.match_subdomain(self.domain,
response.text)
def run(self):
"""
类执行入口
"""
self.begin()
self.check()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param domain: 域名
"""
check = CheckCDX(domain)
check.run()
if __name__ == '__main__':
do('example.com')

@ -1,62 +0,0 @@
#!/usr/bin/env python3
"""
检查域名证书收集子域名
"""
import socket
import ssl
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.config import logger
class CheckCert(Module):
def __init__(self, domain):
Module.__init__(self)
self.domain = self.register(domain)
self.port = 443 # ssl port
self.module = 'Check'
self.source = 'CertInfo'
def check(self):
"""
获取域名证书并匹配证书中的子域名
"""
try:
ctx = ssl.create_default_context()
sock = ctx.wrap_socket(socket.socket(),
server_hostname=self.domain)
sock.connect((self.domain, self.port))
cert_dict = sock.getpeercert()
except Exception as e:
logger.log('DEBUG', e.args)
return
subdomains = utils.match_subdomain(self.domain, str(cert_dict))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.check()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
check = CheckCert(domain)
check.run()
if __name__ == '__main__':
do('example.com')
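# --- illustrative sketch, not in the original file ---
# the subdomains actually live in the certificate's subjectAltName entries;
# a standalone extraction, instead of regex-matching str(cert_dict) as above:
import socket
import ssl

def cert_san_names(host, port=443, timeout=5):
    ctx = ssl.create_default_context()
    with ctx.wrap_socket(socket.create_connection((host, port), timeout=timeout),
                         server_hostname=host) as sock:
        cert = sock.getpeercert()
    # entries look like ('DNS', 'www.example.com')
    return [value for kind, value in cert.get('subjectAltName', ()) if kind == 'DNS']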

@ -1,82 +0,0 @@
"""
Collect subdomains from the Content-Security-Policy response header
"""
import requests
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.config import logger
class CheckCSP(Module):
"""
检查内容安全策略收集子域名
"""
def __init__(self, domain, header):
Module.__init__(self)
self.domain = self.register(domain)
self.module = 'Check'
self.source = 'ContentSecurityPolicy'
self.csp_header = header
def grab_header(self):
"""
        Grab the response headers of the target.
        :return: response headers
"""
csp_header = dict()
urls = [f'http://{self.domain}',
f'https://{self.domain}',
f'http://www.{self.domain}',
f'https://www.{self.domain}']
for url in urls:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
response = self.get(url, check=False)
if response:
csp_header = response.headers
break
return csp_header
def check(self):
"""
        Regex-match the Content-Security-Policy field of the response headers to discover subdomains
"""
if not self.csp_header:
self.csp_header = self.grab_header()
        csp = self.csp_header.get('Content-Security-Policy')  # read CSP from the grabbed response headers
if not self.csp_header:
logger.log('DEBUG', f'获取{self.domain}域的请求头失败')
return
if not csp:
logger.log('DEBUG', f'{self.domain}域的响应头不存在内容安全策略字段')
return
self.subdomains = utils.match_subdomain(self.domain, csp)
def run(self):
"""
类执行入口
"""
self.begin()
self.check()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain, header=None): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
:param dict or None header: 响应头
"""
check = CheckCSP(domain, header)
check.run()
if __name__ == '__main__':
resp = requests.get('https://content-security-policy.com/')
do('google-analytics.com', resp.headers)
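# --- illustrative sketch, not in the original file ---
# a CSP header is a semicolon-separated list of directives whose source lists
# may carry host names, which is what match_subdomain() mines:
#   csp = ("default-src 'self'; "
#          "script-src https://static.example.com https://cdn.example.com")
#   utils.match_subdomain('example.com', csp)
#   -> {'static.example.com', 'cdn.example.com'}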

@ -1,59 +0,0 @@
"""
Collect subdomains from the robots.txt file
"""
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.common import utils
class CheckRobots(Module):
"""
检查robots.txt收集子域名
"""
def __init__(self, domain):
Module.__init__(self)
self.domain = self.register(domain)
self.module = 'Check'
self.source = 'Robots'
def check(self):
"""
正则匹配域名的robots.txt文件中的子域
"""
urls = [f'http://{self.domain}/robots.txt',
f'https://{self.domain}/robots.txt',
f'http://www.{self.domain}/robots.txt',
f'https://www.{self.domain}/robots.txt']
for url in urls:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
response = self.get(url, check=False, allow_redirects=False)
if not response:
return
if response and len(response.content):
self.subdomains = utils.match_subdomain(self.domain,
response.text)
def run(self):
"""
类执行入口
"""
self.begin()
self.check()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
check = CheckRobots(domain)
check.run()
if __name__ == '__main__':
do('qq.com')

@ -1,73 +0,0 @@
"""
Collect subdomains from sitemap files
"""
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.common import utils
class CheckSitemap(Module):
    """
    Collect subdomains from sitemap files
    """
def __init__(self, domain):
Module.__init__(self)
self.domain = self.register(domain)
self.module = 'Check'
self.source = 'Sitemap'
def check(self):
"""
正则匹配域名的sitemap文件中的子域
"""
urls = [f'http://{self.domain}/sitemap.xml',
f'https://{self.domain}/sitemap.xml',
f'http://www.{self.domain}/sitemap.xml',
f'https://www.{self.domain}/sitemap.xml',
f'http://{self.domain}/sitemap.txt',
f'https://{self.domain}/sitemap.txt',
f'http://www.{self.domain}/sitemap.txt',
f'https://www.{self.domain}/sitemap.txt',
f'http://{self.domain}/sitemap.html',
f'https://{self.domain}/sitemap.html',
f'http://www.{self.domain}/sitemap.html',
f'https://www.{self.domain}/sitemap.html',
f'http://{self.domain}/sitemap_index.xml',
f'https://{self.domain}/sitemap_index.xml',
f'http://www.{self.domain}/sitemap_index.xml',
f'https://www.{self.domain}/sitemap_index.xml']
for url in urls:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
self.timeout = 10
response = self.get(url, check=False, allow_redirects=False)
if not response:
return
if response and len(response.content):
self.subdomains = utils.match_subdomain(self.domain,
response.text)
def run(self):
"""
类执行入口
"""
self.begin()
self.check()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
    check = CheckSitemap(domain)
check.run()
if __name__ == '__main__':
do('qq.com')

@ -1,62 +0,0 @@
import cdx_toolkit
from client.subdomain.oneforall.common.crawl import Crawl
from client.subdomain.oneforall.config import logger
class ArchiveCrawl(Crawl):
def __init__(self, domain):
Crawl.__init__(self)
self.domain = domain
self.module = 'Crawl'
self.source = 'ArchiveCrawl'
def crawl(self, domain, limit):
"""
:param domain:
:param limit:
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
cdx = cdx_toolkit.CDXFetcher(source='ia')
url = f'*.{domain}/*'
size = cdx.get_size_estimate(url)
logger.log('DEBUG', f'{url} ArchiveCrawl size estimate {size}')
for resp in cdx.iter(url, limit=limit):
if resp.data.get('status') not in ['301', '302']:
url = resp.data.get('url')
subdomains = self.match(self.register(domain),
url + resp.text)
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.crawl(self.domain, 50)
# 爬取已发现的子域以发现新的子域
for subdomain in self.subdomains:
if subdomain != self.domain:
self.crawl(subdomain, 10)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
crawl = ArchiveCrawl(domain)
crawl.run()
if __name__ == '__main__':
do('example.com')

@ -1,61 +0,0 @@
import cdx_toolkit
from tqdm import tqdm
from client.subdomain.oneforall.common.crawl import Crawl
from client.subdomain.oneforall.config import logger
class CommonCrawl(Crawl):
def __init__(self, domain):
Crawl.__init__(self)
self.domain = domain
self.module = 'Crawl'
self.source = 'CommonCrawl'
def crawl(self, domain, limit):
"""
:param domain:
:param limit:
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
cdx = cdx_toolkit.CDXFetcher()
url = f'*.{domain}/*'
size = cdx.get_size_estimate(url)
print(url, 'CommonCrawl size estimate', size)
for resp in tqdm(cdx.iter(url, limit=limit), total=limit):
if resp.data.get('status') not in ['301', '302']:
subdomains = self.match(self.register(domain), resp.text)
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.crawl(self.domain, 50)
# 爬取已发现的子域以发现新的子域
for subdomain in self.subdomains:
if subdomain != self.domain:
self.crawl(subdomain, 10)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
crawl = CommonCrawl(domain)
crawl.run()
if __name__ == '__main__':
do('example.com')

@ -1,53 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class BinaryEdgeAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'BinaryEdgeAPIQuery'
self.addr = 'https://api.binaryedge.io/v2/query/domains/subdomain/'
self.api = api.binaryedge_api
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.header.update({'X-Key': self.api})
self.proxy = self.get_proxy(self.source)
url = self.addr + self.domain
resp = self.get(url)
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
if not self.check(self.api):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = BinaryEdgeAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,57 +0,0 @@
import cloudscraper
from client.subdomain.oneforall.common.query import Query
from client.subdomain.oneforall.config import logger
class BufferOver(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'BufferOverQuery'
self.addr = 'https://dns.bufferover.run/dns?q='
def query(self):
"""
向接口查询子域并做子域匹配
"""
        # bypass the Cloudflare anti-bot challenge
scraper = cloudscraper.create_scraper()
scraper.interpreter = 'js2py'
scraper.proxies = self.get_proxy(self.source)
url = self.addr + self.domain
try:
resp = scraper.get(url, timeout=self.timeout)
except Exception as e:
logger.log('ERROR', e.args)
return
if resp.status_code != 200:
return
subdomains = self.match(self.domain, str(resp.json()))
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = BufferOver(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,49 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class CeBaidu(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'CeBaiduQuery'
self.addr = 'https://ce.baidu.com/index/getRelatedSites'
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'site_address': self.domain}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = CeBaidu(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,49 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class Chinaz(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'ChinazQuery'
self.addr = 'https://alexa.chinaz.com/'
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
self.addr = self.addr + self.domain
resp = self.get(self.addr)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = Chinaz(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,53 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class ChinazAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'ChinazAPIQuery'
self.addr = 'https://apidata.chinaz.com/CallAPI/Alexa'
self.api = api.chinaz_api
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'key': self.api, 'domainName': self.domain}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
if not self.check(self.api):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = ChinazAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,53 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class CirclAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'CirclAPIQuery'
self.addr = 'https://www.circl.lu/pdns/query/'
self.user = api.circl_api_username
self.pwd = api.circl_api_password
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
resp = self.get(self.addr + self.domain, auth=(self.user, self.pwd))
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
if not self.check(self.user, self.pwd):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = CirclAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,56 +0,0 @@
import api
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class DNSdbAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'DNSdbAPIQuery'
self.addr = 'https://api.dnsdb.info/lookup/rrset/name/'
self.api = api.dnsdb_api_key
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.header.update({'X-API-Key': self.api})
self.proxy = self.get_proxy(self.source)
url = f'{self.addr}*.{self.domain}'
resp = self.get(url)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, resp.text)
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
if not self.check(self.api):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = DNSdbAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,58 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class DNSdumpster(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = "DNSdumpsterQuery"
self.addr = 'https://dnsdumpster.com/'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.header.update({'Referer': 'https://dnsdumpster.com'})
self.proxy = self.get_proxy(self.source)
resp = self.get(self.addr)
if not resp:
return
self.cookie = resp.cookies
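# DNSdumpster sets a csrftoken cookie on the first request; it must be echoed back as csrfmiddlewaretoken in the POST form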
data = {'csrfmiddlewaretoken': self.cookie.get('csrftoken'),
'targetip': self.domain}
resp = self.post(self.addr, data)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, resp.text)
if subdomains:
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = DNSdumpster(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,52 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class HackerTarget(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = "HackerTargetQuery"
self.addr = 'https://api.hackertarget.com/hostsearch/'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'q': self.domain}
resp = self.get(self.addr, params)
if not resp:
return
if resp.status_code == 200:
subdomains = utils.match_subdomain(self.domain, resp.text)
if subdomains:
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = HackerTarget(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,49 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class IP138(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'IP138Query'
self.addr = 'https://site.ip138.com/{domain}/domain.htm'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
self.addr = self.addr.format(domain=self.domain)
resp = self.get(self.addr)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = IP138(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,75 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
from client.subdomain.oneforall.config import logger
class IPv4InfoAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'IPv4InfoAPIQuery'
self.addr = 'http://ipv4info.com/api_v1/'
self.api = api.ipv4info_api_key
def query(self):
"""
Query the API for subdomains and match them
"""
page = 0
while True:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'type': 'SUBDOMAINS', 'key': self.api,
'value': self.domain, 'page': page}
resp = self.get(self.addr, params)
if not resp:
return
if resp.status_code != 200:
break # an abnormal response usually means a network problem, so stop requesting
try:
json = resp.json()
except Exception as e:
logger.log('DEBUG', e.args)
break
subdomains = self.match(self.domain, str(json))
if not subdomains:
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
# Re-read the raw list only for paging, since it may contain non-standard subdomains
subdomains = json.get('Subdomains')
if subdomains:
# the ipv4info API returns at most 300 subdomains per page, used to judge whether there is a next page
if len(subdomains) < 300:
break
page += 1
if page >= 50: # the ipv4info API allows querying at most 50 pages
break
def run(self):
"""
Class execution entry point
"""
if not self.check(self.api):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = IPv4InfoAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,83 +0,0 @@
import hashlib
import re
import time
from urllib import parse
from client.subdomain.oneforall.common.query import Query
class NetCraft(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'NetCraftQuery'
self.init = 'https://searchdns.netcraft.com/'
self.addr = 'https://searchdns.netcraft.com/?restriction=site+contains'
self.page_num = 1
self.per_page_num = 20
def bypass_verification(self):
"""
Bypass NetCraft's JavaScript verification
"""
self.header = self.get_header() # Netcraft checks the User-Agent
resp = self.get(self.init)
if not resp:
return False
self.cookie = resp.cookies
cookie_value = self.cookie['netcraft_js_verification_challenge']
cookie_encode = parse.unquote(cookie_value).encode('utf-8')
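# Netcraft's JS challenge expects the SHA-1 hex digest of the URL-decoded challenge cookie as the response cookie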
verify_token = hashlib.sha1(cookie_encode).hexdigest()
self.cookie['netcraft_js_verification_response'] = verify_token
return True
def query(self):
"""
Query the API for subdomains and match them
"""
if not self.bypass_verification():
return
last = ''
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'host': '*.' + self.domain, 'from': self.page_num}
resp = self.get(self.addr + last, params)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
if 'Next page' not in resp.text: # stop searching when the results page has no next page
break
last = re.search(r'&last=.*' + self.domain, resp.text).group(0)
self.page_num += self.per_page_num
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = NetCraft(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,54 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class PassiveDnsAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'PassiveDnsQuery'
self.addr = api.passivedns_api_addr or 'http://api.passivedns.cn'
self.token = api.passivedns_api_token
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.header.update({'X-AuthToken': self.token})
self.proxy = self.get_proxy(self.source)
url = self.addr + '/flint/rrset/*.' + self.domain
resp = self.get(url)
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
if not self.check(self.addr):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = PassiveDnsAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,56 +0,0 @@
import random
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class PTRArchive(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = "PTRArchiveQuery"
self.addr = 'http://ptrarchive.com/tools/search4.htm'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
# Bypass the homepage's front-end JS verification
self.cookie = {'pa_id': str(random.randint(0, 1000000000))}
params = {'label': self.domain, 'date': 'ALL'}
resp = self.get(self.addr, params)
if not resp:
return
if resp.status_code == 200:
subdomains = utils.match_subdomain(self.domain, resp.text)
if subdomains:
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = PTRArchive(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,61 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class QianXun(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = domain
self.module = 'Query'
self.source = 'QianXunQuery'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
num = 1
while True:
data = {'ecmsfrom': '',
'show': '',
'num': '',
'classid': '0',
'keywords': self.domain}
url = f'https://www.dnsscan.cn/dns.html?' \
f'keywords={self.domain}&page={num}'
resp = self.post(url, data)
if not resp:
break
subdomains = self.match(self.domain, resp.text)
self.subdomains = self.subdomains.union(subdomains)
if '<div id="page" class="pagelist">' not in resp.text:
break
if '<li class="disabled"><span>&raquo;</span></li>' in resp.text:
break
num += 1
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = QianXun(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,50 +0,0 @@
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.query import Query
class RapidDNS(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'RapidDNSQuery'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
url = f'http://rapiddns.io/subdomain/{self.domain}'
params = {'full': '1'}
resp = self.get(url, params)
if not resp:
return
subdomains = utils.match_subdomain(self.domain, resp.text)
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = RapidDNS(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,49 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class Riddler(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'RiddlerQuery'
self.addr = 'https://riddler.io/search'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'q': 'pld:' + self.domain}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = Riddler(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,63 +0,0 @@
import json
import time
from client.subdomain.oneforall.common.query import Query
class Robtex(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = "RobtexQuery"
self.addr = 'https://freeapi.robtex.com/pdns/'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
url = self.addr + 'forward/' + self.domain
resp = self.get(url)
if not resp:
return
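# The free Robtex pDNS API returns newline-delimited JSON, so parse one record per line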
text_list = resp.text.splitlines()
text_json = list(map(lambda x: json.loads(x), text_list))
for record in text_json:
if record.get('rrtype') in ['A', 'AAAA']:
time.sleep(self.delay) # Robtex rate-limits queries
ip = record.get('rrdata')
url = self.addr + 'reverse/' + ip
resp = self.get(url)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
if subdomains:
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = Robtex(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,57 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class SecurityTrailsAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'SecurityTrailsAPIQuery'
self.addr = 'https://api.securitytrails.com/v1/domain/'
self.api = api.securitytrails_api
self.delay = 2 # SecurityTrails requires a delay of at least 2 seconds between queries
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'apikey': self.api}
url = f'{self.addr}{self.domain}/subdomains'
resp = self.get(url, params)
if not resp:
return
prefixes = resp.json()['subdomains']
subdomains = [f'{prefix}.{self.domain}' for prefix in prefixes]
if subdomains:
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
if not self.check(self.api):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = SecurityTrailsAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,60 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class SiteDossier(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'SiteDossierQuery'
self.addr = 'http://www.sitedossier.com/parentdomain/'
self.delay = 2
self.page_num = 1
self.per_page_num = 100
def query(self):
"""
Query the API for subdomains and match them
"""
while True:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
url = f'{self.addr}{self.domain}/{self.page_num}'
resp = self.get(url)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
# stop searching when the results page has no next page
if 'Show next 100 items' not in resp.text:
break
self.page_num += self.per_page_num
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = SiteDossier(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,56 +0,0 @@
import cloudscraper
from client.subdomain.oneforall.common.query import Query
from client.subdomain.oneforall.config import logger
class ThreatCrowd(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'ThreatCrowdQuery'
self.addr = 'https://www.threatcrowd.org/searchApi' \
'/v2/domain/report?domain='
def query(self):
# Bypass the Cloudflare check
scraper = cloudscraper.create_scraper()
scraper.interpreter = 'js2py'
scraper.proxies = self.get_proxy(self.source)
url = self.addr + self.domain
try:
resp = scraper.get(url, timeout=self.timeout)
except Exception as e:
logger.log('ERROR', e.args)
return
if resp.status_code != 200:
return
subdomains = self.match(self.domain, str(resp.json()))
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = ThreatCrowd(domain)
query.run()
if __name__ == '__main__':
do('mi.com')

@ -1,66 +0,0 @@
import time
from client.subdomain.oneforall.config import logger
from client.subdomain.oneforall.common.query import Query
class WZPCQuery(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'WZPCQuery'
def query(self):
"""
Query the API for subdomains and match them
"""
base_addr = 'http://114.55.181.28/check_web/' \
'databaseInfo_mainSearch.action'
page_num = 1
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'isSearch': 'true', 'searchType': 'url',
'term': self.domain, 'pageNo': page_num}
try:
resp = self.get(base_addr, params)
except Exception as e:
logger.log('ERROR', e.args)
break
if not resp:
break
subdomains = self.match(self.domain, resp.text)
self.subdomains = self.subdomains.union(subdomains)
if not subdomains:
break
if page_num > 10:
break
page_num += 1
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain):
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = WZPCQuery(domain)
query.run()
if __name__ == '__main__':
do('sc.gov.cn')
do('bkzy.org')

@ -1,50 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class Ximcx(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'XimcxQuery'
self.addr = 'http://sbd.ximcx.cn/DomainServlet'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
data = {'domain': self.domain}
resp = self.post(self.addr, data=data)
if not resp:
return
json = resp.json()
subdomains = self.match(self.domain, str(json))
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = Ximcx(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,35 +0,0 @@
from client.subdomain.oneforall.common.lookup import Lookup
class QueryMX(Lookup):
def __init__(self, domain):
Lookup.__init__(self)
self.domain = self.register(domain)
self.module = 'dnsquery'
self.source = "QueryMX"
self.type = 'MX' # collect subdomains from DNS MX records
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
brute = QueryMX(domain)
brute.run()
if __name__ == '__main__':
do('cuit.edu.cn')

@ -1,35 +0,0 @@
from client.subdomain.oneforall.common.lookup import Lookup
class QueryNS(Lookup):
def __init__(self, domain):
Lookup.__init__(self)
self.domain = self.register(domain)
self.module = 'dnsquery'
self.source = "QueryNS"
self.type = 'NS' # collect subdomains from DNS NS records
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
brute = QueryNS(domain)
brute.run()
if __name__ == '__main__':
do('cuit.edu.cn')

@ -1,35 +0,0 @@
from client.subdomain.oneforall.common.lookup import Lookup
class QuerySOA(Lookup):
def __init__(self, domain):
Lookup.__init__(self)
self.domain = self.register(domain)
self.module = 'dnsquery'
self.source = "QuerySOA"
self.type = 'SOA' # collect subdomains from DNS SOA records
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
brute = QuerySOA(domain)
brute.run()
if __name__ == '__main__':
do('cuit.edu.cn')

@ -1,95 +0,0 @@
"""
Discover subdomains by enumerating and querying a domain's common SRV records
"""
import json
import queue
import threading
from client.subdomain.oneforall.common import utils
from client.subdomain.oneforall.common.module import Module
from client.subdomain.oneforall.config import data_storage_dir, logger
class BruteSRV(Module):
def __init__(self, domain):
Module.__init__(self)
self.domain = self.register(domain)
self.module = 'dnsquery'
self.source = "BruteSRV"
self.type = 'SRV' # query subdomains via DNS SRV records
self.thread_num = 10
self.names_que = queue.Queue()
self.answers_que = queue.Queue()
def gen_names(self):
path = data_storage_dir.joinpath('srv_prefixes.json')
with open(path, encoding='utf-8', errors='ignore') as file:
prefixes = json.load(file)
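# Each prefix is assumed to be a common SRV service label such as '_ldap._tcp.' (based on the srv_prefixes.json naming)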
names = map(lambda prefix: prefix + self.domain, prefixes)
for name in names:
self.names_que.put(name)
def brute(self):
"""
Enumerate the domain's SRV records
"""
self.gen_names()
for i in range(self.thread_num):
thread = BruteThread(self.names_que, self.answers_que)
thread.daemon = True
thread.start()
self.names_que.join()
while not self.answers_que.empty():
answer = self.answers_que.get()
if answer is None:
continue
for item in answer:
record = str(item)
subdomains = utils.match_subdomain(self.domain, record)
self.subdomains = self.subdomains.union(subdomains)
self.gen_record(subdomains, record)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.brute()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
class BruteThread(threading.Thread):
def __init__(self, names_que, answers_que):
threading.Thread.__init__(self)
self.names_que = names_que
self.answers_que = answers_que
def run(self):
while True:
name = self.names_que.get()
answer = utils.dns_query(name, 'SRV')
self.answers_que.put(answer)
self.names_que.task_done()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
brute = BruteSRV(domain)
brute.run()
if __name__ == '__main__':
do('zonetransfer.me')
# do('example.com')

@ -1,35 +0,0 @@
from client.subdomain.oneforall.common.lookup import Lookup
class QueryTXT(Lookup):
def __init__(self, domain):
Lookup.__init__(self)
self.domain = self.register(domain)
self.module = 'dnsquery'
self.source = "QueryTXT"
self.type = 'TXT' # collect subdomains from DNS TXT records
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
brute = QueryTXT(domain)
brute.run()
if __name__ == '__main__':
do('cuit.edu.cn')

@ -1,58 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class AlienVault(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Intelligence'
self.source = 'AlienVaultQuery'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
base = 'https://otx.alienvault.com/api/v1/indicators/domain'
dns = f'{base}/{self.domain}/passive_dns'
resp = self.get(dns)
if not resp:
return
json = resp.json()
subdomains = self.match(self.domain, str(json))
self.subdomains = self.subdomains.union(subdomains)
url = f'{base}/{self.domain}/url_list'
resp = self.get(url)
if not resp:
return
json = resp.json()
subdomains = self.match(self.domain, str(json))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = AlienVault(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,56 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class RiskIQ(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Intelligence'
self.source = 'RiskIQAPIQuery'
self.addr = 'https://api.passivetotal.org/v2/enrichment/subdomains'
self.user = api.riskiq_api_username
self.key = api.riskiq_api_key
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'query': self.domain}
resp = self.get(url=self.addr,
params=params,
auth=(self.user, self.key))
if not resp:
return
data = resp.json()
names = data.get('subdomains')
if not names:
return
self.subdomains = self.subdomains.union(f'{sub}.{self.domain}' for sub in names) # merge rather than overwrite
def run(self):
"""
Class execution entry point
"""
if not self.check(self.user, self.key):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = RiskIQ(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,54 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class ThreatBookAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Intelligence'
self.source = 'ThreatBookAPIQuery'
self.addr = 'https://x.threatbook.cn/api/v1/domain/query'
self.key = api.threatbook_api_key
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'apikey': self.key,
'domain': self.domain,
'field': 'sub_domains'}
resp = self.post(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
if not self.check(self.key):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = ThreatBookAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,50 +0,0 @@
from client.subdomain.oneforall.common.query import Query
class ThreatMiner(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Intelligence'
self.source = 'ThreatMinerQuery'
self.addr = 'https://www.threatminer.org/getData.php'
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'e': 'subdomains_container',
'q': self.domain, 't': 0, 'rt': 10}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = ThreatMiner(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,71 +0,0 @@
from client.subdomain.oneforall.common.query import Query
'''
Queries at most 100 records
'''
class VirusTotal(Query):
def __init__(self, domain):
Query.__init__(self)
self.source = 'VirusTotalQuery'
self.module = 'Intelligence'
self.addr = 'https://www.virustotal.com/ui/domains/{}/subdomains'
self.domain = self.register(domain)
def query(self):
"""
Query the API for subdomains and match them
"""
next_cursor = ''
while True:
self.header = self.get_header()
self.header.update({'Referer': 'https://www.virustotal.com/',
'TE': 'Trailers'})
self.proxy = self.get_proxy(self.source)
params = {'limit': '40', 'cursor': next_cursor}
resp = self.get(url=self.addr.format(self.domain), params=params)
if not resp:
return
resp_json = resp.json()
subdomains = set()
items = resp_json.get('data')
if items:
# use a distinct loop variable so the meta lookup below reads the response JSON, not the last item
for item in items:
subdomain = item.get('id')
if subdomain:
subdomains.add(subdomain)
else:
break
self.subdomains = self.subdomains.union(subdomains)
meta = resp_json.get('meta')
if meta:
next_cursor = meta.get('cursor')
if not next_cursor: # no cursor means the last page was reached
break
else:
break
def run(self):
"""
Class execution entry point
"""
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = VirusTotal(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,55 +0,0 @@
import api
from client.subdomain.oneforall.common.query import Query
class VirusTotalAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Intelligence'
self.source = 'VirusTotalAPIQuery'
self.addr = 'https://www.virustotal.com/vtapi/v2/domain/report'
self.key = api.virustotal_api_key
def query(self):
"""
Query the API for subdomains and match them
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'apikey': self.key, 'domain': self.domain}
resp = self.get(self.addr, params)
if not resp:
return
json = resp.json()
data = json.get('subdomains')
if data:
subdomains = set(data)
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
Class execution entry point
"""
if not self.check(self.key):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = VirusTotalAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,81 +0,0 @@
import time
from client.subdomain.oneforall.common.search import Search
class Ask(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'AskSearch'
self.addr = 'https://www.search.ask.com/web'
self.limit_num = 200 # limit on the number of search results
self.per_page_num = 10 # 10 results per page by default
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 1
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'q': query, 'page': self.page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains:
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
self.subdomains = self.subdomains.union(subdomains)
self.page_num += 1
if '>Next<' not in resp.text:
break
def run(self):
"""
Class execution entry point
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Ask(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,109 +0,0 @@
import time
from bs4 import BeautifulSoup
from client.subdomain.oneforall.common.search import Search
class Baidu(Search):
def __init__(self, domain):
Search.__init__(self)
self.module = 'Search'
self.source = 'BaiduSearch'
self.init = 'https://www.baidu.com/'
self.addr = 'https://www.baidu.com/s'
self.domain = domain
self.limit_num = 750 # limit on the number of search results
def redirect_match(self, domain, html):
"""
Extract redirect URLs from results and send HEAD requests to resolve them
:param domain: domain name
:param html: response body
:return: subdomains
"""
bs = BeautifulSoup(html, 'html.parser')
subdomains_all = set()
# Collect all redirect URLs from the search results
for find_res in bs.find_all('a', {'class': 'c-showurl'}):
url = find_res.get('href')
subdomains = self.match_location(domain, url)
subdomains_all = subdomains_all.union(subdomains)
return subdomains_all
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 0 # reset to 0 for repeated searches
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'wd': query,
'pn': self.page_num,
'rn': self.per_page_num}
resp = self.get(self.addr, params)
if not resp:
return
if len(domain) > 12: # work around long domains being truncated in Baidu search results
# get the direct link from the Location header of Baidu's redirect URL
subdomains = self.redirect_match(domain, resp.text)
else:
subdomains = self.match(domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
self.page_num += self.per_page_num
# stop searching when the results page has no next page
if '&pn={next_pn}&'.format(next_pn=self.page_num) not in resp.text:
break
if self.page_num >= self.limit_num: # search result count limit
break
def run(self):
"""
类执行入口
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Baidu(domain)
search.run()
if __name__ == '__main__':
do('huayunshuzi.com')

@ -1,92 +0,0 @@
import time
from client.subdomain.oneforall.common.search import Search
class Bing(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'BingSearch'
self.init = 'https://www.bing.com/'
self.addr = 'https://www.bing.com/search'
self.limit_num = 1000 # limit on the number of search results
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 0 # reset to 0 for repeated searches
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
resp = self.get(self.init)
if not resp:
return
self.cookie = resp.cookies # get cookies; Bing requires them when searching
while True:
time.sleep(self.delay)
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'q': query, 'first': self.page_num,
'count': self.per_page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
# stop searching when the results page has no next page
if '<div class="sw_next">' not in resp.text:
break
self.page_num += self.per_page_num
if self.page_num >= self.limit_num: # search result count limit
break
def run(self):
"""
类执行入口
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Bing(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,92 +0,0 @@
import time
import api
from client.subdomain.oneforall.common.search import Search
class BingAPI(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'BingAPISearch'
self.addr = 'https://api.cognitive.microsoft.com/' \
'bing/v7.0/search'
self.id = api.bing_api_id
self.key = api.bing_api_key
self.limit_num = 1000 # Bing limits the number of results for a single search keyword
self.delay = 1 # Bing custom search requires a 1-second delay
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 0 # reset to 0 for repeated searches
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.header.update({'Ocp-Apim-Subscription-Key': self.key}) # keep the base headers instead of overwriting them
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'q': query, 'safesearch': 'Off',
'count': self.per_page_num,
'offset': self.page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(domain, str(resp.json()))
if not subdomains: # stop searching when no subdomains are found
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
self.page_num += self.per_page_num
if self.page_num >= self.limit_num: # search result count limit
break
def run(self):
"""
类执行入口
"""
if not self.check(self.id, self.key):
return
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = BingAPI(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,87 +0,0 @@
import random
import time
from client.subdomain.oneforall.common.search import Search
class Exalead(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = "ExaleadSearch"
self.addr = "http://www.exalead.com/search/web/results/"
self.per_page_num = 30
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 0
while True:
self.delay = random.randint(1, 5)
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'q': query, 'elements_per_page': '30',
"start_index": self.page_num}
resp = self.get(url=self.addr, params=params)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains:
break
if not full_search:
if subdomains.issubset(self.subdomains):
break
self.subdomains = self.subdomains.union(subdomains)
self.page_num += self.per_page_num
if self.page_num > 1999:
break
if 'title="Go to the next page"' not in resp.text:
break
def run(self):
"""
Class execution entry point
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
statement = statement.replace('-site', 'and -site')
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Exalead(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,73 +0,0 @@
import base64
import time
import api
from client.subdomain.oneforall.common.search import Search
class FoFa(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'FoFaAPISearch'
self.addr = 'https://fofa.so/api/v1/search/all'
self.delay = 1
self.email = api.fofa_api_email
self.key = api.fofa_api_key
def search(self):
"""
Send search requests and match subdomains
"""
self.page_num = 1
subdomain_encode = f'domain={self.domain}'.encode('utf-8')
query_data = base64.b64encode(subdomain_encode)
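# The FoFa API expects the search expression base64-encoded in the qbase64 parameter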
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
query = {'email': self.email,
'key': self.key,
'qbase64': query_data,
'page': self.page_num,
'size': 10000}
resp = self.get(self.addr, query)
if not resp:
return
resp_json = resp.json()
subdomains = self.match(self.domain, str(resp_json))
if not subdomains: # stop searching when no subdomains are found
break
self.subdomains = self.subdomains.union(subdomains)
size = resp_json.get('size')
if size < 10000:
break
self.page_num += 1
def run(self):
"""
Class execution entry point
"""
if not self.check(self.email, self.key):
return
self.begin()
self.search()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = FoFa(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,74 +0,0 @@
import time
from bs4 import BeautifulSoup
from client.subdomain.oneforall.common.search import Search
from client.subdomain.oneforall.config import logger
class Gitee(Search):
def __init__(self, domain):
Search.__init__(self)
self.source = 'GiteeSearch'
self.module = 'Search'
self.addr = 'https://search.gitee.com/'
self.domain = self.register(domain)
self.header = self.get_header()
def search(self, full_search=False):
"""
Query the API for subdomains and match them
"""
page_num = 1
while True:
time.sleep(self.delay)
params = {'pageno': page_num, 'q': self.domain, 'type': 'code'}
try:
resp = self.get(self.addr, params=params)
except Exception as e:
logger.log('ERROR', e.args)
break
if not resp:
break
if resp.status_code != 200:
logger.log('ERROR', f'{self.source} module search error')
break
if 'class="empty-box"' in resp.text:
break
soup = BeautifulSoup(resp.text, 'html.parser')
subdomains = self.match(self.domain, soup.text)
self.subdomains = self.subdomains.union(subdomains)
if not subdomains:
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
if '<li class="disabled"><a href="###">' in resp.text:
break
if page_num > 100:
break
page_num += 1
def run(self):
"""
Class execution entry point
"""
self.begin()
self.search()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = Gitee(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,108 +0,0 @@
import requests
import api
import json
from client.subdomain.oneforall.common.utils import match_subdomain
from client.subdomain.oneforall.common.search import Search
from client.subdomain.oneforall.config import logger
class GithubAPI(Search):
def __init__(self, domain):
Search.__init__(self)
self.source = 'GithubAPISearch'
self.module = 'Search'
self.addr = 'https://api.github.com/search/code'
self.domain = self.register(domain)
self.session = requests.Session()
self.auth_url = 'https://api.github.com'
self.token = api.github_api_token
def auth_github(self):
"""
GitHub API authentication
:return: False if authentication fails, True on success
"""
self.session.headers.update({'Authorization': 'token ' + self.token})
try:
resp = self.session.get(self.auth_url)
except Exception as e:
logger.log('ERROR', e.args)
return False
if resp.status_code != 200:
resp_json = resp.json()
msg = resp_json.get('message')
logger.log('ERROR', msg)
return False
else:
return True
def search(self):
"""
Query the API for subdomains and match them
"""
self.session.headers = self.get_header()
self.session.proxies = self.get_proxy(self.source)
self.session.verify = self.verify
self.session.headers.update(
{'Accept': 'application/vnd.github.v3.text-match+json'})
if not self.auth_github():
logger.log('ERROR', f'{self.source} module failed to log in')
return
page = 1
while True:
params = {'q': self.domain, 'per_page': 100,
'page': page, 'sort': 'indexed'}
try:
resp = self.session.get(self.addr, params=params)
except Exception as e:
logger.log('ERROR', e.args)
break
if resp.status_code != 200:
logger.log('ERROR', f'{self.source} module search error')
break
subdomains = match_subdomain(self.domain, resp.text)
if not subdomains:
break
self.subdomains = self.subdomains.union(subdomains)
page += 1
try:
resp_json = resp.json()
except Exception as e:
logger.log('ERROR', e.args)
break
total_count = resp_json.get('total_count')
if not isinstance(total_count, int):
break
if page * 100 > total_count:
break
if page * 100 > 1000:
break
def run(self):
"""
Class execution entry point
"""
if not self.check(self.token):
return
self.begin()
self.search()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
query = GithubAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')

@ -1,94 +0,0 @@
import random
import time
from client.subdomain.oneforall.common.search import Search
class Google(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'GoogleSearch'
self.init = 'https://www.google.com/'
self.addr = 'https://www.google.com/search'
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
page_num = 1
per_page_num = 50
self.header = self.get_header()
self.header.update({'User-Agent': 'Googlebot',
'Referer': 'https://www.google.com'})
self.proxy = self.get_proxy(self.source)
resp = self.get(self.init)
if not resp:
return
self.cookie = resp.cookies
while True:
self.delay = random.randint(1, 5)
time.sleep(self.delay)
self.proxy = self.get_proxy(self.source)
word = 'site:' + domain + filtered_subdomain
payload = {'q': word, 'start': page_num, 'num': per_page_num,
'filter': '0', 'btnG': 'Search', 'gbv': '1', 'hl': 'en'}
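# Assumed Google parameters: gbv=1 requests the basic HTML interface and filter=0 disables result deduplication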
resp = self.get(url=self.addr, params=payload)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains:
break
if not full_search:
if subdomains.issubset(self.subdomains):
break
self.subdomains = self.subdomains.union(subdomains)
page_num += per_page_num
if 'start=' + str(page_num) not in resp.text:
break
if '302 Moved' in resp.text:
break
def run(self):
"""
Class execution entry point
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Google(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,88 +0,0 @@
import time
import api
from client.subdomain.oneforall.common.search import Search
class GoogleAPI(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'GoogleAPISearch'
self.addr = 'https://www.googleapis.com/customsearch/v1'
self.delay = 1
self.key = api.google_api_key
self.cx = api.google_api_cx
self.per_page_num = 10 # only 10 results can be requested per call
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 1
while True:
word = 'site:' + domain + filtered_subdomain
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
params = {'key': self.key, 'cx': self.cx,
'q': word, 'fields': 'items/link',
'start': self.page_num, 'num': self.per_page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(domain, str(resp.json()))
if not subdomains:
break
if not full_search:
if subdomains.issubset(self.subdomains):
break
self.subdomains = self.subdomains.union(subdomains)
self.page_num += self.per_page_num
if self.page_num > 100: # the free API can only query the first 100 results
break
def run(self):
"""
Class execution entry point
"""
if not self.check(self.cx, self.key):
return
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = GoogleAPI(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,60 +0,0 @@
import api
from client.subdomain.oneforall.common.search import Search
class ShodanAPI(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = self.register(domain)
self.module = 'Search'
self.source = 'ShodanAPISearch'
self.addr = 'https://api.shodan.io/shodan/host/search'
self.key = api.shodan_api_key
def search(self):
"""
Send search requests and match subdomains
"""
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
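# The leading dot in the hostname filter restricts matches to hosts under the target domain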
query = 'hostname:.' + self.domain
page = 1
while True:
params = {'key': self.key, 'page': page, 'query': query,
'minify': True, 'facets': {'hostnames'}}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
self.subdomains = self.subdomains.union(subdomains)
page += 1
def run(self):
"""
Class execution entry point
"""
if not self.check(self.key):
return
self.begin()
self.search()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = ShodanAPI(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,87 +0,0 @@
import time
from client.subdomain.oneforall.common.search import Search
class So(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'SoSearch'
self.addr = 'https://www.so.com/s'
self.limit_num = 640 # limit on the number of search results
self.per_page_num = 10 # 10 results per page by default
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
page_num = 1
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
word = 'site:' + domain + filtered_subdomain
payload = {'q': word, 'pn': page_num}
resp = self.get(url=self.addr, params=payload)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains:
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
self.subdomains = self.subdomains.union(subdomains)
page_num += 1
# stop searching when the results page has no next page
if '<a id="snext"' not in resp.text:
break
# search result count limit
if page_num * self.per_page_num >= self.limit_num:
break
def run(self):
"""
Class execution entry point
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = So(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,85 +0,0 @@
from client.subdomain.oneforall.common.search import Search
class Sogou(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'SogouSearch'
self.addr = 'https://www.sogou.com/web'
self.limit_num = 1000 # limit on the number of search results
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 1
while True:
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
word = 'site:' + domain + filtered_subdomain
payload = {'query': word, 'page': self.page_num,
"num": self.per_page_num}
resp = self.get(self.addr, payload)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains:
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
self.subdomains = self.subdomains.union(subdomains)
self.page_num += 1
# stop searching when the results page has no next page
if '<a id="sogou_next"' not in resp.text:
break
# search result count limit
if self.page_num * self.per_page_num >= self.limit_num:
break
def run(self):
"""
Class execution entry point
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Sogou(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,92 +0,0 @@
import time
from client.subdomain.oneforall.common.search import Search
class Yahoo(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'YahooSearch'
self.init = 'https://search.yahoo.com/'
self.addr = 'https://search.yahoo.com/search'
self.limit_num = 1000 # limit on the number of search results
self.delay = 5
self.per_page_num = 40
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 0
resp = self.get(self.init)
if not resp:
return
self.cookie = resp.cookies # get cookies; Yahoo requires them when searching
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'q': query, 'b': self.page_num, 'n': self.per_page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
if '>Next</a>' not in resp.text: # stop searching when the results page has no next page
break
self.page_num += self.per_page_num
if self.page_num >= self.limit_num: # search result count limit
break
def run(self):
"""
类执行入口
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Yahoo(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,92 +0,0 @@
import time
from client.subdomain.oneforall.common.search import Search
class Yandex(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'YandexSearch'
self.init = 'https://yandex.com/'
self.addr = 'https://yandex.com/search'
self.limit_num = 1000 # limit on the number of search results
self.delay = 5
def search(self, domain, filtered_subdomain='', full_search=False):
"""
Send search requests and match subdomains
:param str domain: domain name
:param str filtered_subdomain: filtered subdomain statement
:param bool full_search: full search
"""
self.page_num = 0 # reset to 0 for repeated searches
resp = self.get(self.init)
if not resp:
return
self.cookie = resp.cookies # get cookies
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
query = 'site:' + domain + filtered_subdomain
params = {'text': query, 'p': self.page_num,
'numdoc': self.per_page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
if not full_search:
# stop searching when the results completely duplicate previous results
if subdomains.issubset(self.subdomains):
break
# Merge the subdomain search results
self.subdomains = self.subdomains.union(subdomains)
if '>next</a>' not in resp.text: # stop searching when the results page has no next page
break
self.page_num += 1
if self.page_num >= self.limit_num: # search result count limit
break
def run(self):
"""
类执行入口
"""
self.begin()
self.search(self.domain, full_search=True)
# Exclude subdomains that return too many results in order to discover new subdomains
for statement in self.filter(self.domain, self.subdomains):
self.search(self.domain, filtered_subdomain=statement)
# Recursively search the next level of subdomains
if self.recursive_search:
# Start from 1 because one level has already been searched; the actual recursion depth is layer_num + 1
for layer_num in range(1, self.recursive_times):
for subdomain in self.subdomains:
# condition limiting the next level of subdomain search
count = subdomain.count('.') - self.domain.count('.')
if count == layer_num:
self.search(subdomain)
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = Yandex(domain)
search.run()
if __name__ == '__main__':
do('example.com')

@ -1,86 +0,0 @@
import time
import api
from client.subdomain.oneforall.common.search import Search
from client.subdomain.oneforall.config import logger
class ZoomEyeAPI(Search):
def __init__(self, domain):
Search.__init__(self)
self.domain = domain
self.module = 'Search'
self.source = 'ZoomEyeAPISearch'
self.addr = 'https://api.zoomeye.org/web/search'
self.delay = 2
self.user = api.zoomeye_api_usermail
self.pwd = api.zoomeye_api_password
def login(self):
"""
Log in to obtain the query access token
"""
url = 'https://api.zoomeye.org/user/login'
data = {'username': self.user, 'password': self.pwd}
resp = self.post(url=url, json=data)
if not resp:
logger.log('FATAL', f'Login failed, unable to obtain the access token for {self.source}')
exit(1)
data = resp.json()
if resp.status_code == 200:
logger.log('DEBUG', f'{self.source} module logged in successfully')
return data.get('access_token')
else:
logger.log('ALERT', data.get('message'))
exit(1)
def search(self):
"""
Send search requests and match subdomains
"""
page_num = 1
access_token = self.login()
while True:
time.sleep(self.delay)
self.header = self.get_header()
self.proxy = self.get_proxy(self.source)
self.header.update({'Authorization': 'JWT ' + access_token})
params = {'query': 'hostname:' + self.domain, 'page': page_num}
resp = self.get(self.addr, params)
if not resp:
return
subdomains = self.match(self.domain, resp.text)
if not subdomains: # stop searching when no subdomains are found
break
self.subdomains = self.subdomains.union(subdomains)
page_num += 1
if page_num > 500:
break
if resp.status_code == 403:
break
def run(self):
"""
Class execution entry point
"""
if not self.check(self.user, self.pwd):
return
self.begin()
self.search()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # unified entry point name, convenient for multithreaded calls
"""
Unified class invocation entry point
:param str domain: domain name
"""
search = ZoomEyeAPI(domain)
search.run()
if __name__ == '__main__':
do('mi.com')
