@ -1,144 +0,0 @@
|
||||
import datetime
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy import Column, Integer, String, Text, ForeignKey, Boolean
|
||||
from sqlalchemy.orm import relationship
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
# NOTE(review): database credentials are hard-coded in the connection URL —
# consider loading them from the environment or a config file instead.
engine = create_engine('postgresql://postgres:687fb677c784ce2a0b273263bfe778be@127.0.0.1/src')

Base = declarative_base()

# Session factory bound to the engine; `session` is the module-level
# session shared by the rest of the project.
Session = sessionmaker(bind=engine)

session = Session()
|
||||
|
||||
class SrcCustomer(Base):
    '''SRC customer (vendor) table.'''

    __tablename__ = 'src_customer'
    cus_name = Column(String(80), primary_key=True)  # vendor name
    cus_home = Column(String(100))  # vendor homepage
    cus_time = Column(String(30))  # time the record was added
    src_assets = relationship('SrcAssets', back_populates='src_customer', cascade='all, delete-orphan')
    src_task = relationship('SrcTask', back_populates='src_customer', cascade='all, delete-orphan')
    src_ports = relationship('SrcPorts', back_populates='src_customer', cascade='all, delete-orphan')

    def __init__(self, cus_name, cus_home):
        self.cus_name = cus_name
        self.cus_home = cus_home
        # creation timestamp stored as a formatted string, not a datetime
        self.cus_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
class SrcTask(Base):
    '''SRC task table: one row per asset (subdomain/IP/root domain) to probe.'''

    __tablename__ = 'src_task'
    id = Column(Integer, primary_key=True)
    task_name = Column(String(80), ForeignKey('src_customer.cus_name', ondelete='CASCADE'))  # vendor name
    task_domain = Column(String(100), unique=True)  # single task asset: subdomain / IP / root domain
    task_time = Column(String(30))  # time the record was added
    task_flag = Column(Boolean)  # whether the task has been probed
    src_customer = relationship('SrcCustomer', back_populates='src_task')

    def __init__(self, task_name, task_domain, task_flag=False):
        self.task_name = task_name
        self.task_domain = task_domain
        # Bug fix: the original wrote `self.task_time = self.cus_time = ...`
        # (copy-paste from SrcCustomer), leaving a stray non-column
        # `cus_time` attribute on every SrcTask instance.
        self.task_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.task_flag = task_flag
|
||||
|
||||
class SrcAssets(Base):
    '''SRC asset table: one row per discovered host/URL with scan-state flags.'''

    __tablename__ = 'src_assets'
    id = Column(Integer, primary_key=True)
    asset_name = Column(String(80), ForeignKey('src_customer.cus_name', ondelete='CASCADE'))  # vendor name
    asset_host = Column(String(200), unique=True)  # host / url
    asset_subdomain = Column(String(200))  # subdomain
    asset_title = Column(Text)  # page title
    asset_ip = Column(String(16))  # IP address
    asset_area = Column(Text)  # geographic area
    asset_waf = Column(String(100))  # waf
    asset_cdn = Column(Boolean)  # cdn
    asset_banner = Column(Text)  # banner
    asset_info = Column(Text)  # web fingerprint
    asset_whois = Column(Text)  # whois info
    asset_time = Column(String(30))  # time the record was added
    asset_xray_flag = Column(Boolean)  # crawled / scanned passively by xray
    asset_burp_flag = Column(Boolean)  # scanned by Burpsuite
    asset_port_flag = Column(Boolean)  # port scan performed
    asset_info_flag = Column(Boolean)  # web information gathering performed
    src_customer = relationship('SrcCustomer', back_populates='src_assets')

    def __init__(self, asset_name, asset_host, asset_subdomain, asset_title, asset_ip, asset_area, asset_waf, asset_cdn,
                 asset_banner, asset_info, asset_whois, asset_xray_flag=False, asset_burp_flag=False,
                 asset_port_flag=False, asset_info_flag=False):
        self.asset_name = asset_name
        self.asset_host = asset_host
        self.asset_subdomain = asset_subdomain
        self.asset_title = asset_title
        self.asset_ip = asset_ip
        self.asset_area = asset_area
        self.asset_waf = asset_waf
        self.asset_cdn = asset_cdn
        self.asset_banner = asset_banner
        self.asset_info = asset_info
        self.asset_whois = asset_whois
        # creation timestamp stored as a formatted string
        self.asset_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.asset_xray_flag = asset_xray_flag
        self.asset_burp_flag = asset_burp_flag
        self.asset_port_flag = asset_port_flag
        self.asset_info_flag = asset_info_flag
|
||||
|
||||
class SrcPorts(Base):
    '''SRC port table: one row per open port found on an asset.'''

    __tablename__ = 'src_ports'
    id = Column(Integer, primary_key=True)
    port_name = Column(String(80), ForeignKey('src_customer.cus_name', ondelete='CASCADE'))  # vendor name
    port_host = Column(String(200))  # host / subdomain / url
    port_ip = Column(String(20))  # ip
    port_port = Column(String(20))  # port
    port_service = Column(String(30))  # protocol
    port_product = Column(String(100))  # service running on the port
    port_version = Column(String(100))  # service version
    port_time = Column(String(30))  # time the record was added
    port_brute = Column(Boolean)  # brute-force attempted
    port_url_scan = Column(Boolean)  # HTTP probe performed
    src_customer = relationship('SrcCustomer', back_populates='src_ports')

    def __init__(self, port_name, port_host, port_ip, port_port, port_service, port_product, port_version, port_brute=False,
                 port_url_scan=False):
        self.port_name = port_name
        self.port_host = port_host
        self.port_ip = port_ip
        self.port_port = port_port
        self.port_service = port_service
        self.port_product = port_product
        self.port_version = port_version
        # creation timestamp stored as a formatted string
        self.port_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.port_brute = port_brute
        self.port_url_scan = port_url_scan
|
||||
|
||||
class SrcVul(Base):
    '''SRC vulnerability table: findings reported by the scanners.'''

    __tablename__ = 'src_vul'
    id = Column(Integer, primary_key=True)
    vul_subdomain = Column(String(150))  # subdomain
    vul_plugin = Column(String(200))  # plugin that found the issue
    vul_url = Column(Text)  # URL
    vul_payload = Column(Text)  # payload used
    vul_raw = Column(Text)  # raw request/response evidence
    vul_time = Column(String(30))  # time the record was added
    vul_scan_name = Column(String(30))  # scanner name
    vul_flag = Column(Boolean)  # marked as submitted
    vul_mail = Column(Boolean)  # notification mail sent

    def __init__(self, vul_subdomain, vul_plugin, vul_url, vul_payload, vul_raw, vul_scan_name, vul_flag=False,
                 vul_mail=False):
        self.vul_subdomain = vul_subdomain
        self.vul_plugin = vul_plugin
        self.vul_url = vul_url
        self.vul_payload = vul_payload
        self.vul_raw = vul_raw
        # creation timestamp stored as a formatted string
        self.vul_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.vul_scan_name = vul_scan_name
        self.vul_flag = vul_flag
        self.vul_mail = vul_mail
|
@ -1,64 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
|
||||
import time
|
||||
|
||||
from client.portscan.ShodanScan import Scan
|
||||
from client.portscan.NmapScan import Nmap_Portscan
|
||||
from client.database import session, SrcAssets, SrcPorts
|
||||
|
||||
class PortScan:
    """Wraps the Shodan + Nmap port-scan pipeline for a single IP."""

    def __init__(self, ip):
        self.ip = ip  # target IP address

    def run(self):
        """Query Shodan first, then run Nmap on the ports Shodan reported.

        :return: (nmap port detail dict, Shodan vulnerability list)
        """
        shodan_ports, shodan_vulns = Scan(ip=self.ip)
        nmap_result = Nmap_Portscan(ip=self.ip, port_info_list=shodan_ports)
        return nmap_result, shodan_vulns
|
||||
|
||||
def ReadAssets():
    '''Fetch one asset that has not been port-scanned and mark every asset
    sharing its IP as scanned, so the same IP is not scanned twice.

    :return: the fetched SrcAssets row, or None when nothing is pending
    '''
    asset = session.query(SrcAssets).filter(SrcAssets.asset_port_flag == False).first()
    session.commit()
    if asset:
        same_ip_rows = session.query(SrcAssets).filter(SrcAssets.asset_ip == asset.asset_ip).all()
        for row in same_ip_rows:
            row.asset_port_flag = True
            session.add(row)
        try:
            session.commit()
        except Exception as error:
            print(f'[-]端口扫描-修改IP扫描状态异常{error}')
            session.rollback()
    return asset
|
||||
|
||||
def WritePosts(port_dict, assets_sql):
    '''Persist the nmap port-scan results for one asset.

    :param dict port_dict: per-port detail dicts keyed by port identifier
    :param assets_sql: the SrcAssets row the ports belong to
    '''
    for key in port_dict:
        detail = port_dict[key]
        record = SrcPorts(port_name=assets_sql.asset_name,
                          port_host=assets_sql.asset_host,
                          port_ip=assets_sql.asset_ip,
                          port_port=detail['port'],
                          port_service=detail['name'],
                          port_product=detail['product'],
                          port_version=detail['version'])
        session.add(record)
        try:
            session.commit()
        except Exception as error:
            session.rollback()
            print(f'[-]端口入库异常{error}')
    print(f'[+]端口[{assets_sql.asset_ip}]入库完成')
|
||||
|
||||
def main():
    """Worker loop: poll for pending assets and port-scan them forever."""
    print('[+]端口扫描启动')
    while True:
        assets_sql = ReadAssets()
        if not assets_sql:
            # nothing pending: back off before polling again
            time.sleep(30)
            continue
        scanner = PortScan(assets_sql.asset_ip)
        port_dict, vulns_list = scanner.run()
        if port_dict:
            WritePosts(port_dict, assets_sql)


if __name__ == '__main__':
    main()
|
@ -1,95 +0,0 @@
|
||||
import time
|
||||
import threading
|
||||
import importlib
|
||||
|
||||
import client.subdomain.oneforall.config as config
|
||||
import client.subdomain.oneforall.dbexport as dbexport
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class Collect(object):
    """
    Subdomain collection orchestrator: runs every enabled collection
    module against one domain in parallel threads and optionally
    exports the merged results.
    """
    def __init__(self, domain, export=True):
        self.domain = domain          # target domain
        self.elapse = 0.0             # total run time in seconds
        self.modules = []             # (package, module-name) pairs to import
        self.collect_funcs = []       # [do-function, module-name] pairs
        self.path = None              # export path (derived in run() when unset)
        self.export = export          # whether to export results after the run
        self.format = 'csv'           # export format

    def get_mod(self):
        """
        Decide which collection modules to run.
        """
        if config.enable_all_module:
            # modules = ['brute', 'certificates', 'crawl',
            #            'datasets', 'intelligence', 'search']
            # the crawl module still has problems
            modules = ['certificates', 'check', 'datasets',
                       'dnsquery', 'intelligence', 'search']
            # modules = ['intelligence']  # the crawl module still has problems
            for module in modules:
                module_path = config.module_dir.joinpath(module)
                for path in module_path.rglob('*.py'):
                    # the module that needs importing
                    import_module = ('modules.' + module, path.stem)
                    self.modules.append(import_module)
        else:
            self.modules = config.enable_partial_module

    def import_func(self):
        """
        Import each collection script's `do` function.
        """
        for package, name in self.modules:
            import_object = importlib.import_module('.' + name, package)
            func = getattr(import_object, 'do')
            self.collect_funcs.append([func, name])

    def run(self):
        """
        Entry point: run one thread per collection module, then
        export the results when enabled.
        """
        start = time.time()
        logger.log('INFOR', f'开始收集{self.domain}的子域')
        self.get_mod()
        self.import_func()

        threads = []
        # create one subdomain-collection thread per module
        for collect_func in self.collect_funcs:
            func_obj, func_name = collect_func
            thread = threading.Thread(target=func_obj,
                                      name=func_name,
                                      args=(self.domain,),
                                      daemon=True)
            threads.append(thread)
        # start all threads
        for thread in threads:
            thread.start()
        # wait for all threads to finish
        for thread in threads:
            # join each thread with a timeout; worst case the main thread
            # blocks for len(threads) * module_thread_timeout seconds.
            # Timed-out threads detach from the main thread; since they were
            # created as daemons they die when the main thread exits.
            thread.join(config.module_thread_timeout)

        for thread in threads:
            if thread.is_alive():
                logger.log('ALERT', f'{thread.name}模块线程发生超时')

        # database export
        if self.export:
            if not self.path:
                name = f'{self.domain}.{self.format}'
                self.path = config.result_save_dir.joinpath(name)
            dbexport.export(self.domain, path=self.path, format=self.format)
        end = time.time()
        self.elapse = round(end - start, 1)


if __name__ == '__main__':
    collect = Collect('example.com')
    collect.run()
|
@ -1 +0,0 @@
|
||||
# coding=utf-8
|
@ -1,9 +0,0 @@
|
||||
from .module import Module
|
||||
|
||||
|
||||
class Crawl(Module):
    """
    Base class for crawler-based collection modules.
    """

    def __init__(self):
        super().__init__()
|
@ -1,250 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# coding=utf-8
|
||||
|
||||
"""
|
||||
SQLite数据库初始化和操作
|
||||
"""
|
||||
|
||||
import records
|
||||
|
||||
import client.subdomain.oneforall.config as config
|
||||
from records import Connection
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class Database(object):
    """SQLite helper built on `records`: connection handling, table
    lifecycle, and storage of collection-module results."""

    def __init__(self, db_path=None):
        self.conn = self.get_conn(db_path)

    @staticmethod
    def get_conn(db_path):
        """
        Get a database connection.

        :param db_path: an existing `records.Connection` or a database path
        :return: SQLite database connection
        """
        logger.log('TRACE', f'正在获取数据库连接')
        if isinstance(db_path, Connection):
            return db_path
        protocol = 'sqlite:///'
        if not db_path:  # empty path: connect to the default result database
            db_path = f'{protocol}{config.result_save_dir}/result.sqlite3'
        else:
            db_path = protocol + db_path
        db = records.Database(db_path)  # creates the database when it does not exist
        logger.log('TRACE', f'使用数据库: {db_path}')
        return db.get_connection()

    def query(self, sql):
        """
        Execute a SQL statement.

        Errors are logged and swallowed; callers receive None in that case.

        :param str sql: statement to execute
        :return: result set, or None on error
        """
        try:
            results = self.conn.query(sql)
        except Exception as e:
            logger.log('ERROR', e.args)
        else:
            return results

    def create_table(self, table_name):
        """
        Create the result table schema (no-op when it already exists).

        :param str table_name: name of the table to create
        """
        table_name = table_name.replace('.', '_')
        if self.exist_table(table_name):
            logger.log('TRACE', f'已经存在{table_name}表')
            return
        logger.log('TRACE', f'正在创建{table_name}表')
        self.query(f'create table "{table_name}" ('
                   f'id integer primary key,'
                   f'type text,'
                   f'alive int,'
                   f'request int,'
                   f'resolve int,'
                   f'new int,'
                   f'url text,'
                   f'subdomain text,'
                   f'port int,'
                   f'level int,'
                   f'cname text,'
                   f'content text,'
                   f'public int,'
                   f'status int,'
                   f'reason text,'
                   f'title text,'
                   f'banner text,'
                   f'header text,'
                   f'response text,'
                   f'times text,'
                   f'ttl text,'
                   f'resolver text,'
                   f'module text,'
                   f'source text,'
                   f'elapse float,'
                   f'find int,'
                   f'brute int,'
                   f'valid int)')

    def save_db(self, table_name, results, module_name=None):
        """
        Bulk-insert one module's results into the table.

        :param str table_name: table name
        :param list results: result rows
        :param str module_name: module name (for logging only)
        """
        logger.log('TRACE', f'正在将{module_name}模块发现{table_name}的子域'
                            '结果存入数据库')
        table_name = table_name.replace('.', '_')
        if results:
            try:
                self.conn.bulk_query(
                    f'insert into "{table_name}" ('
                    f'id, type, alive, resolve, request, new, url, subdomain,'
                    f'port, level, cname, content, public, status, reason,'
                    f'title, banner, header, response, times, ttl, resolver,'
                    f'module, source, elapse, find, brute, valid) '
                    f'values (:id, :type, :alive, :resolve, :request, :new,'
                    f':url, :subdomain, :port, :level, :cname, :content,'
                    f':public, :status, :reason, :title, :banner, :header,'
                    f':response, :times, :ttl, :resolver, :module, :source,'
                    f':elapse, :find, :brute, :valid)', results)
            except Exception as e:
                logger.log('ERROR', e)

    def exist_table(self, table_name):
        """
        Check whether a table exists.

        :param str table_name: table name
        :return: True when the table exists
        """
        table_name = table_name.replace('.', '_')
        logger.log('TRACE', f'正在查询是否存在{table_name}表')
        results = self.query(f'select count() from sqlite_master '
                             f'where type = "table" and '
                             f'name = "{table_name}"')
        if results.scalar() == 0:
            return False
        else:
            return True

    def copy_table(self, table_name, bak_table_name):
        """
        Copy a table to create a backup (overwrites an existing backup).

        :param str table_name: source table name
        :param str bak_table_name: backup table name
        """
        table_name = table_name.replace('.', '_')
        bak_table_name = bak_table_name.replace('.', '_')
        logger.log('TRACE', f'正在将{table_name}表复制到{bak_table_name}新表')
        self.query(f'drop table if exists "{bak_table_name}"')
        self.query(f'create table "{bak_table_name}" '
                   f'as select * from "{table_name}"')

    def clear_table(self, table_name):
        """
        Delete all rows from a table.

        :param str table_name: table name
        """
        table_name = table_name.replace('.', '_')
        logger.log('TRACE', f'正在清空{table_name}表中的数据')
        self.query(f'delete from "{table_name}"')

    def drop_table(self, table_name):
        """
        Drop a table if it exists.

        :param str table_name: table name
        """
        table_name = table_name.replace('.', '_')
        logger.log('TRACE', f'正在删除{table_name}表')
        self.query(f'drop table if exists "{table_name}"')

    def rename_table(self, table_name, new_table_name):
        """
        Rename a table.

        :param str table_name: current table name
        :param str new_table_name: new table name
        """
        table_name = table_name.replace('.', '_')
        new_table_name = new_table_name.replace('.', '_')
        # Bug fix: the original log message interpolated {table_name} twice,
        # never mentioning the new name.
        logger.log('TRACE', f'正在将{table_name}表重命名为{new_table_name}表')
        self.query(f'alter table "{table_name}" '
                   f'rename to "{new_table_name}"')

    def deduplicate_subdomain(self, table_name):
        """
        Remove duplicate subdomains, keeping the row with the smallest id.

        :param str table_name: table name
        """
        table_name = table_name.replace('.', '_')
        logger.log('TRACE', f'正在去重{table_name}表中的子域')
        self.query(f'delete from "{table_name}" where '
                   f'id not in (select min(id) '
                   f'from "{table_name}" group by subdomain)')

    def remove_invalid(self, table_name):
        """
        Remove rows with an empty subdomain or a failed resolution.

        :param str table_name: table name
        """
        table_name = table_name.replace('.', '_')
        logger.log('TRACE', f'正在去除{table_name}表中的无效子域')
        self.query(f'delete from "{table_name}" where '
                   f'subdomain is null or resolve == 0')

    def deal_table(self, deal_table_name, backup_table_name):
        """
        Post-process a finished collection table: back it up, then
        prune invalid and duplicate rows.

        :param str deal_table_name: table to process
        :param str backup_table_name: backup table name
        """
        self.copy_table(deal_table_name, backup_table_name)
        self.remove_invalid(deal_table_name)
        self.deduplicate_subdomain(deal_table_name)

    def get_data(self, table_name):
        """
        Fetch every row of a table.

        :param str table_name: table name
        :return: result set
        """
        table_name = table_name.replace('.', '_')
        logger.log('TRACE', f'获取{table_name}表中的所有数据')
        return self.query(f'select * from "{table_name}"')

    def export_data(self, table_name, alive, limit):
        """
        Fetch export columns, optionally restricted to live subdomains.

        :param str table_name: table name
        :param any alive: restrict to live rows when truthy
        :param str limit: restriction field ('resolve' or 'request')
        :return: result set
        """
        table_name = table_name.replace('.', '_')
        query = f'select id, type, new, alive, request, resolve, url, ' \
                f'subdomain, level, cname, content, public, port, status, ' \
                f'reason, title, banner, times, ttl, resolver, module, ' \
                f'source, elapse, find, brute, valid from "{table_name}"'
        if alive and limit:
            if limit in ['resolve', 'request']:
                where = f' where {limit} = 1'
                query += where
        elif alive:
            where = f' where alive = 1'
            query += where
        logger.log('TRACE', f'获取{table_name}表中的数据')
        return self.query(query)

    def close(self):
        """
        Close the database connection.
        """
        self.conn.close()
|
@ -1,64 +0,0 @@
|
||||
import re
|
||||
import tldextract
|
||||
import client.subdomain.oneforall.config as config
|
||||
|
||||
|
||||
class Domain(object):
    """
    Domain-name helper built around a registrable-domain regex.

    :param str string: raw text that may contain a domain name
    """

    def __init__(self, string):
        self.string = str(string)
        self.regexp = r'\b((?=[a-z0-9-]{1,63}\.)(xn--)?[a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,63}\b'
        self.domain = None

    def match(self):
        """
        Find the first domain name in the stored string.

        :return: matched domain string, or None
        """
        found = re.search(self.regexp, self.string, re.I)
        return found.group() if found else None

    def extract(self):
        """
        Split the matched domain into subdomain/domain/suffix parts.

        >>> d = Domain('www.example.com')
        <domain.Domain object>
        >>> d.extract()
        ExtractResult(subdomain='www', domain='example', suffix='com')

        :return: tldextract result, or None when nothing matches
        """
        cache_file = config.data_storage_dir.joinpath('public_suffix_list.dat')
        extractor = tldextract.TLDExtract(cache_file)
        matched = self.match()
        return extractor(matched) if matched else None

    def registered(self):
        """
        Get the registered (registrable) domain.

        >>> d = Domain('www.example.com')
        <domain.Domain object>
        >>> d.registered()
        example.com

        :return: registered domain, or None when nothing matches
        """
        parts = self.extract()
        return parts.registered_domain if parts else None
|
@ -1,25 +0,0 @@
|
||||
from .module import Module
|
||||
from client.subdomain.oneforall.common import utils
|
||||
|
||||
|
||||
class Lookup(Module):
    """
    Base class for DNS-record lookup modules.
    """
    def __init__(self):
        Module.__init__(self)

    def query(self):
        """
        Query the DNS record of type `self.type` for `self.domain` and
        collect any subdomains appearing in the answers.
        (The original docstring said "TXT record", but the record type
        comes from the subclass via `self.type`.)

        :return: set of collected subdomains, or None when the query fails
        """
        answer = utils.dns_query(self.domain, self.type)
        if answer is None:
            return None
        for item in answer:
            record = item.to_text()
            subdomains = utils.match_subdomain(self.domain, record)
            self.subdomains = self.subdomains.union(subdomains)
            self.gen_record(subdomains, record)
        return self.subdomains
|
@ -1,9 +0,0 @@
|
||||
from .module import Module
|
||||
|
||||
|
||||
class Query(Module):
    """
    Base class for query-style collection modules.
    """

    def __init__(self):
        super().__init__()
|
@ -1,264 +0,0 @@
|
||||
import asyncio
|
||||
import functools
|
||||
|
||||
import aiohttp
|
||||
import tqdm
|
||||
from aiohttp import ClientSession
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
import client.subdomain.oneforall.config as config
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.config import logger
|
||||
from client.subdomain.oneforall.common.database import Database
|
||||
|
||||
|
||||
def get_limit_conn():
    """Return the configured open-connection limit, falling back to a
    system-derived semaphore value when unset or not an integer."""
    limit = config.limit_open_conn
    if limit is None:  # default case
        limit = utils.get_semaphore()
    elif not isinstance(limit, int):  # non-numeric config value
        limit = utils.get_semaphore()
    return limit
|
||||
|
||||
|
||||
def get_ports(port):
    """Normalize the port specification into a set of ports to request.

    Accepts a collection of ports, a single port number, or one of the
    named ranges 'default'/'small'/'large'; anything else falls back to {80}.
    """
    logger.log('DEBUG', f'正在获取请求端口范围')
    ports = set()
    if isinstance(port, (set, list, tuple)):
        ports = port
    elif isinstance(port, int) and 0 <= port <= 65535:
        ports = {port}
    elif port in {'default', 'small', 'large'}:
        logger.log('DEBUG', f'请求{port}等端口范围')
        ports = config.ports.get(port)
    if not ports:  # unexpected specification: fall back to port 80
        logger.log('ERROR', f'指定请求端口范围有误')
        ports = {80}
    logger.log('INFOR', f'请求端口范围:{ports}')
    return set(ports)
|
||||
|
||||
|
||||
def gen_req_data(data, ports):
    """
    Build one request entry per (resolved subdomain, port).

    Subdomains that failed resolution (resolve == 0) are skipped. Ports
    ending in 443 get an https URL, everything else http; the default
    ports 80 and 443 are omitted from the URL.

    Fixes over the original: the loop variable no longer shadows the
    `data` parameter, the duplicated http/https branches are merged, and
    the caller's dicts are no longer mutated in place (each entry is a
    fresh copy).

    :param list data: resolved subdomain dicts
    :param ports: iterable of ports to request
    :return: list of new request dicts
    """
    logger.log('INFOR', f'正在生成请求地址')
    new_data = []
    for item in data:
        # subdomains that failed resolution (0) are not probed over HTTP
        if item.get('resolve') == 0:
            continue
        subdomain = item.get('subdomain')
        for port in ports:
            # ports ending in 443 (443, 8443, ...) are assumed to speak TLS
            scheme = 'https' if str(port).endswith('443') else 'http'
            if port in (80, 443):
                url = f'{scheme}://{subdomain}'
            else:
                url = f'{scheme}://{subdomain}:{port}'
            entry = dict(item)  # fresh dict so each port gets its own record
            entry['id'] = None
            entry['url'] = url
            entry['port'] = port
            new_data.append(entry)
    return new_data
|
||||
|
||||
|
||||
async def fetch(session, url):
    """
    Perform one HTTP request.

    :param session: aiohttp client session
    :param str url: URL to request
    :return: (response, text) on success, or the raised exception object
    """
    method = config.request_method.upper()
    timeout = aiohttp.ClientTimeout(total=None,
                                    connect=None,
                                    sock_read=config.sockread_timeout,
                                    sock_connect=config.sockconn_timeout)
    try:
        if method == 'HEAD':
            async with session.head(url,
                                    ssl=config.verify_ssl,
                                    allow_redirects=config.allow_redirects,
                                    timeout=timeout,
                                    proxy=config.aiohttp_proxy) as resp:
                text = await resp.text()
        else:
            async with session.get(url,
                                   ssl=config.verify_ssl,
                                   allow_redirects=config.allow_redirects,
                                   timeout=timeout,
                                   proxy=config.aiohttp_proxy) as resp:
                try:
                    # first try decoding as utf-8
                    text = await resp.text(encoding='utf-8', errors='strict')
                except UnicodeError:
                    try:
                        # then try gb18030
                        text = await resp.text(encoding='gb18030',
                                               errors='strict')
                    except UnicodeError:
                        # finally fall back to automatic decoding
                        text = await resp.text(encoding=None,
                                               errors='ignore')
        return resp, text
    except Exception as e:
        # NOTE: exceptions are returned rather than raised —
        # request_callback inspects the result type to record failures.
        return e
|
||||
|
||||
|
||||
def get_title(markup):
    """
    Extract a display title from an HTML document.

    Falls back through <title>, <h1>, <h2>, <h3>, the meta description,
    the meta keywords, and finally the whole page text when it is short.

    :param markup: html text
    :return: title string (the literal 'None' when nothing usable is found)
    """
    soup = BeautifulSoup(markup, 'html.parser')

    title = soup.title
    if title:
        return title.text

    h1 = soup.h1
    if h1:
        return h1.text

    h2 = soup.h2
    if h2:
        return h2.text

    h3 = soup.h3
    # Bug fix: the original tested `if h2:` here, so the <h3> fallback
    # could never fire (and crashed on pages with <h2> removed mid-check).
    if h3:
        return h3.text

    desc = soup.find('meta', attrs={'name': 'description'})
    if desc:
        return desc['content']

    word = soup.find('meta', attrs={'name': 'keywords'})
    if word:
        return word['content']

    text = soup.text
    if len(text) <= 200:
        return text

    return 'None'
|
||||
|
||||
|
||||
def request_callback(future, index, datas):
    # Record the outcome of one fetch() task back into datas[index].
    result = future.result()
    if isinstance(result, BaseException):
        # fetch() returns exceptions instead of raising them
        logger.log('TRACE', result.args)
        name = utils.get_classname(result)
        datas[index]['reason'] = name + ' ' + str(result)
        datas[index]['request'] = 0
        datas[index]['alive'] = 0
    elif isinstance(result, tuple):
        resp, text = result
        datas[index]['reason'] = resp.reason
        datas[index]['status'] = resp.status
        # 400 and 5xx responses are treated as not alive
        if resp.status == 400 or resp.status >= 500:
            datas[index]['request'] = 0
            datas[index]['alive'] = 0
        else:
            datas[index]['request'] = 1
            datas[index]['alive'] = 1
        headers = resp.headers
        datas[index]['banner'] = utils.get_sample_banner(headers)
        # store the header dict repr without its surrounding braces
        datas[index]['header'] = str(dict(headers))[1:-1]
        if isinstance(text, str):
            title = get_title(text).strip()
            datas[index]['title'] = utils.remove_invalid_string(title)
            datas[index]['response'] = utils.remove_invalid_string(text)
|
||||
|
||||
|
||||
def get_connector():
    """Build the shared aiohttp TCP connector using the configured limits."""
    return aiohttp.TCPConnector(ttl_dns_cache=300,
                                ssl=config.verify_ssl,
                                limit=get_limit_conn(),
                                limit_per_host=config.limit_per_host)
|
||||
|
||||
|
||||
def get_header():
    """Return generated fake request headers when enabled, otherwise None."""
    if config.fake_header:
        return utils.gen_fake_header()
    return None
|
||||
|
||||
|
||||
async def bulk_request(data, port):
    """
    Probe every resolved subdomain over HTTP on the requested ports.

    :param list data: resolved subdomain rows
    :param port: port specification (see get_ports)
    :return: probed rows plus the rows that were filtered out beforehand
    """
    ports = get_ports(port)
    no_req_data = utils.get_filtered_data(data)
    to_req_data = gen_req_data(data, ports)
    method = config.request_method
    logger.log('INFOR', f'请求使用{method}方法')
    logger.log('INFOR', f'正在进行异步子域请求')
    connector = get_connector()
    header = get_header()
    async with ClientSession(connector=connector, headers=header) as session:
        tasks = []
        for i, data in enumerate(to_req_data):
            url = data.get('url')
            task = asyncio.ensure_future(fetch(session, url))
            # each task records its own result into to_req_data[i]
            task.add_done_callback(functools.partial(request_callback,
                                                     index=i,
                                                     datas=to_req_data))
            tasks.append(task)
        # only await when the task list is not empty
        if tasks:
            # wait for every task to finish; errors are folded into
            # the result rows by request_callback
            futures = asyncio.as_completed(tasks)
            for future in tqdm.tqdm(futures,
                                    total=len(tasks),
                                    desc='Request Progress',
                                    ncols=80):
                await future
    return to_req_data + no_req_data
|
||||
|
||||
|
||||
def run_request(domain, data, port):
    """
    Entry point for the subdomain request module.

    :param str domain: main domain being requested
    :param list data: subdomain rows to request
    :param str port: port range to request
    :return: result rows after requesting
    :rtype: list
    """
    logger.log('INFOR', f'开始执行子域请求模块')
    loop = asyncio.get_event_loop()
    asyncio.set_event_loop(loop)
    data = utils.set_id_none(data)
    request_coroutine = bulk_request(data, port)
    data = loop.run_until_complete(request_coroutine)
    # brief pause before returning so underlying connections have a
    # little buffer time to shut down cleanly
    loop.run_until_complete(asyncio.sleep(0.25))
    count = utils.count_alive(data)
    logger.log('INFOR', f'经验证{domain}存活子域{count}个')
    return data
|
||||
|
||||
|
||||
def save_data(name, data):
    """
    Persist request results to the result database, recreating the table.

    :param str name: table name
    :param list data: rows to save
    """
    database = Database()
    database.drop_table(name)
    database.create_table(name)
    database.save_db(name, data, 'request')
    database.close()
|
@ -1,166 +0,0 @@
|
||||
import gc
|
||||
import json
|
||||
|
||||
import client.subdomain.oneforall.config as config
|
||||
from client.subdomain.oneforall.config import logger
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.database import Database
|
||||
|
||||
|
||||
def filter_subdomain(data):
    """
    Collect the subdomains that have no resolution content yet.

    (The original loop used `for data in data`, shadowing the parameter;
    replaced with a comprehension.)

    :param list data: data rows to filter
    :return: list of subdomains still needing resolution
    """
    logger.log('DEBUG', f'正在过滤出待解析的子域')
    return [item.get('subdomain') for item in data if not item.get('content')]
|
||||
|
||||
|
||||
def update_data(data, records):
    """
    Merge resolution results back into the data rows.

    :param list data: rows to update
    :param dict records: resolution results keyed by subdomain
    :return: updated rows
    """
    logger.log('DEBUG', f'正在更新解析结果')
    for index, items in enumerate(data):
        if items.get('content'):
            continue
        subdomain = items.get('subdomain')
        record = records.get(subdomain)
        if record is None:
            # Robustness fix: the original called items.update(None) and
            # raised TypeError when a subdomain was missing from the
            # massdns output; skip such rows instead.
            continue
        items.update(record)
        data[index] = items
    return data
|
||||
|
||||
|
||||
def save_data(name, data):
    """
    Persist resolution results to the result database, recreating the table.

    :param str name: table name
    :param list data: rows to save
    """
    logger.log('INFOR', f'正在保存解析结果')
    database = Database()
    database.drop_table(name)
    database.create_table(name)
    database.save_db(name, data, 'resolve')
    database.close()
|
||||
|
||||
|
||||
def save_subdomains(save_path, subdomain_list):
    """Write the pending subdomains to disk, one per line; abort on failure."""
    logger.log('DEBUG', f'正在保存待解析的子域')
    payload = '\n'.join(subdomain_list)
    if not utils.save_data(save_path, payload):
        logger.log('FATAL', '保存待解析的子域出错')
        exit(1)
|
||||
|
||||
|
||||
def deal_output(output_path):
    """
    Parse the massdns JSON-lines output into per-subdomain records.

    :param output_path: path to the massdns output file
    :return: dict mapping each queried name to its resolution record
    """
    logger.log('INFOR', f'正在处理解析结果')
    records = dict()  # accumulates resolution data for every domain
    with open(output_path) as fd:
        for line in fd:
            line = line.strip()
            try:
                items = json.loads(line)
            except Exception as e:
                logger.log('ERROR', e.args)
                logger.log('ERROR', f'解析行{line}出错跳过解析该行')
                continue
            record = dict()
            record['resolver'] = items.get('resolver')
            qname = items.get('name')[:-1]  # strip the trailing `.`
            status = items.get('status')
            if status != 'NOERROR':
                record['alive'] = 0
                record['resolve'] = 0
                record['reason'] = status
                records[qname] = record
                continue
            data = items.get('data')
            if 'answers' not in data:
                record['alive'] = 0
                record['resolve'] = 0
                record['reason'] = 'NOANSWER'
                records[qname] = record
                continue
            flag = False  # set once at least one A record is seen
            cname = list()
            ips = list()
            public = list()
            ttls = list()
            answers = data.get('answers')
            for answer in answers:
                if answer.get('type') == 'A':
                    flag = True
                    cname.append(answer.get('name')[:-1])  # strip the trailing `.`
                    ip = answer.get('data')
                    ips.append(ip)
                    ttl = answer.get('ttl')
                    ttls.append(str(ttl))
                    is_public = utils.ip_is_public(ip)
                    public.append(str(is_public))
                    record['resolve'] = 1
                    record['reason'] = status
                    record['cname'] = ','.join(cname)
                    record['content'] = ','.join(ips)
                    record['public'] = ','.join(public)
                    record['ttl'] = ','.join(ttls)
                    records[qname] = record
            if not flag:
                # answers contained no A records at all
                record['alive'] = 0
                record['resolve'] = 0
                record['reason'] = 'NOARECORD'
                records[qname] = record
    return records
|
||||
|
||||
|
||||
def run_resolve(domain, data):
    """
    Entry point for resolving collected subdomains via massdns.

    :param str domain: registered domain being processed
    :param list data: subdomain data awaiting resolution
    :return: data list updated with resolve results
    :rtype: list
    """
    logger.log('INFOR', f'开始解析{domain}的子域')
    subdomains = filter_subdomain(data)
    if not subdomains:
        return data  # nothing left to resolve

    massdns_dir = config.third_party_dir.joinpath('massdns')
    result_dir = config.result_save_dir
    temp_dir = result_dir.joinpath('temp')
    utils.check_dir(temp_dir)
    massdns_path = utils.get_massdns_path(massdns_dir)
    timestring = utils.get_timestring()

    save_path = temp_dir.joinpath(
        f'collected_subdomains_{domain}_{timestring}.txt')
    save_subdomains(save_path, subdomains)
    del subdomains  # free the list before the massdns run
    gc.collect()

    output_path = temp_dir.joinpath(
        f'resolved_result_{domain}_{timestring}.json')
    log_path = result_dir.joinpath('massdns.log')
    ns_path = config.brute_nameservers_path

    utils.call_massdns(massdns_path, save_path, ns_path,
                       output_path, log_path, quiet_mode=True)

    records = deal_output(output_path)
    data = update_data(data, records)
    logger.log('INFOR', f'结束解析{domain}的子域')
    return data
|
@ -1,54 +0,0 @@
|
||||
import client.subdomain.oneforall.config as config
|
||||
from .module import Module
|
||||
from . import utils
|
||||
|
||||
|
||||
class Search(Module):
    """
    Base class for search-engine based subdomain collection modules.
    """

    def __init__(self):
        Module.__init__(self)
        self.page_num = 0  # index of the first result to request
        self.per_page_num = 50  # number of results requested per page
        self.recursive_search = config.enable_recursive_search
        self.recursive_times = config.search_recursive_times

    @staticmethod
    def filter(domain, subdomain):
        """
        Build search filter statements.

        Uses the -site: syntax supported by search engines to exclude
        subdomains that dominate the result pages, so new ones surface.

        :param str domain: registered domain
        :param set subdomain: set of already known subdomains
        :return: list of filter statements
        :rtype: list
        """
        statements_list = []
        common = set(map(lambda x: x + '.' + domain,
                         config.subdomains_common))
        known = list(subdomain.intersection(common))
        for start in range(0, len(known), 2):  # exclude two subdomains per statement
            pair = known[start:start + 2]
            statements_list.append(''.join(set(' -site:' + item
                                               for item in pair)))
        return statements_list

    def match_location(self, domain, url):
        """
        Match subdomains from the redirect location of a result URL.

        Some engines (e.g. Baidu) show truncated URLs, so a HEAD request
        is sent and the Location response header is matched instead.

        :param str domain: registered domain
        :param str url: result URL to follow
        :return: matched subdomains
        :rtype: set
        """
        response = self.head(url, check=False, allow_redirects=False)
        if not response:
            return set()
        location = response.headers.get('location')
        if not location:
            return set()
        return set(utils.match_subdomain(domain, location))
|
@ -1,579 +0,0 @@
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import random
|
||||
import platform
|
||||
import subprocess
|
||||
from ipaddress import IPv4Address, ip_address
|
||||
from stat import S_IXUSR
|
||||
|
||||
import psutil
|
||||
import tenacity
|
||||
import requests
|
||||
from pathlib import Path
|
||||
from records import Record, RecordCollection
|
||||
from dns.resolver import Resolver
|
||||
|
||||
import client.subdomain.oneforall.config as config
|
||||
from client.subdomain.oneforall.common.domain import Domain
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
# Pool of realistic desktop User-Agent strings used by gen_fake_header().
user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/68.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) '
    'Gecko/20100101 Firefox/68.0',
    'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/68.0',
]
|
||||
|
||||
|
||||
def match_subdomain(domain, text, distinct=True):
    """
    Match subdomains of *domain* occurring in *text*.

    :param str domain: registered domain
    :param str text: response text to scan
    :param bool distinct: deduplicate the results
    :return: matched subdomains, lowercased
    :rtype: set or list
    """
    # Fix: escape the whole domain for regex use. The old code only
    # escaped dots, so any other regex metacharacter in a domain would
    # corrupt the pattern.
    regexp = r'(?:[a-z0-9](?:[a-z0-9\-]{0,61}[a-z0-9])?\.){0,}' \
             + re.escape(domain)
    result = re.findall(regexp, text, re.I)
    if not result:
        return set()
    deal = map(lambda s: s.lower(), result)
    if distinct:
        return set(deal)
    return list(deal)
|
||||
|
||||
|
||||
def gen_random_ip():
|
||||
"""
|
||||
生成随机的点分十进制的IP字符串
|
||||
"""
|
||||
while True:
|
||||
ip = IPv4Address(random.randint(0, 2 ** 32 - 1))
|
||||
if ip.is_global:
|
||||
return ip.exploded
|
||||
|
||||
|
||||
def gen_fake_header():
    """
    Generate a forged, browser-like request header dict with a random
    User-Agent and a random spoofed client IP.
    """
    ua = random.choice(user_agents)
    ip = gen_random_ip()
    return {
        'Accept': 'text/html,application/xhtml+xml,'
                  'application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Cache-Control': 'max-age=0',
        'Connection': 'close',
        'DNT': '1',
        'Referer': 'https://www.google.com/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': ua,
        'X-Forwarded-For': ip,
        'X-Real-IP': ip,
    }
|
||||
|
||||
|
||||
def get_random_proxy():
    """
    Pick a random proxy from the configured pool; None when the pool
    is empty.
    """
    pool = config.proxy_pool
    if not pool:
        return None
    return random.choice(pool)
|
||||
|
||||
|
||||
def split_list(ls, size):
    """
    Split *ls* into chunks of at most *size* elements.

    :param list ls: list to split
    :param int size: chunk size (0 returns the list unchanged)
    :return: list of chunks

    >>> split_list([1, 2, 3, 4], 3)
    [[1, 2, 3], [4]]
    """
    if size == 0:
        return ls
    return [ls[start:start + size] for start in range(0, len(ls), size)]
|
||||
|
||||
|
||||
def get_domains(target):
    """
    Collect registered domains from the given target.

    :param set or str target: a set/tuple/list of domains, a path of a
                              file with one domain per line, or a
                              single domain string
    :return: list of domains (exits when none are found)
    """
    logger.log('DEBUG', f'正在获取域名')
    domains = list()
    if isinstance(target, (set, tuple)):
        domains = list(target)
    elif isinstance(target, list):
        domains = target
    elif isinstance(target, str):
        path = Path(target)
        if path.exists() and path.is_file():
            # Treat the string as a file containing one domain per line.
            with open(target, encoding='utf-8', errors='ignore') as file:
                for line in file:
                    domain = Domain(line.lower().strip()).match()
                    if domain:
                        domains.append(domain)
        else:
            domain = Domain(target.lower().strip()).match()
            if domain:
                domains.append(domain)
    count = len(domains)
    if count == 0:
        logger.log('FATAL', f'获取到{count}个域名')
        exit(1)
    logger.log('INFOR', f'获取到{count}个域名')
    return domains
|
||||
|
||||
|
||||
def get_semaphore():
    """
    Get the concurrency value used for queries.

    :return: concurrency as an int
    """
    # Fix: the original returned None (implicitly) on platforms other
    # than Windows/Linux/Darwin, breaking callers expecting an int.
    # All platforms currently share the same value, kept per-system so
    # they can be tuned independently later.
    per_system = {'Windows': 800, 'Linux': 800, 'Darwin': 800}
    return per_system.get(platform.system(), 800)
|
||||
|
||||
|
||||
def check_dir(dir_path):
    """
    Create *dir_path* (including parents) when it does not exist.
    """
    if dir_path.exists():
        return
    logger.log('INFOR', f'不存在{dir_path}目录将会新建')
    dir_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
def check_path(path, name, format):
    """
    Check and normalize the result output path.

    :param path: requested save path (non-str falls back to default)
    :param name: export file base name
    :param format: export format, used as the file suffix
    :return: a ready-to-use Path (parent directory created if needed)
    """
    filename = f'{name}.{format}'
    default_path = config.result_save_dir.joinpath(filename)
    if isinstance(path, str):
        path = repr(path).replace('\\', '/')  # backslashes -> forward slashes
        path = path.replace('\'', '')  # drop the quotes added by repr()
    else:
        path = default_path
    path = Path(path)
    if not path.suffix:  # a directory was given: append the filename
        path = path.joinpath(filename)
    parent_dir = path.parent
    if not parent_dir.exists():
        logger.log('ALERT', f'不存在{parent_dir}目录将会新建')
        parent_dir.mkdir(parents=True, exist_ok=True)
    if path.exists():
        logger.log('ALERT', f'存在{path}文件将会覆盖')
    return path
|
||||
|
||||
|
||||
def check_format(format, count):
    """
    Validate the requested export format.

    :param format: requested export format
    :param count: number of rows to export
    :return: a usable export format (falls back to csv/xlsx)
    """
    supported = {'rst', 'csv', 'tsv', 'json', 'yaml', 'html',
                 'jira', 'xls', 'xlsx', 'dbf', 'latex', 'ods'}
    if format == 'xls' and count > 65000:
        # xls sheets cap out around 65k rows; upgrade transparently.
        logger.log('ALERT', 'xls文件限制为最多65000行')
        logger.log('ALERT', '使用xlsx格式导出')
        return 'xlsx'
    if format in supported:
        return format
    logger.log('ALERT', f'不支持{format}格式导出')
    logger.log('ALERT', '默认使用csv格式导出')
    return 'csv'
|
||||
|
||||
|
||||
def save_data(path, data):
    """
    Save data to a file.

    A text write is attempted first; on TypeError (bytes data) a
    binary write is used instead.

    :param path: destination path
    :param data: str or bytes to write
    :return: True on success, False otherwise
    """
    try:
        with open(path, 'w', encoding="utf-8",
                  errors='ignore', newline='') as file:
            file.write(data)
        return True
    except TypeError:
        # data is bytes-like: retry in binary mode
        with open(path, 'wb') as file:
            file.write(data)
        return True
    except Exception as e:
        logger.log('ERROR', e.args)
        return False
|
||||
|
||||
|
||||
def check_response(method, resp):
    """
    Check a response and log details of abnormal JSON responses.

    :param method: request method used (for logging)
    :param resp: response object
    :return: True for a non-empty 200 response, else False
    """
    if resp.status_code == 200 and resp.content:
        return True
    logger.log('ALERT', f'{method} {resp.url} {resp.status_code} - '
                        f'{resp.reason} {len(resp.content)}')
    content_type = resp.headers.get('Content-Type')
    if content_type and 'json' in content_type and resp.content:
        try:
            msg = resp.json()
        except Exception as e:
            logger.log('DEBUG', e.args)
        else:
            logger.log('ALERT', msg)
    return False
|
||||
|
||||
|
||||
def mark_subdomain(old_data, now_data):
    """
    Mark newly discovered subdomains and return the marked data set.

    :param list old_data: previously collected subdomain records
    :param list now_data: currently collected subdomain records
    :return: records with an added 'new' flag (1 = new, 0 = seen before)
    :rtype: list
    """
    marked = now_data.copy()  # shallow copy: records are shared with now_data
    if not old_data:
        # First collection run: everything counts as new.
        for item in marked:
            item['new'] = 1
        return marked
    seen = {item.get('subdomain') for item in old_data}
    for item in marked:
        item['new'] = 0 if item.get('subdomain') in seen else 1
    return marked
|
||||
|
||||
|
||||
def remove_invalid_string(string):
    """
    Strip control characters that Excel cells cannot store.
    (Tab, LF and CR are deliberately kept.)
    """
    return re.sub(r'[\000-\010]|[\013-\014]|[\016-\037]', r'', string)
|
||||
|
||||
|
||||
def check_value(values):
    """
    Truncate over-long string values for Excel export.

    :param values: a dict of cell values (anything else is returned as-is)
    :return: the dict with strings capped at Excel's 32767-char limit
    """
    if not isinstance(values, dict):
        return values
    for key, value in values.items():
        if isinstance(value, str) and len(value) > 32767:
            values[key] = value[:32767]  # Excel cell length limit
    return values
|
||||
|
||||
|
||||
def export_all(format, path, datas):
    """
    Export all result data into a single file.

    :param str format: export file format
    :param str path: export file path
    :param list datas: result data rows to export
    """
    format = check_format(format, len(datas))
    timestamp = get_timestring()
    name = f'all_subdomain_result_{timestamp}'
    path = check_path(path, name, format)
    logger.log('INFOR', f'所有主域的子域结果 {path}')
    row_list = list()
    for row in datas:
        # Bulky fields are not exported.
        if 'header' in row:
            row.pop('header')
        if 'response' in row:
            row.pop('response')
        if format in {'xls', 'xlsx'}:
            # Fix: check_value expects a dict. The old code passed
            # row.values() (a dict view), so check_value returned it
            # untouched and the Excel 32767-char cell limit was never
            # enforced.
            row = check_value(row)
        keys = row.keys()
        values = row.values()
        row_list.append(Record(keys, values))
    rows = RecordCollection(iter(row_list))
    content = rows.export(format)
    save_data(path, content)
|
||||
|
||||
|
||||
def dns_resolver():
    """
    Build a DNS resolver configured from the project settings.

    :return: a configured dns.resolver.Resolver instance
    """
    resolver = Resolver()
    resolver.nameservers = config.resolver_nameservers
    resolver.timeout = config.resolver_timeout
    resolver.lifetime = config.resolver_lifetime
    return resolver
|
||||
|
||||
|
||||
def dns_query(qname, qtype):
    """
    Query a DNS record for a domain.

    :param str qname: domain to query
    :param str qtype: record type to query
    :return: the answer object, or None when the query failed
    """
    logger.log('TRACE', f'尝试查询{qname}的{qtype}记录')
    resolver = dns_resolver()
    try:
        answer = resolver.query(qname, qtype)
    except Exception as e:
        logger.log('TRACE', e.args)
        logger.log('TRACE', f'查询{qname}的{qtype}记录失败')
        return None
    logger.log('TRACE', f'查询{qname}的{qtype}记录成功')
    return answer
|
||||
|
||||
|
||||
def get_timestamp():
    """Return the current Unix timestamp as an int."""
    return int(time.time())
|
||||
|
||||
|
||||
def get_timestring():
    """Return the local time formatted as YYYYmmdd_HHMMSS."""
    return time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
|
||||
|
||||
|
||||
def get_classname(classobj):
    """Return the class name of the given object."""
    return classobj.__class__.__name__
|
||||
|
||||
|
||||
def python_version():
    """Return the full interpreter version string."""
    return sys.version
|
||||
|
||||
|
||||
def count_alive(data):
    """Count the records whose alive flag is exactly 1."""
    return sum(1 for item in data if item.get('alive') == 1)
|
||||
|
||||
|
||||
def get_subdomains(data):
    """Collect the set of subdomain names present in the records."""
    return {item.get('subdomain') for item in data}
|
||||
|
||||
|
||||
def set_id_none(data):
    """
    Clear the 'id' field of every record (records mutated in place).

    :param list data: list of dict records
    :return: a new list containing the same (mutated) records
    """
    for item in data:
        item['id'] = None
    return list(data)
|
||||
|
||||
|
||||
def get_filtered_data(data):
    """
    Keep only the records whose resolve flag is exactly 0.

    :param list data: subdomain records
    :return: records that failed to resolve
    """
    return [item for item in data if item.get('resolve') == 0]
|
||||
|
||||
|
||||
def get_sample_banner(headers):
    """
    Build a short banner string from identifying response headers.

    :param headers: response headers mapping
    :return: comma-joined Server/Via/X-Powered-By values (empty ones skipped)
    """
    fields = ('Server', 'Via', 'X-Powered-By')
    return ','.join(value for value in (headers.get(f) for f in fields)
                    if value)
|
||||
|
||||
|
||||
def check_ip_public(ip_list):
    """
    Check whether every IP in the list is globally routable.

    :param ip_list: iterable of IP address strings
    :return: 1 when all IPs are public, else 0
    """
    return int(all(ip_address(item).is_global for item in ip_list))
|
||||
|
||||
|
||||
def ip_is_public(ip_str):
    """
    Check whether a single IP address is globally routable.

    :param ip_str: IP address string
    :return: 1 for public, 0 for private/reserved
    """
    return int(ip_address(ip_str).is_global)
|
||||
|
||||
|
||||
def get_process_num():
    """
    Return the brute-force process count, capped at the CPU count;
    1 when not configured as an int.
    """
    configured = config.brute_process_num
    if isinstance(configured, int):
        return min(os.cpu_count(), configured)
    return 1
|
||||
|
||||
|
||||
def get_coroutine_num():
    """
    Return the resolve coroutine count.

    An explicit integer setting is used (floored at 64); when unset
    (None), a value is chosen from total system memory; any other
    setting falls back to 64.
    """
    configured = config.resolve_coroutine_num
    if isinstance(configured, int):
        return max(64, configured)
    if configured is None:
        g_size = 1024 * 1024 * 1024
        total_mem = psutil.virtual_memory().total
        # (memory ceiling in GiB, coroutine count), smallest first
        for ceiling, num in ((1, 64), (2, 128), (4, 256),
                             (8, 512), (16, 1024)):
            if total_mem <= ceiling * g_size:
                return num
        return 2048
    return 64
|
||||
|
||||
|
||||
def uniq_dict_list(dict_list):
    """
    Keep only the items occurring exactly once in the list.

    NOTE(review): this drops *every* copy of a duplicated item rather
    than deduplicating to one copy — preserved as in the original.

    :param list dict_list: list of (dict) items
    :return: items with no duplicates anywhere in the input
    """
    return [item for item in dict_list if dict_list.count(item) == 1]
|
||||
|
||||
|
||||
def delete_file(*paths):
    """
    Best-effort deletion of the given Path objects.

    :param paths: Path objects to unlink; failures are logged, not raised
    """
    for target in paths:
        try:
            target.unlink()
        except Exception as e:
            logger.log('ERROR', e.args)
|
||||
|
||||
|
||||
@tenacity.retry(stop=tenacity.stop_after_attempt(2))
def check_net():
    """
    Check that the external network is reachable (retried once by
    tenacity on failure).
    """
    logger.log('INFOR', '正在检查网络环境')
    url = 'http://www.example.com/'
    logger.log('INFOR', f'访问地址 {url}')
    try:
        rsp = requests.get(url)
    except Exception as e:
        logger.log('ERROR', e.args)
        logger.log('ALERT', '访问外网出错 重新检查中')
        raise tenacity.TryAgain
    if rsp.status_code != 200:
        logger.log('ALERT', f'{rsp.request.method} {rsp.request.url} '
                            f'{rsp.status_code} {rsp.reason}')
        logger.log('ALERT', '不能正常访问外网 重新检查中')
        raise tenacity.TryAgain
    logger.log('INFOR', '能正常访问外网')
|
||||
|
||||
|
||||
def check_pre():
    """
    Check that the Python runtime meets OneForAll's requirements.

    Exits the process on an unsupported interpreter or Python version.
    """
    logger.log('INFOR', '正在检查依赖环境')
    system = platform.system()
    implementation = platform.python_implementation()
    # Fix: compare versions as integer tuples. The old code compared
    # version *strings*, which is wrong lexicographically
    # (e.g. '3.10' < '3.6' evaluates True).
    version = sys.version_info[:2]
    if implementation != 'CPython':
        logger.log('FATAL', f'OneForAll只在CPython下测试通过')
        exit(1)
    if version < (3, 6):
        logger.log('FATAL', 'OneForAll需要Python 3.6以上版本')
        exit(1)
    if system == 'Windows' and implementation == 'CPython':
        if version < (3, 8):
            logger.log('FATAL', 'OneForAll在Windows系统运行时需要Python 3.8以上版本')
            exit(1)
|
||||
|
||||
|
||||
def check_env():
    """
    Check the runtime environment: network reachability first, then
    interpreter requirements. Exits on failure.
    """
    logger.log('INFOR', '正在检查运行环境')
    try:
        check_net()
    except Exception as e:
        logger.log('DEBUG', e.args)
        logger.log('FATAL', '不能正常访问外网')
        exit(1)
    check_pre()
|
||||
|
||||
|
||||
def get_maindomain(domain):
    """Return the registered (main) domain of *domain*."""
    return Domain(domain).registered()
|
||||
|
||||
|
||||
def call_massdns(massdns_path, dict_path, ns_path, output_path, log_path,
                 query_type='A', process_num=1, concurrent_num=10000,
                 quiet_mode=False):
    """
    Run the bundled massdns binary over a dictionary file.

    :param massdns_path: path of the massdns executable
    :param dict_path: path of the subdomain dictionary file
    :param ns_path: path of the nameserver list file
    :param output_path: path massdns writes JSON results to
    :param log_path: path massdns writes its error log to
    :param str query_type: DNS record type to query
    :param int process_num: number of massdns processes
    :param int concurrent_num: massdns hashmap size (concurrency)
    :param bool quiet_mode: suppress massdns's own output
    """
    logger.log('INFOR', f'开始执行massdns')
    quiet = '--quiet' if quiet_mode else ''
    status_format = config.brute_status_format
    socket_num = config.brute_socket_num
    resolve_num = config.brute_resolve_num
    # NOTE(review): the command is run through the shell as a single
    # string; paths containing spaces or shell metacharacters would
    # need quoting.
    cmd = f'{massdns_path} {quiet} --status-format {status_format} ' \
          f'--processes {process_num} --socket-count {socket_num} ' \
          f'--hashmap-size {concurrent_num} --resolvers {ns_path} ' \
          f'--resolve-count {resolve_num} --type {query_type} ' \
          f'--flush --output J --outfile {output_path} ' \
          f'--error-log {log_path} {dict_path}'
    logger.log('INFOR', f'执行命令 {cmd}')
    subprocess.run(args=cmd, shell=True)
    logger.log('INFOR', f'结束执行massdns')
|
||||
|
||||
|
||||
def get_massdns_path(massdns_dir):
    """
    Locate the massdns executable for the current platform.

    :param massdns_dir: directory containing bundled massdns builds
    :return: path of the massdns executable (exits when none is found)
    """
    path = config.brute_massdns_path
    if path:
        return path  # an explicitly configured path wins
    system = platform.system().lower()
    machine = platform.machine().lower()
    name = f'massdns_{system}_{machine}'
    if system == 'windows':
        name = name + '.exe'
        if machine == 'amd64':
            massdns_dir = massdns_dir.joinpath('windows', 'x64')
        else:
            # NOTE(review): 'x84' looks like a typo for 'x86', but it must
            # match the bundled directory layout, so it is kept as-is.
            massdns_dir = massdns_dir.joinpath('windows', 'x84')
    path = massdns_dir.joinpath(name)
    if not path.exists():
        logger.log('FATAL', '暂无该系统平台及架构的massdns')
        logger.log('INFOR', '请尝试自行编译massdns并在配置里指定路径')
        exit(0)
    # Fix: the original chmod-ed before the exists() check (raising
    # FileNotFoundError on missing binaries) and set the mode to exactly
    # S_IXUSR, stripping read permission. Add the execute bit instead.
    path.chmod(path.stat().st_mode | S_IXUSR)
    return path
|
File diff suppressed because it is too large
Load Diff
@ -1,7 +0,0 @@
|
||||
223.5.5.5
|
||||
223.6.6.6
|
||||
114.114.114.114
|
||||
114.114.115.115
|
||||
180.76.76.76
|
||||
119.29.29.29
|
||||
182.254.116.116
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@ -1,102 +0,0 @@
|
||||
[
|
||||
"_afpovertcp._tcp.",
|
||||
"_aix._tcp.",
|
||||
"_autodiscover._tcp.",
|
||||
"_caldav._tcp.",
|
||||
"_certificates._tcp.",
|
||||
"_client._smtp.",
|
||||
"_cmp._tcp.",
|
||||
"_crls._tcp.",
|
||||
"_crl._tcp.",
|
||||
"_finger._tcp.",
|
||||
"_ftp._tcp.",
|
||||
"_gc._tcp.",
|
||||
"_h323be._tcp.",
|
||||
"_h323be._udp.",
|
||||
"_h323cs._tcp.",
|
||||
"_h323cs._udp.",
|
||||
"_h323ls._tcp.",
|
||||
"_h323ls._udp.",
|
||||
"_h323rs._tcp.",
|
||||
"_hkps._tcp.",
|
||||
"_hkp._tcp.",
|
||||
"_http._tcp.",
|
||||
"_iax.udp.",
|
||||
"_imaps._tcp.",
|
||||
"_imap._tcp.",
|
||||
"_jabber-client._tcp.",
|
||||
"_jabber-client._udp.",
|
||||
"_jabber._tcp.",
|
||||
"_jabber._udp.",
|
||||
"_kerberos-adm._tcp.",
|
||||
"_kerberos._tcp.",
|
||||
"_kerberos._tcp.dc._msdcs.",
|
||||
"_kerberos._udp.",
|
||||
"_kpasswd._tcp.",
|
||||
"_kpasswd._udp.",
|
||||
"_ldap._tcp.",
|
||||
"_ldap._tcp.dc._msdcs.",
|
||||
"_ldap._tcp.gc._msdcs.",
|
||||
"_ldap._tcp.pdc._msdcs.",
|
||||
"_msdcs.",
|
||||
"_mysqlsrv._tcp.",
|
||||
"_nntp._tcp.",
|
||||
"_ntp._udp.",
|
||||
"_ocsp._tcp.",
|
||||
"_pgpkeys._tcp.",
|
||||
"_pgprevokations._tcp.",
|
||||
"_PKIXREP._tcp.",
|
||||
"_pop3s._tcp.",
|
||||
"_pop3._tcp.",
|
||||
"_sipfederationtls._tcp.",
|
||||
"_sipinternal._tcp.",
|
||||
"_sipinternaltls._tcp.",
|
||||
"_sips._tcp.",
|
||||
"_sip._tcp.",
|
||||
"_sip._tls.",
|
||||
"_sip._udp.",
|
||||
"_smtp._tcp.",
|
||||
"_ssh._tcp.",
|
||||
"_stun._tcp.",
|
||||
"_stun._udp.",
|
||||
"_svcp._tcp.",
|
||||
"_tcp.",
|
||||
"_telnet._tcp.",
|
||||
"_test._tcp.",
|
||||
"_tls.",
|
||||
"_udp.",
|
||||
"_vlmcs._tcp.",
|
||||
"_vlmcs._udp.",
|
||||
"_whois._tcp.",
|
||||
"_wpad._tcp.",
|
||||
"_xmpp-client._tcp.",
|
||||
"_xmpp-client._udp.",
|
||||
"_xmpp-server._tcp.",
|
||||
"_xmpp-server._udp.",
|
||||
"_https._tcp.",
|
||||
"_imap.tcp.",
|
||||
"_kerberos.tcp.dc._msdcs.",
|
||||
"_ldap._tcp.ForestDNSZones.",
|
||||
"_submission._tcp.",
|
||||
"_caldavs._tcp.",
|
||||
"_carddav._tcp.",
|
||||
"_carddavs._tcp.",
|
||||
"_x-puppet._tcp.",
|
||||
"_x-puppet-ca._tcp.",
|
||||
"_domainkey.",
|
||||
"_pkixrep._tcp.",
|
||||
"_cisco-phone-http.",
|
||||
"_cisco-phone-tftp.",
|
||||
"_cisco-uds._tcp.",
|
||||
"_ciscowtp._tcp.",
|
||||
"_collab-edge._tls.",
|
||||
"_cuplogin._tcp.",
|
||||
"_client._smtp._tcp.",
|
||||
"_sftp._tcp.",
|
||||
"_h323rs._udp.",
|
||||
"_sql._tcp.",
|
||||
"_sip._tcp.internal.",
|
||||
"_snmp._udp.",
|
||||
"_rdp._tcp.",
|
||||
"_xmpp-server._udp."
|
||||
]
|
File diff suppressed because it is too large
Load Diff
@ -1,2 +0,0 @@
|
||||
example.com
|
||||
hackfun.org
|
@ -1,16 +0,0 @@
|
||||
#!/usr/bin/env python3
# coding=utf-8

"""
Example usage of the OneForAll subdomain collector.
"""

import client.subdomain.oneforall.oneforall as oneforall


if __name__ == '__main__':
    test = oneforall.OneForAll(target='github.com')
    test.brute = True  # enable brute forcing
    test.req = False  # skip requesting discovered subdomains
    test.takeover = True  # check for subdomain takeover
    test.run()
    result = test.datas
|
@ -1,72 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class CensysAPI(Query):
    """Collect subdomains via the Censys certificate search API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Certificate'
        self.source = "CensysAPIQuery"
        self.addr = 'https://www.censys.io/api/v1/search/certificates'
        self.id = api.censys_api_id
        self.secret = api.censys_api_secret
        self.delay = 3.0  # Censys rate limit: at most one query per 2.5s

    def query(self):
        """
        Query the API page by page and match subdomains.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        data = {'query': f'parsed.names: {self.domain}',
                'page': 1,
                'fields': ['parsed.subject_dn', 'parsed.names'],
                'flatten': True}
        resp = self.post(self.addr, json=data, auth=(self.id, self.secret))
        if not resp:
            return
        resp_json = resp.json()
        status = resp_json.get('status')
        if status != 'ok':
            logger.log('ALERT', status)
            return
        found = self.match(self.domain, str(resp_json))
        self.subdomains = self.subdomains.union(found)
        pages = resp_json.get('metadata').get('pages')
        for page in range(2, pages + 1):
            data['page'] = page
            resp = self.post(self.addr, json=data,
                             auth=(self.id, self.secret))
            if not resp:
                return
            found = self.match(self.domain, str(resp.json()))
            self.subdomains = self.subdomains.union(found)

    def run(self):
        """
        Module entry point.
        """
        if not self.check(self.id, self.secret):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Unified module invocation entry.

    :param str domain: registered domain
    """
    CensysAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,53 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class CertSpotter(Query):
    """Collect subdomains from the CertSpotter certificate API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = domain
        self.module = 'Certificate'
        self.source = 'CertSpotterQuery'
        self.addr = 'https://api.certspotter.com/v1/issuances'

    def query(self):
        """
        Query the API and match subdomains from the response.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'domain': self.domain,
                  'include_subdomains': 'true',
                  'expand': 'dns_names'}
        resp = self.get(self.addr, params)
        if not resp:
            return
        found = utils.match_subdomain(self.domain, str(resp.json()))
        self.subdomains = self.subdomains.union(found)  # merge results

    def run(self):
        """
        Module entry point.
        """
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Unified module invocation entry.

    :param str domain: registered domain
    """
    CertSpotter(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,49 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Crtsh(Query):
    """Collect subdomains from the crt.sh certificate transparency logs."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Certificate'
        self.source = 'CrtshQuery'
        self.addr = 'https://crt.sh/'

    def query(self):
        """
        Query the API and match subdomains from the response.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'q': f'%.{self.domain}', 'output': 'json'}
        resp = self.get(self.addr, params)
        if not resp:
            return
        found = utils.match_subdomain(self.domain, str(resp.json()))
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """
        Module entry point.
        """
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Unified module invocation entry.

    :param str domain: registered domain
    """
    Crtsh(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,51 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Entrust(Query):
    """Collect subdomains from the Entrust certificate search API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Certificate'
        self.source = 'EntrustQuery'
        self.addr = 'https://ctsearch.entrust.com/api/v1/certificates'

    def query(self):
        """
        Query the API and match subdomains from the response.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'fields': 'subjectDN',
                  'domain': self.domain,
                  'includeExpired': 'true'}
        resp = self.get(self.addr, params)
        if not resp:
            return
        found = utils.match_subdomain(self.domain, str(resp.json()))
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """
        Module entry point.
        """
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Unified module invocation entry.

    :param str domain: registered domain
    """
    Entrust(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,53 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Google(Query):
    """Collect subdomains from Google's certificate transparency report."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Certificate'
        self.source = 'GoogleQuery'
        self.addr = 'https://transparencyreport.google.com/' \
                    'transparencyreport/api/v3/httpsreport/ct/certsearch'

    def query(self):
        """
        Query the API and match subdomains from the response.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'include_expired': 'true',
                  'include_subdomains': 'true',
                  'domain': self.domain}
        resp = self.get(self.addr, params)
        if not resp:
            return
        found = utils.match_subdomain(self.domain, resp.text)
        self.subdomains = self.subdomains.union(found)  # merge results

    def run(self):
        """
        Module entry point.
        """
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Unified module invocation entry.

    :param str domain: registered domain
    """
    Google(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,65 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class SpyseAPI(Query):
    """Collect subdomains via the Spyse (CertDB) subdomain API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = domain
        self.module = 'Certificate'
        self.source = 'CertDBAPIQuery'
        self.addr = 'https://api.spyse.com/v1/subdomains'
        self.token = api.spyse_api_token

    def query(self):
        """
        Query the API page by page and match subdomains.
        """
        page_num = 1
        while True:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            params = {'domain': self.domain,
                      'api_token': self.token,
                      'page': page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            resp_json = resp.json()
            found = utils.match_subdomain(self.domain, str(resp_json))
            if not found:  # a page with no subdomains ends the search
                break
            self.subdomains = self.subdomains.union(found)
            page_num += 1
            # Fewer than the 30-per-page maximum means this was the last page.
            if resp_json.get('count') < 30:
                break

    def run(self):
        """
        Module entry point.
        """
        if not self.check(self.token):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    SpyseAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,60 +0,0 @@
|
||||
"""
|
||||
检查crossdomain.xml文件收集子域名
|
||||
"""
|
||||
|
||||
from client.subdomain.oneforall.common.module import Module
|
||||
from client.subdomain.oneforall.common import utils
|
||||
|
||||
|
||||
class CheckCDX(Module):
    """Collect subdomains from the target's crossdomain.xml file."""

    def __init__(self, domain: str):
        Module.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Check'
        self.source = "CrossDomainXml"

    def check(self):
        """Fetch crossdomain.xml over the usual URL variants and match subdomains."""
        urls = [f'http://{self.domain}/crossdomain.xml',
                f'https://{self.domain}/crossdomain.xml',
                f'http://www.{self.domain}/crossdomain.xml',
                f'https://www.{self.domain}/crossdomain.xml']
        for url in urls:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            response = self.get(url, check=False)
            if not response:
                # BUGFIX: was `return` — a single failed URL must not abort
                # the remaining candidate URLs
                continue
            if len(response.content):
                # BUGFIX: merge into the accumulated set instead of
                # overwriting results found at earlier URLs
                self.subdomains = self.subdomains.union(
                    utils.match_subdomain(self.domain, response.text))

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.check()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param domain: target domain
    """
    CheckCDX(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,62 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
检查域名证书收集子域名
|
||||
"""
|
||||
import socket
|
||||
import ssl
|
||||
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.module import Module
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
|
||||
class CheckCert(Module):
    """Collect subdomains from the target's TLS certificate (SAN/CN fields)."""

    def __init__(self, domain):
        Module.__init__(self)
        self.domain = self.register(domain)
        self.port = 443  # ssl port
        self.module = 'Check'
        self.source = 'CertInfo'

    def check(self):
        """Fetch the domain's certificate and match the subdomains it contains."""
        try:
            ctx = ssl.create_default_context()
            # BUGFIX: use the wrapped socket as a context manager so the
            # connection is always closed (the original leaked the socket)
            with ctx.wrap_socket(socket.socket(),
                                 server_hostname=self.domain) as sock:
                sock.connect((self.domain, self.port))
                cert_dict = sock.getpeercert()
        except Exception as e:
            logger.log('DEBUG', e.args)
            return
        subdomains = utils.match_subdomain(self.domain, str(cert_dict))
        self.subdomains = self.subdomains.union(subdomains)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.check()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    CheckCert(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,82 +0,0 @@
|
||||
"""
|
||||
检查内容安全策略收集子域名收集子域名
|
||||
"""
|
||||
import requests
|
||||
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.module import Module
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class CheckCSP(Module):
    """Collect subdomains from the Content-Security-Policy response header."""

    def __init__(self, domain, header):
        Module.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Check'
        self.source = 'ContentSecurityPolicy'
        self.csp_header = header  # response headers supplied by the caller (may be None)

    def grab_header(self):
        """Fetch the target's response headers.

        :return: response headers (empty dict when every URL fails)
        """
        csp_header = dict()
        urls = [f'http://{self.domain}',
                f'https://{self.domain}',
                f'http://www.{self.domain}',
                f'https://www.{self.domain}']
        for url in urls:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            response = self.get(url, check=False)
            if response:
                csp_header = response.headers
                break
        return csp_header

    def check(self):
        """Match subdomains inside the Content-Security-Policy response header."""
        if not self.csp_header:
            self.csp_header = self.grab_header()
        if not self.csp_header:
            logger.log('DEBUG', f'获取{self.domain}域的请求头失败')
            return
        # BUGFIX: read the CSP value from the grabbed response headers
        # (self.csp_header), not from the outgoing request header (self.header),
        # which never contains a Content-Security-Policy field
        csp = self.csp_header.get('Content-Security-Policy')
        if not csp:
            logger.log('DEBUG', f'{self.domain}域的响应头不存在内容安全策略字段')
            return
        self.subdomains = utils.match_subdomain(self.domain, csp)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.check()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain, header=None):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    :param dict or None header: response headers to inspect (fetched if None)
    """
    CheckCSP(domain, header).run()


if __name__ == '__main__':
    resp = requests.get('https://content-security-policy.com/')
    do('google-analytics.com', resp.headers)
|
@ -1,59 +0,0 @@
|
||||
"""
|
||||
检查内容安全策略收集子域名收集子域名
|
||||
"""
|
||||
from client.subdomain.oneforall.common.module import Module
|
||||
from client.subdomain.oneforall.common import utils
|
||||
|
||||
|
||||
class CheckRobots(Module):
    """Collect subdomains from the target's robots.txt file."""

    def __init__(self, domain):
        Module.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Check'
        self.source = 'Robots'

    def check(self):
        """Fetch robots.txt over the usual URL variants and match subdomains."""
        urls = [f'http://{self.domain}/robots.txt',
                f'https://{self.domain}/robots.txt',
                f'http://www.{self.domain}/robots.txt',
                f'https://www.{self.domain}/robots.txt']
        for url in urls:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            response = self.get(url, check=False, allow_redirects=False)
            if not response:
                # BUGFIX: was `return` — a single failed URL must not abort
                # the remaining candidate URLs
                continue
            if len(response.content):
                # BUGFIX: merge into the accumulated set instead of
                # overwriting results found at earlier URLs
                self.subdomains = self.subdomains.union(
                    utils.match_subdomain(self.domain, response.text))

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.check()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    CheckRobots(domain).run()


if __name__ == '__main__':
    do('qq.com')
|
@ -1,73 +0,0 @@
|
||||
"""
|
||||
检查内容安全策略收集子域名收集子域名
|
||||
"""
|
||||
from client.subdomain.oneforall.common.module import Module
|
||||
from client.subdomain.oneforall.common import utils
|
||||
|
||||
|
||||
class CheckRobots(Module):
    """Collect subdomains from the target's sitemap files.

    NOTE(review): class name duplicates the robots.txt module's class —
    presumably a copy-paste leftover; kept unchanged so do() still resolves.
    """

    def __init__(self, domain):
        Module.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Check'
        self.source = 'Sitemap'

    def check(self):
        """Fetch common sitemap locations and match subdomains in each file."""
        urls = [f'http://{self.domain}/sitemap.xml',
                f'https://{self.domain}/sitemap.xml',
                f'http://www.{self.domain}/sitemap.xml',
                f'https://www.{self.domain}/sitemap.xml',
                f'http://{self.domain}/sitemap.txt',
                f'https://{self.domain}/sitemap.txt',
                f'http://www.{self.domain}/sitemap.txt',
                f'https://www.{self.domain}/sitemap.txt',
                f'http://{self.domain}/sitemap.html',
                f'https://{self.domain}/sitemap.html',
                f'http://www.{self.domain}/sitemap.html',
                f'https://www.{self.domain}/sitemap.html',
                f'http://{self.domain}/sitemap_index.xml',
                f'https://{self.domain}/sitemap_index.xml',
                f'http://www.{self.domain}/sitemap_index.xml',
                f'https://www.{self.domain}/sitemap_index.xml']
        for url in urls:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            self.timeout = 10
            response = self.get(url, check=False, allow_redirects=False)
            if not response:
                # BUGFIX: was `return` — a single failed URL must not abort
                # the remaining candidate URLs
                continue
            if len(response.content):
                # BUGFIX: merge into the accumulated set instead of
                # overwriting results found at earlier URLs
                self.subdomains = self.subdomains.union(
                    utils.match_subdomain(self.domain, response.text))

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.check()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    CheckRobots(domain).run()


if __name__ == '__main__':
    do('qq.com')
|
@ -1,62 +0,0 @@
|
||||
import cdx_toolkit
|
||||
from client.subdomain.oneforall.common.crawl import Crawl
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
|
||||
class ArchiveCrawl(Crawl):
    """Crawl the Internet Archive CDX index for subdomains."""

    def __init__(self, domain):
        Crawl.__init__(self)
        self.domain = domain
        self.module = 'Crawl'
        self.source = 'ArchiveCrawl'

    def crawl(self, domain, limit):
        """Fetch up to *limit* archive records for *domain* and match subdomains.

        :param domain: domain to query
        :param limit: maximum number of records to fetch
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        fetcher = cdx_toolkit.CDXFetcher(source='ia')
        pattern = f'*.{domain}/*'
        estimate = fetcher.get_size_estimate(pattern)
        logger.log('DEBUG', f'{pattern} ArchiveCrawl size estimate {estimate}')

        for record in fetcher.iter(pattern, limit=limit):
            if record.data.get('status') in ['301', '302']:
                continue  # skip redirects
            page_url = record.data.get('url')
            found = self.match(self.register(domain), page_url + record.text)
            # merge newly discovered names into the accumulated result set
            self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.crawl(self.domain, 50)
        # crawl each already-discovered subdomain to surface new ones
        for name in self.subdomains:
            if name != self.domain:
                self.crawl(name, 10)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    ArchiveCrawl(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,61 +0,0 @@
|
||||
import cdx_toolkit
|
||||
from tqdm import tqdm
|
||||
|
||||
from client.subdomain.oneforall.common.crawl import Crawl
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class CommonCrawl(Crawl):
    """Crawl the CommonCrawl CDX index for subdomains."""

    def __init__(self, domain):
        Crawl.__init__(self)
        self.domain = domain
        self.module = 'Crawl'
        self.source = 'CommonCrawl'

    def crawl(self, domain, limit):
        """Fetch up to *limit* CommonCrawl records for *domain* and match subdomains.

        :param domain: domain to query
        :param limit: maximum number of records to fetch
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        fetcher = cdx_toolkit.CDXFetcher()
        pattern = f'*.{domain}/*'
        estimate = fetcher.get_size_estimate(pattern)
        print(pattern, 'CommonCrawl size estimate', estimate)

        for record in tqdm(fetcher.iter(pattern, limit=limit), total=limit):
            if record.data.get('status') in ['301', '302']:
                continue  # skip redirects
            found = self.match(self.register(domain), record.text)
            # merge newly discovered names into the accumulated result set
            self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.crawl(self.domain, 50)
        # crawl each already-discovered subdomain to surface new ones
        for name in self.subdomains:
            if name != self.domain:
                self.crawl(name, 10)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    CommonCrawl(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,53 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class BinaryEdgeAPI(Query):
    """Query the BinaryEdge subdomain API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'BinaryEdgeAPIQuery'
        self.addr = 'https://api.binaryedge.io/v2/query/domains/subdomain/'
        self.api = api.binaryedge_api

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.header.update({'X-Key': self.api})
        self.proxy = self.get_proxy(self.source)
        response = self.get(self.addr + self.domain)
        if not response:
            return
        found = self.match(self.domain, str(response.json()))
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point (skipped when no API key is set)."""
        if not self.check(self.api):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    BinaryEdgeAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,57 +0,0 @@
|
||||
import cloudscraper
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class BufferOver(Query):
    """Query the dns.bufferover.run dataset."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'BufferOverQuery'
        self.addr = 'https://dns.bufferover.run/dns?q='

    def query(self):
        """Query the endpoint through cloudscraper and match subdomains."""
        # use cloudscraper to get past the CloudFlare challenge page
        scraper = cloudscraper.create_scraper()
        scraper.interpreter = 'js2py'
        scraper.proxies = self.get_proxy(self.source)
        try:
            response = scraper.get(self.addr + self.domain,
                                   timeout=self.timeout)
        except Exception as e:
            logger.log('ERROR', e.args)
            return
        if response.status_code != 200:
            return
        found = self.match(self.domain, str(response.json()))
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    BufferOver(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,49 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class CeBaidu(Query):
    """Query the Baidu Ce related-sites dataset."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'CeBaiduQuery'
        self.addr = 'https://ce.baidu.com/index/getRelatedSites'

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        payload = {'site_address': self.domain}
        response = self.get(self.addr, payload)
        if not response:
            return
        found = self.match(self.domain, str(response.json()))
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    CeBaidu(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,49 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Chinaz(Query):
    """Query alexa.chinaz.com for subdomains."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'ChinazQuery'
        self.addr = 'https://alexa.chinaz.com/'
    def query(self):
        """Query the site and match subdomains in the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        # BUGFIX: build the URL locally instead of overwriting self.addr —
        # the original appended the domain to self.addr on every call, so a
        # second query() would request '.../example.comexample.com'
        url = self.addr + self.domain
        resp = self.get(url)
        if not resp:
            return
        subdomains = self.match(self.domain, resp.text)
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(subdomains)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    Chinaz(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,53 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class ChinazAPI(Query):
    """Query the Chinaz Alexa API for subdomains."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'ChinazAPIQuery'
        self.addr = 'https://apidata.chinaz.com/CallAPI/Alexa'
        self.api = api.chinaz_api

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        payload = {'key': self.api, 'domainName': self.domain}
        response = self.get(self.addr, payload)
        if not response:
            return
        found = self.match(self.domain, str(response.json()))
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point (skipped when no API key is set)."""
        if not self.check(self.api):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    ChinazAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,53 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class CirclAPI(Query):
    """Query the CIRCL passive-DNS API (basic-auth protected)."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'CirclAPIQuery'
        self.addr = 'https://www.circl.lu/pdns/query/'
        self.user = api.circl_api_username
        self.pwd = api.circl_api_password

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        response = self.get(self.addr + self.domain,
                            auth=(self.user, self.pwd))
        if not response:
            return
        found = self.match(self.domain, str(response.json()))
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point (skipped when credentials are unset)."""
        if not self.check(self.user, self.pwd):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    CirclAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,56 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class DNSdbAPI(Query):
    """Query the Farsight DNSDB rrset lookup API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'DNSdbAPIQuery'
        self.addr = 'https://api.dnsdb.info/lookup/rrset/name/'
        self.api = api.dnsdb_api_key

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.header.update({'X-API-Key': self.api})
        self.proxy = self.get_proxy(self.source)
        response = self.get(f'{self.addr}*.{self.domain}')
        if not response:
            return
        found = utils.match_subdomain(self.domain, response.text)
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point (skipped when no API key is set)."""
        if not self.check(self.api):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    DNSdbAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,58 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class DNSdumpster(Query):
    """Query dnsdumpster.com by posting its search form."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = "DNSdumpsterQuery"
        self.addr = 'https://dnsdumpster.com/'

    def query(self):
        """Fetch the CSRF cookie, submit the search form and match subdomains."""
        self.header = self.get_header()
        self.header.update({'Referer': 'https://dnsdumpster.com'})
        self.proxy = self.get_proxy(self.source)
        first = self.get(self.addr)
        if not first:
            return
        self.cookie = first.cookies
        form = {'csrfmiddlewaretoken': self.cookie.get('csrftoken'),
                'targetip': self.domain}
        second = self.post(self.addr, form)
        if not second:
            return
        found = utils.match_subdomain(self.domain, second.text)
        if found:
            # merge newly found names into the accumulated result set
            self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    DNSdumpster(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,52 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class HackerTarget(Query):
    """Query the HackerTarget hostsearch API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = "HackerTargetQuery"
        self.addr = 'https://api.hackertarget.com/hostsearch/'

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        response = self.get(self.addr, {'q': self.domain})
        if not response:
            return
        if response.status_code != 200:
            return
        found = utils.match_subdomain(self.domain, response.text)
        if found:
            # merge newly found names into the accumulated result set
            self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    HackerTarget(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,49 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class IP138(Query):
    """Query site.ip138.com for subdomains."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'IP138Query'
        self.addr = 'https://site.ip138.com/{domain}/domain.htm'

    def query(self):
        """Query the site and match subdomains in the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        # BUGFIX: format into a local URL instead of overwriting self.addr —
        # the original destroyed the '{domain}' placeholder on the first call,
        # so a repeated query() would use a stale, already-formatted address
        url = self.addr.format(domain=self.domain)
        resp = self.get(url)
        if not resp:
            return
        subdomains = self.match(self.domain, resp.text)
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(subdomains)

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    IP138(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,75 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class IPv4InfoAPI(Query):
    """Query the ipv4info.com subdomain API (paged, key-authenticated)."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'IPv4InfoAPIQuery'
        # NOTE(review): the leading space in this URL is kept as-is — confirm
        # whether it is intentional (it is sent verbatim to the HTTP client)
        self.addr = ' http://ipv4info.com/api_v1/'
        self.api = api.ipv4info_api_key

    def query(self):
        """
        Query the API page by page and match subdomains in each response.
        """
        page = 0
        while True:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            params = {'type': 'SUBDOMAINS', 'key': self.api,
                      'value': self.domain, 'page': page}
            resp = self.get(self.addr, params)
            if not resp:
                return
            if resp.status_code != 200:
                break  # a non-200 status usually means network trouble; stop querying
            try:
                json = resp.json()
            except Exception as e:
                logger.log('DEBUG', e.args)
                break
            subdomains = self.match(self.domain, str(json))
            if not subdomains:
                break
            # merge the newly found subdomains into the accumulated result set
            self.subdomains = self.subdomains.union(subdomains)
            # re-read the list from the JSON body rather than reusing the matched
            # set, which may contain names that do not meet the standard format
            subdomains = json.get('Subdomains')
            if subdomains:
                # the API returns at most 300 subdomains per page;
                # fewer than 300 means this was the last page
                if len(subdomains) < 300:
                    break
            page += 1
            if page >= 50:  # the API allows querying at most 50 pages
                break

    def run(self):
        """
        Module execution entry point (skipped when no API key is set).
        """
        if not self.check(self.api):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    IPv4InfoAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,83 +0,0 @@
|
||||
import hashlib
|
||||
import re
|
||||
import time
|
||||
from urllib import parse
|
||||
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class NetCraft(Query):
    """Query searchdns.netcraft.com search results for subdomains."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'NetCraftQuery'
        self.init = 'https://searchdns.netcraft.com/'
        self.addr = 'https://searchdns.netcraft.com/?restriction=site+contains'
        self.page_num = 1
        self.per_page_num = 20  # NetCraft shows 20 results per page

    def bypass_verification(self):
        """Bypass NetCraft's JavaScript verification.

        :return bool: True when the verification cookie could be computed
        """
        self.header = self.get_header()  # NetCraft checks the User-Agent
        resp = self.get(self.init)
        if not resp:
            return False
        self.cookie = resp.cookies
        # BUGFIX: use .get() so a missing challenge cookie fails gracefully
        # instead of raising KeyError out of query()
        challenge = self.cookie.get('netcraft_js_verification_challenge')
        if challenge is None:
            return False
        cookie_encode = parse.unquote(challenge).encode('utf-8')
        verify_taken = hashlib.sha1(cookie_encode).hexdigest()
        self.cookie['netcraft_js_verification_response'] = verify_taken
        return True

    def query(self):
        """Page through the search results and match subdomains."""
        if not self.bypass_verification():
            return
        last = ''
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            params = {'host': '*.' + self.domain, 'from': self.page_num}
            resp = self.get(self.addr + last, params)
            if not resp:
                return
            subdomains = self.match(self.domain, resp.text)
            if not subdomains:  # stop once a page yields no subdomains
                break
            # merge newly found names into the accumulated result set
            self.subdomains = self.subdomains.union(subdomains)
            if 'Next page' not in resp.text:  # no further pages
                break
            # BUGFIX: guard against a missing '&last=' marker — the original
            # called .group(0) on a possible None and crashed with
            # AttributeError when the page layout changed
            last_match = re.search(r'&last=.*' + self.domain, resp.text)
            if last_match is None:
                break
            last = last_match.group(0)
            self.page_num += self.per_page_num

    def run(self):
        """Module execution entry point."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry-point name so multi-threaded callers can invoke it
    """Unified module invocation entry point.

    :param str domain: target domain
    """
    NetCraft(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,54 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class PassiveDnsAPI(Query):
    """Query a PassiveDNS flint API for subdomains."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'PassiveDnsQuery'
        self.addr = api.passivedns_api_addr or 'http://api.passivedns.cn'
        self.token = api.passivedns_api_token

    def query(self):
        """Query the API endpoint and match subdomains in the response."""
        self.header = self.get_header()
        self.header.update({'X-AuthToken': self.token})
        self.proxy = self.get_proxy(self.source)
        response = self.get(self.addr + '/flint/rrset/*.' + self.domain)
        if not response:
            return
        found = self.match(self.domain, str(response.json()))
        # merge newly found names into the accumulated result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Module execution entry point (skipped when no API address is set)."""
        if not self.check(self.addr):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    PassiveDnsAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,56 +0,0 @@
|
||||
import random
|
||||
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class PTRArchive(Query):
    """Collect subdomains from the ptrarchive.com dataset."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = "PTRArchiveQuery"
        self.addr = 'http://ptrarchive.com/tools/search4.htm'

    def query(self):
        """Query the site and match subdomains out of the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        # Bypass the front-end JS verification on the home page
        self.cookie = {'pa_id': str(random.randint(0, 1000000000))}
        params = {'label': self.domain, 'date': 'ALL'}
        resp = self.get(self.addr, params)
        if not resp:
            return
        if resp.status_code == 200:
            found = utils.match_subdomain(self.domain, resp.text)
            if found:
                # Merge discovered subdomains into the running result set
                self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    PTRArchive(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,61 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class QianXun(Query):
    """Collect subdomains from the dnsscan.cn query service."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = domain
        self.module = 'Query'
        self.source = 'QianXunQuery'

    def query(self):
        """Query the service page by page and match subdomains."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)

        page = 1
        while True:
            data = {'ecmsfrom': '',
                    'show': '',
                    'num': '',
                    'classid': '0',
                    'keywords': self.domain}
            url = f'https://www.dnsscan.cn/dns.html?' \
                  f'keywords={self.domain}&page={page}'
            resp = self.post(url, data)
            if not resp:
                break
            found = self.match(self.domain, resp.text)
            self.subdomains = self.subdomains.union(found)
            # Stop when there is no pager, or the "next" link is disabled
            if '<div id="page" class="pagelist">' not in resp.text:
                break
            if '<li class="disabled"><span>»</span></li>' in resp.text:
                break
            page += 1

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    QianXun(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,50 +0,0 @@
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class RapidDNS(Query):
    """Collect subdomains from the rapiddns.io dataset."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'RapidDNSQuery'

    def query(self):
        """Query the site and match subdomains out of the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        url = f'http://rapiddns.io/subdomain/{self.domain}'
        params = {'full': '1'}
        resp = self.get(url, params)
        if not resp:
            return
        found = utils.match_subdomain(self.domain, resp.text)
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    RapidDNS(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,49 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Riddler(Query):
    """Collect subdomains from the riddler.io search service."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'RiddlerQuery'
        self.addr = 'https://riddler.io/search'

    def query(self):
        """Query the service and match subdomains out of the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'q': 'pld:' + self.domain}
        resp = self.get(self.addr, params)
        if not resp:
            return
        found = self.match(self.domain, resp.text)
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    Riddler(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,63 +0,0 @@
|
||||
import json
|
||||
import time
|
||||
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Robtex(Query):
    """Collect subdomains via Robtex passive-DNS forward/reverse lookups."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = "RobtexQuery"
        self.addr = 'https://freeapi.robtex.com/pdns/'

    def query(self):
        """Query forward records, then reverse-resolve each A/AAAA answer."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        resp = self.get(self.addr + 'forward/' + self.domain)
        if not resp:
            return
        # One JSON document per line in the response body
        records = [json.loads(line) for line in resp.text.splitlines()]
        for record in records:
            if record.get('rrtype') not in ('A', 'AAAA'):
                continue
            time.sleep(self.delay)  # Robtex rate-limits queries
            ip = record.get('rrdata')
            resp = self.get(self.addr + 'reverse/' + ip)
            if not resp:
                return
            found = self.match(self.domain, resp.text)
            if found:
                # Merge discovered subdomains into the running result set
                self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    Robtex(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,57 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class SecurityTrailsAPI(Query):
    """Collect subdomains from the SecurityTrails API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'SecurityTrailsAPIQuery'
        self.addr = 'https://api.securitytrails.com/v1/domain/'
        self.api = api.securitytrails_api
        self.delay = 2  # SecurityTrails requires at least 2s between queries

    def query(self):
        """
        Query the API and collect subdomains.

        Fix: the original indexed ``resp.json()['subdomains']`` directly,
        which raises ``KeyError`` when the API returns an error payload
        (quota exceeded, invalid key). Use ``.get`` and bail out instead.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'apikey': self.api}
        url = f'{self.addr}{self.domain}/subdomains'
        resp = self.get(url, params)
        if not resp:
            return
        prefixes = resp.json().get('subdomains')
        if not prefixes:  # error payload or no results
            return
        subdomains = {f'{prefix}.{self.domain}' for prefix in prefixes}
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(subdomains)

    def run(self):
        """Entry point: verify the API key, query, then persist results."""
        if not self.check(self.api):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    SecurityTrailsAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,60 +0,0 @@
|
||||
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class SiteDossier(Query):
    """Collect subdomains from the sitedossier.com dataset."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'SiteDossierQuery'
        self.addr = 'http://www.sitedossier.com/parentdomain/'
        self.delay = 2
        self.page_num = 1            # item index of the current page
        self.per_page_num = 100      # results per page

    def query(self):
        """Query the site page by page and match subdomains."""
        while True:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            url = f'{self.addr}{self.domain}/{self.page_num}'
            resp = self.get(url)
            if not resp:
                return
            found = self.match(self.domain, resp.text)
            if not found:  # stop when the search finds no subdomains
                break
            # Merge discovered subdomains into the running result set
            self.subdomains = self.subdomains.union(found)
            # Stop when the page has no "next" link
            if 'Show next 100 items' not in resp.text:
                break
            self.page_num += self.per_page_num

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    SiteDossier(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,56 +0,0 @@
|
||||
import cloudscraper
|
||||
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class ThreatCrowd(Query):
    """Collect subdomains from the ThreatCrowd report API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'ThreatCrowdQuery'
        self.addr = 'https://www.threatcrowd.org/searchApi' \
                    '/v2/domain/report?domain='

    def query(self):
        """Query the API (behind CloudFlare) and match subdomains."""
        # Use cloudscraper to get past the CloudFlare challenge
        scraper = cloudscraper.create_scraper()
        scraper.interpreter = 'js2py'
        scraper.proxies = self.get_proxy(self.source)
        url = self.addr + self.domain
        try:
            resp = scraper.get(url, timeout=self.timeout)
        except Exception as e:
            logger.log('ERROR', e.args)
            return
        if resp.status_code != 200:
            return
        found = self.match(self.domain, str(resp.json()))
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    ThreatCrowd(domain).run()


if __name__ == '__main__':
    do('mi.com')
|
@ -1,66 +0,0 @@
|
||||
import time
|
||||
from client.subdomain.oneforall.config import logger
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class WZPCQuery(Query):
    """Collect subdomains from the WZPC database search service."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'WZPCQuery'

    def query(self):
        """Query the service page by page and match subdomains."""
        base_addr = 'http://114.55.181.28/check_web/' \
                    'databaseInfo_mainSearch.action'
        page = 1
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            params = {'isSearch': 'true', 'searchType': 'url',
                      'term': self.domain, 'pageNo': page}
            try:
                resp = self.get(base_addr, params)
            except Exception as e:
                logger.log('ERROR', e.args)
                break
            if not resp:
                break
            found = self.match(self.domain, resp.text)
            self.subdomains = self.subdomains.union(found)
            if not found:  # stop when a page yields nothing new
                break
            if page > 10:  # hard cap on pagination
                break
            page += 1

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):
    """
    Module-level entry point.

    :param str domain: target domain
    """
    WZPCQuery(domain).run()


if __name__ == '__main__':
    do('sc.gov.cn')
    do('bkzy.org')
|
@ -1,50 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class Ximcx(Query):
    """Collect subdomains from the sbd.ximcx.cn dataset service."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'XimcxQuery'
        self.addr = 'http://sbd.ximcx.cn/DomainServlet'

    def query(self):
        """Query the service and match subdomains out of the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        data = {'domain': self.domain}
        resp = self.post(self.addr, data=data)
        if not resp:
            return
        payload = resp.json()
        found = self.match(self.domain, str(payload))
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    Ximcx(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,35 +0,0 @@
|
||||
from client.subdomain.oneforall.common.lookup import Lookup
|
||||
|
||||
|
||||
class QueryMX(Lookup):
    """Collect subdomains from the target's DNS MX records."""

    def __init__(self, domain):
        Lookup.__init__(self)
        self.domain = self.register(domain)
        self.module = 'dnsquery'
        self.source = "QueryMX"
        self.type = 'MX'  # DNS record type used for collection

    def run(self):
        """Entry point: query MX records, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    QueryMX(domain).run()


if __name__ == '__main__':
    do('cuit.edu.cn')
|
@ -1,35 +0,0 @@
|
||||
from client.subdomain.oneforall.common.lookup import Lookup
|
||||
|
||||
|
||||
class QueryNS(Lookup):
    """Collect subdomains from the target's DNS NS records."""

    def __init__(self, domain):
        Lookup.__init__(self)
        self.domain = self.register(domain)
        self.module = 'dnsquery'
        self.source = "QueryNS"
        self.type = 'NS'  # DNS record type used for collection

    def run(self):
        """Entry point: query NS records, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    QueryNS(domain).run()


if __name__ == '__main__':
    do('cuit.edu.cn')
|
@ -1,35 +0,0 @@
|
||||
from client.subdomain.oneforall.common.lookup import Lookup
|
||||
|
||||
|
||||
class QuerySOA(Lookup):
    """Collect subdomains from the target's DNS SOA records."""

    def __init__(self, domain):
        Lookup.__init__(self)
        self.domain = self.register(domain)
        self.module = 'dnsquery'
        self.source = "QuerySOA"
        self.type = 'SOA'  # DNS record type used for collection

    def run(self):
        """Entry point: query SOA records, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    QuerySOA(domain).run()


if __name__ == '__main__':
    do('cuit.edu.cn')
|
@ -1,95 +0,0 @@
|
||||
"""
|
||||
通过枚举域名常见的SRV记录并做查询来发现子域
|
||||
"""
|
||||
|
||||
import json
|
||||
import queue
|
||||
import threading
|
||||
|
||||
from client.subdomain.oneforall.common import utils
|
||||
from client.subdomain.oneforall.common.module import Module
|
||||
from client.subdomain.oneforall.config import data_storage_dir, logger
|
||||
|
||||
|
||||
class BruteSRV(Module):
    """Discover subdomains by enumerating common DNS SRV record names."""

    def __init__(self, domain):
        Module.__init__(self)
        self.domain = self.register(domain)
        self.module = 'dnsquery'
        self.source = "BruteSRV"
        self.type = 'SRV'  # DNS record type used for collection
        self.thread_num = 10
        self.names_que = queue.Queue()
        self.answers_que = queue.Queue()

    def gen_names(self):
        """Load the common SRV prefixes and queue candidate names."""
        path = data_storage_dir.joinpath('srv_prefixes.json')
        with open(path, encoding='utf-8', errors='ignore') as file:
            prefixes = json.load(file)
        for prefix in prefixes:
            self.names_que.put(prefix + self.domain)

    def brute(self):
        """Enumerate the SRV names with a pool of worker threads."""
        self.gen_names()

        for _ in range(self.thread_num):
            worker = BruteThread(self.names_que, self.answers_que)
            worker.daemon = True
            worker.start()

        self.names_que.join()  # wait until every queued name is resolved

        while not self.answers_que.empty():
            answer = self.answers_que.get()
            if answer is None:  # lookup failed for this name
                continue
            for item in answer:
                record = str(item)
                found = utils.match_subdomain(self.domain, record)
                self.subdomains = self.subdomains.union(found)
                self.gen_record(found, record)

    def run(self):
        """Entry point: brute SRV names, then persist the results."""
        self.begin()
        self.brute()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
class BruteThread(threading.Thread):
    """Worker thread: pull names from a queue and resolve their SRV records."""

    def __init__(self, names_que, answers_que):
        threading.Thread.__init__(self)
        self.names_que = names_que
        self.answers_que = answers_que

    def run(self):
        while True:
            name = self.names_que.get()
            self.answers_que.put(utils.dns_query(name, 'SRV'))
            self.names_que.task_done()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    BruteSRV(domain).run()


if __name__ == '__main__':
    do('zonetransfer.me')
    # do('example.com')
|
@ -1,35 +0,0 @@
|
||||
from client.subdomain.oneforall.common.lookup import Lookup
|
||||
|
||||
|
||||
class QueryTXT(Lookup):
    """Collect subdomains from the target's DNS TXT records."""

    def __init__(self, domain):
        Lookup.__init__(self)
        self.domain = self.register(domain)
        self.module = 'dnsquery'
        self.source = "QueryTXT"
        self.type = 'TXT'  # DNS record type used for collection

    def run(self):
        """Entry point: query TXT records, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    QueryTXT(domain).run()


if __name__ == '__main__':
    do('cuit.edu.cn')
|
@ -1,58 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class AlienVault(Query):
    """Collect subdomains from the AlienVault OTX indicators API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Intelligence'
        self.source = 'AlienVaultQuery'

    def query(self):
        """Query the passive-DNS and URL-list endpoints, matching subdomains."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)

        base = 'https://otx.alienvault.com/api/v1/indicators/domain'
        # Same handling for both endpoints: fetch, stringify JSON, match
        for endpoint in ('passive_dns', 'url_list'):
            resp = self.get(f'{base}/{self.domain}/{endpoint}')
            if not resp:
                return
            found = self.match(self.domain, str(resp.json()))
            self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    AlienVault(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,56 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class RiskIQ(Query):
    """Collect subdomains from the RiskIQ PassiveTotal enrichment API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Intelligence'
        self.source = 'RiskIQAPIQuery'
        self.addr = 'https://api.passivetotal.org/v2/enrichment/subdomains'
        self.user = api.riskiq_api_username
        self.key = api.riskiq_api_key

    def query(self):
        """
        Query the API and collect subdomains.

        Fixes: the original mapped over ``data.get('subdomains')`` without a
        guard, raising ``TypeError`` when the key was absent (error payload),
        and it *overwrote* ``self.subdomains`` instead of merging into it the
        way every other module does.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'query': self.domain}
        resp = self.get(url=self.addr,
                        params=params,
                        auth=(self.user, self.key))
        if not resp:
            return
        names = resp.json().get('subdomains')
        if not names:  # error payload or no results
            return
        subdomains = {f'{name}.{self.domain}' for name in names}
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(subdomains)

    def run(self):
        """Entry point: verify credentials, query, then persist results."""
        if not self.check(self.user, self.key):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    RiskIQ(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,54 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class ThreatBookAPI(Query):
    """Collect subdomains from the ThreatBook domain-query API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Intelligence'
        self.source = 'ThreatBookAPIQuery'
        self.addr = 'https://x.threatbook.cn/api/v1/domain/query'
        self.key = api.threatbook_api_key

    def query(self):
        """Query the API and match subdomains out of the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'apikey': self.key,
                  'domain': self.domain,
                  'field': 'sub_domains'}
        resp = self.post(self.addr, params)
        if not resp:
            return
        found = self.match(self.domain, str(resp.json()))
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: verify the API key, query, then persist results."""
        if not self.check(self.key):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    ThreatBookAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,50 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class ThreatMiner(Query):
    """Collect subdomains from the ThreatMiner data API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Intelligence'
        self.source = 'ThreatMinerQuery'
        self.addr = 'https://www.threatminer.org/getData.php'

    def query(self):
        """Query the API and match subdomains out of the response."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'e': 'subdomains_container',
                  'q': self.domain, 't': 0, 'rt': 10}
        resp = self.get(self.addr, params)
        if not resp:
            return
        found = self.match(self.domain, resp.text)
        # Merge discovered subdomains into the running result set
        self.subdomains = self.subdomains.union(found)

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    ThreatMiner(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,71 +0,0 @@
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
'''
|
||||
最多查询100条
|
||||
'''
|
||||
|
||||
|
||||
class VirusTotal(Query):
    """Collect subdomains from the VirusTotal web-UI API (at most ~100)."""

    def __init__(self, domain):
        Query.__init__(self)
        self.source = 'VirusTotalQuery'
        self.module = 'Intelligence'
        self.addr = 'https://www.virustotal.com/ui/domains/{}/subdomains'
        self.domain = self.register(domain)

    def query(self):
        """
        Query the API cursor by cursor and collect subdomains.

        Fix: the original reused the name ``data`` both for the decoded
        response and as the loop variable over the result records, so after
        the loop ``data.get('meta')`` was evaluated on the *last record*
        instead of the response — the pagination cursor was never found and
        only the first page was ever fetched. Also guard an empty cursor so
        the loop cannot re-request the same page forever.
        """
        next_cursor = ''
        while True:
            self.header = self.get_header()
            self.header.update({'Referer': 'https://www.virustotal.com/',
                                'TE': 'Trailers'})
            self.proxy = self.get_proxy(self.source)
            params = {'limit': '40', 'cursor': next_cursor}
            resp = self.get(url=self.addr.format(self.domain), params=params)
            if not resp:
                return
            data = resp.json()
            subdomains = set()
            records = data.get('data')
            if records:
                for record in records:  # distinct name: do not shadow `data`
                    subdomain = record.get('id')
                    if subdomain:
                        subdomains.add(subdomain)
            else:
                break
            # Merge discovered subdomains into the running result set
            self.subdomains = self.subdomains.union(subdomains)
            meta = data.get('meta')
            if not meta:
                break
            next_cursor = meta.get('cursor')
            if not next_cursor:  # last page reached
                break

    def run(self):
        """Entry point: query the source, then persist the results."""
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    VirusTotal(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,55 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.query import Query
|
||||
|
||||
|
||||
class VirusTotalAPI(Query):
    """Collect subdomains from the VirusTotal v2 domain-report API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Intelligence'
        self.source = 'VirusTotalAPIQuery'
        self.addr = 'https://www.virustotal.com/vtapi/v2/domain/report'
        self.key = api.virustotal_api_key

    def query(self):
        """Query the API and collect the reported subdomains."""
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'apikey': self.key, 'domain': self.domain}
        resp = self.get(self.addr, params)
        if not resp:
            return
        names = resp.json().get('subdomains')
        if names:
            # Merge discovered subdomains into the running result set
            self.subdomains = self.subdomains.union(set(names))

    def run(self):
        """Entry point: verify the API key, query, then persist results."""
        if not self.check(self.key):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name, convenient for threaded callers
    """
    Module-level entry point.

    :param str domain: target domain
    """
    VirusTotalAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,81 +0,0 @@
|
||||
import time
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Ask(Search):
    """Collect subdomains via the Ask search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'AskSearch'
        self.addr = 'https://www.search.ask.com/web'
        self.limit_num = 200    # cap on the number of search results
        self.per_page_num = 10  # results shown per page by default

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: target domain
        :param str filtered_subdomain: subdomain exclusion clause
        :param bool full_search: keep searching even on repeated results
        """
        self.page_num = 1
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'q': query, 'page': self.page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            found = self.match(domain, resp.text)
            if not found:
                break
            if not full_search:
                # Stop once a page yields nothing but already-known results
                if found.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(found)
            self.page_num += 1
            if '>Next<' not in resp.text:  # no further pages
                break

    def run(self):
        """Entry point: full search, filtered re-search, optional recursion."""
        self.begin()
        self.search(self.domain, full_search=True)
        # Exclude over-represented subdomains to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # Recursively search the next layers of subdomains
        if self.recursive_search:
            # Layer 1 was already searched above, so the effective depth
            # for the current recursion level is layer_num + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # Only descend into names exactly one layer deeper
                    depth = subdomain.count('.') - self.domain.count('.')
                    if depth == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Ask(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,109 +0,0 @@
|
||||
import time
|
||||
from bs4 import BeautifulSoup
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Baidu(Search):
    """Collect subdomains from the Baidu search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.module = 'Search'
        self.source = 'BaiduSearch'
        self.init = 'https://www.baidu.com/'
        self.addr = 'https://www.baidu.com/s'
        self.domain = domain
        self.limit_num = 750  # upper bound on number of results to fetch

    def redirect_match(self, domain, html):
        """
        Extract the redirect URLs from a result page and resolve each one
        (via a head request on the redirect link) to recover subdomains.

        :param domain: registered domain to match against
        :param html: response body of a search result page
        :return: set of matched subdomains
        """
        bs = BeautifulSoup(html, 'html.parser')
        subdomains_all = set()
        # collect every redirect URL shown in the search result list
        for find_res in bs.find_all('a', {'class': 'c-showurl'}):
            url = find_res.get('href')
            subdomains = self.match_location(domain, url)
            subdomains_all = subdomains_all.union(subdomains)
        return subdomains_all

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 0  # reset to 0 for secondary searches
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'wd': query,
                      'pn': self.page_num,
                      'rn': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            if len(domain) > 12:  # work around Baidu truncating long domains in results
                # resolve the real link from the redirect URL's Location header
                subdomains = self.redirect_match(domain, resp.text)
            else:
                subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            # merge the page's results into the accumulated set
            self.subdomains = self.subdomains.union(subdomains)
            self.page_num += self.per_page_num
            # stop when the page does not link to a next page
            if '&pn={next_pn}&'.format(next_pn=self.page_num) not in resp.text:
                break
            if self.page_num >= self.limit_num:  # result count limit
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()
        self.search(self.domain, full_search=True)
        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Baidu(domain).run()


if __name__ == '__main__':
    do('huayunshuzi.com')
|
@ -1,92 +0,0 @@
|
||||
import time
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Bing(Search):
    """Collect subdomains from the Bing search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'BingSearch'
        self.init = 'https://www.bing.com/'
        self.addr = 'https://www.bing.com/search'
        self.limit_num = 1000  # upper bound on number of results to fetch

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 0  # reset to 0 for secondary searches
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        resp = self.get(self.init)
        if not resp:
            return
        self.cookie = resp.cookies  # Bing requires the session cookie when searching
        while True:
            time.sleep(self.delay)
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'q': query, 'first': self.page_num,
                      'count': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            # merge the page's results into the accumulated set
            self.subdomains = self.subdomains.union(subdomains)
            # stop when the page does not link to a next page
            if '<div class="sw_next">' not in resp.text:
                break
            self.page_num += self.per_page_num
            if self.page_num >= self.limit_num:  # result count limit
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()
        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Bing(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,92 +0,0 @@
|
||||
import time
|
||||
import api
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class BingAPI(Search):
    """Collect subdomains via the Bing Web Search API (v7)."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'BingAPISearch'
        self.addr = 'https://api.cognitive.microsoft.com/' \
                    'bing/v7.0/search'
        self.id = api.bing_api_id
        self.key = api.bing_api_key
        self.limit_num = 1000  # Bing's result limit per search query
        self.delay = 1  # Bing custom search rate limit: 1 second between calls

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 0  # reset to 0 for secondary searches
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            # FIX: the original replaced self.header wholesale, discarding the
            # randomized headers from get_header(); merge the API key instead.
            self.header.update({'Ocp-Apim-Subscription-Key': self.key})
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'q': query, 'safesearch': 'Off',
                      'count': self.per_page_num,
                      'offset': self.page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(domain, str(resp.json()))
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            # merge the page's results into the accumulated set
            self.subdomains = self.subdomains.union(subdomains)
            self.page_num += self.per_page_num
            if self.page_num >= self.limit_num:  # result count limit
                break

    def run(self):
        """Module entry point: check keys, search, filter, recurse, persist."""
        if not self.check(self.id, self.key):
            return
        self.begin()
        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    BingAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,87 +0,0 @@
|
||||
import random
|
||||
import time
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Exalead(Search):
    """Collect subdomains from the Exalead search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = "ExaleadSearch"
        self.addr = "http://www.exalead.com/search/web/results/"
        self.per_page_num = 30

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 0
        while True:
            # randomized delay to reduce the chance of being rate limited
            self.delay = random.randint(1, 5)
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'q': query, 'elements_per_page': '30',
                      "start_index": self.page_num}
            resp = self.get(url=self.addr, params=params)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(subdomains)
            self.page_num += self.per_page_num
            if self.page_num > 1999:  # hard cap on the result offset
                break
            # stop when the page does not link to a next page
            if 'title="Go to the next page"' not in resp.text:
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()

        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            statement = statement.replace('-site', 'and -site')
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Exalead(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,73 +0,0 @@
|
||||
import base64
|
||||
import time
|
||||
|
||||
import api
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class FoFa(Search):
    """Collect subdomains via the FOFA search API."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'FoFaAPISearch'
        self.addr = 'https://fofa.so/api/v1/search/all'
        self.delay = 1
        self.email = api.fofa_api_email
        self.key = api.fofa_api_key

    def search(self):
        """
        Query the API page by page and match subdomains in the responses.
        """
        self.page_num = 1
        # FOFA expects the query base64-encoded in the qbase64 parameter
        subdomain_encode = f'domain={self.domain}'.encode('utf-8')
        query_data = base64.b64encode(subdomain_encode)
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = {'email': self.email,
                     'key': self.key,
                     'qbase64': query_data,
                     'page': self.page_num,
                     'size': 10000}
            resp = self.get(self.addr, query)
            if not resp:
                return
            resp_json = resp.json()
            subdomains = self.match(self.domain, str(resp_json))
            if not subdomains:  # stop when no subdomains are found
                break
            self.subdomains = self.subdomains.union(subdomains)
            size = resp_json.get('size')
            # FIX: an error response has no 'size' key, so size may be None
            # and the original `size < 10000` raised TypeError; bail out on
            # anything that is not a full page of results.
            if not isinstance(size, int) or size < 10000:
                break
            self.page_num += 1

    def run(self):
        """Module entry point: check keys, search, then persist results."""
        if not self.check(self.email, self.key):
            return
        self.begin()
        self.search()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    FoFa(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,74 +0,0 @@
|
||||
import time
|
||||
from bs4 import BeautifulSoup
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class Gitee(Search):
    """Collect subdomains from Gitee code search."""

    def __init__(self, domain):
        Search.__init__(self)
        self.source = 'GiteeSearch'
        self.module = 'Search'
        self.addr = 'https://search.gitee.com/'
        self.domain = self.register(domain)
        self.header = self.get_header()

    def search(self, full_search=False):
        """
        Query the interface page by page and match subdomains.

        :param bool full_search: whether to do a full (exhaustive) search
        """
        page_num = 1
        while True:
            time.sleep(self.delay)
            params = {'pageno': page_num, 'q': self.domain, 'type': 'code'}
            try:
                resp = self.get(self.addr, params=params)
            except Exception as e:
                logger.log('ERROR', e.args)
                break
            if not resp:
                break
            if resp.status_code != 200:
                logger.log('ERROR', f'{self.source}模块搜索出错')
                break
            if 'class="empty-box"' in resp.text:  # no results page
                break
            soup = BeautifulSoup(resp.text, 'html.parser')
            subdomains = self.match(self.domain, soup.text)
            if not subdomains:
                break
            if not full_search:
                # FIX: the original merged subdomains into self.subdomains
                # BEFORE this check, making issubset() always true and
                # stopping every non-full search after the first page.
                # The duplicate-page check must run before the union.
                if subdomains.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(subdomains)
            # stop when the next-page link is disabled
            if '<li class="disabled"><a href="###">' in resp.text:
                break
            if page_num > 100:  # hard cap on pages
                break
            page_num += 1

    def run(self):
        """Module entry point: search, then persist results."""
        self.begin()
        self.search()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Gitee(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,108 +0,0 @@
|
||||
import requests
|
||||
import api
|
||||
import json
|
||||
from client.subdomain.oneforall.common.utils import match_subdomain
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class GithubAPI(Search):
    """Collect subdomains via the GitHub code search API."""

    def __init__(self, domain):
        Search.__init__(self)
        self.source = 'GithubAPISearch'
        self.module = 'Search'
        self.addr = 'https://api.github.com/search/code'
        self.domain = self.register(domain)
        self.session = requests.Session()
        self.auth_url = 'https://api.github.com'
        self.token = api.github_api_token

    def auth_github(self):
        """
        Authenticate against the GitHub API with the configured token.

        :return: False on authentication failure, True on success
        """
        self.session.headers.update({'Authorization': 'token ' + self.token})
        try:
            resp = self.session.get(self.auth_url)
        except Exception as e:
            logger.log('ERROR', e.args)
            return False
        if resp.status_code != 200:
            resp_json = resp.json()
            msg = resp_json.get('message')
            logger.log('ERROR', msg)
            return False
        else:
            return True

    def search(self):
        """
        Query the code search API page by page and match subdomains.
        """
        self.session.headers = self.get_header()
        self.session.proxies = self.get_proxy(self.source)
        self.session.verify = self.verify
        # request text-match fragments so matched code snippets are returned
        self.session.headers.update(
            {'Accept': 'application/vnd.github.v3.text-match+json'})

        if not self.auth_github():
            logger.log('ERROR', f'{self.source}模块登录失败')
            return
        page = 1
        while True:
            params = {'q': self.domain, 'per_page': 100,
                      'page': page, 'sort': 'indexed'}
            try:
                resp = self.session.get(self.addr, params=params)
            except Exception as e:
                logger.log('ERROR', e.args)
                break
            if resp.status_code != 200:
                logger.log('ERROR', f'{self.source}模块搜索出错')
                break
            subdomains = match_subdomain(self.domain, resp.text)
            if not subdomains:  # stop when no subdomains are found
                break
            self.subdomains = self.subdomains.union(subdomains)
            page += 1
            try:
                resp_json = resp.json()
            except Exception as e:
                logger.log('ERROR', e.args)
                break
            total_count = resp_json.get('total_count')
            if not isinstance(total_count, int):
                break
            if page * 100 > total_count:  # all results consumed
                break
            if page * 100 > 1000:  # GitHub caps code search at 1000 results
                break

    def run(self):
        """Module entry point: check token, search, then persist results."""
        if not self.check(self.token):
            return
        self.begin()
        self.search()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    GithubAPI(domain).run()


if __name__ == '__main__':
    do('exmaple.com')
|
@ -1,94 +0,0 @@
|
||||
import random
|
||||
import time
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Google(Search):
    """Collect subdomains from the Google search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'GoogleSearch'
        self.init = 'https://www.google.com/'
        self.addr = 'https://www.google.com/search'

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        page_num = 1
        per_page_num = 50
        self.header = self.get_header()
        # Googlebot UA and referer reduce the chance of being blocked
        self.header.update({'User-Agent': 'Googlebot',
                            'Referer': 'https://www.google.com'})
        self.proxy = self.get_proxy(self.source)
        resp = self.get(self.init)
        if not resp:
            return
        self.cookie = resp.cookies  # session cookie required for searching
        while True:
            # randomized delay to reduce the chance of being rate limited
            self.delay = random.randint(1, 5)
            time.sleep(self.delay)
            self.proxy = self.get_proxy(self.source)
            word = 'site:' + domain + filtered_subdomain
            payload = {'q': word, 'start': page_num, 'num': per_page_num,
                       'filter': '0', 'btnG': 'Search', 'gbv': '1', 'hl': 'en'}
            resp = self.get(url=self.addr, params=payload)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(subdomains)
            page_num += per_page_num
            # stop when the page does not link to the next offset
            if 'start=' + str(page_num) not in resp.text:
                break
            if '302 Moved' in resp.text:  # redirected — likely blocked
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()

        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Google(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,88 +0,0 @@
|
||||
import time
|
||||
import api
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class GoogleAPI(Search):
    """Collect subdomains via the Google Custom Search API."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'GoogleAPISearch'
        self.addr = 'https://www.googleapis.com/customsearch/v1'
        self.delay = 1
        self.key = api.google_api_key
        self.cx = api.google_api_cx
        self.per_page_num = 10  # the API returns at most 10 results per request

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 1
        while True:
            word = 'site:' + domain + filtered_subdomain
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            params = {'key': self.key, 'cx': self.cx,
                      'q': word, 'fields': 'items/link',
                      'start': self.page_num, 'num': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(domain, str(resp.json()))
            if not subdomains:  # stop when no subdomains are found
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(subdomains)
            self.page_num += self.per_page_num
            if self.page_num > 100:  # the free API only serves the first 100 results
                break

    def run(self):
        """Module entry point: check keys, search, filter, recurse, persist."""
        if not self.check(self.cx, self.key):
            return
        self.begin()
        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    GoogleAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,60 +0,0 @@
|
||||
import api
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class ShodanAPI(Search):
    """Collect subdomains via the Shodan host search API."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Search'
        self.source = 'ShodanAPISearch'
        self.addr = 'https://api.shodan.io/shodan/host/search'
        self.key = api.shodan_api_key

    def search(self):
        """
        Query the API page by page and match subdomains in the responses.
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        query = 'hostname:.' + self.domain
        page = 1
        while True:
            params = {'key': self.key, 'page': page, 'query': query,
                      'minify': True, 'facets': {'hostnames'}}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(self.domain, resp.text)
            if not subdomains:  # stop when no subdomains are found
                break
            # FIX: the original had a dead `if subdomains:` right after the
            # break above and no other stop condition, so a page that kept
            # returning already-known results paged forever; stop once a
            # page contributes nothing new.
            if subdomains.issubset(self.subdomains):
                break
            self.subdomains = self.subdomains.union(subdomains)
            page += 1

    def run(self):
        """Module entry point: check key, search, then persist results."""
        if not self.check(self.key):
            return
        self.begin()
        self.search()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    ShodanAPI(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,87 +0,0 @@
|
||||
import time
|
||||
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class So(Search):
    """Collect subdomains from the 360 So search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'SoSearch'
        self.addr = 'https://www.so.com/s'
        self.limit_num = 640  # upper bound on number of results to fetch
        self.per_page_num = 10  # results shown per page by default

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        page_num = 1
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            word = 'site:' + domain + filtered_subdomain
            payload = {'q': word, 'pn': page_num}
            resp = self.get(url=self.addr, params=payload)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(subdomains)
            page_num += 1
            # stop when the page does not link to a next page
            if '<a id="snext"' not in resp.text:
                break
            # FIX: the limit check read self.page_num, which this loop never
            # updates, so limit_num was never applied; use the loop counter.
            if page_num * self.per_page_num >= self.limit_num:
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()
        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    So(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,85 +0,0 @@
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Sogou(Search):
    """Collect subdomains from the Sogou search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'SogouSearch'
        self.addr = 'https://www.sogou.com/web'
        self.limit_num = 1000  # upper bound on number of results to fetch

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 1
        while True:
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            word = 'site:' + domain + filtered_subdomain
            payload = {'query': word, 'page': self.page_num,
                       "num": self.per_page_num}
            resp = self.get(self.addr, payload)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            self.subdomains = self.subdomains.union(subdomains)
            self.page_num += 1
            # stop when the page does not link to a next page
            if '<a id="sogou_next"' not in resp.text:
                break
            # result count limit
            if self.page_num * self.per_page_num >= self.limit_num:
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()

        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # unified entry name so multithreaded callers can invoke it
    """
    Unified module entry point.

    :param str domain: domain to collect subdomains for
    """
    Sogou(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,92 +0,0 @@
|
||||
import time
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Yahoo(Search):
    """Collect subdomains from the Yahoo search engine."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'YahooSearch'
        self.init = 'https://search.yahoo.com/'
        self.addr = 'https://search.yahoo.com/search'
        self.limit_num = 1000  # upper bound on number of results to fetch
        self.delay = 5
        self.per_page_num = 40

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search
        :param str filtered_subdomain: filter statement excluding noisy subdomains
        :param bool full_search: whether to do a full (exhaustive) search
        """
        self.page_num = 0
        resp = self.get(self.init)
        if not resp:
            return
        self.cookie = resp.cookies  # Yahoo requires the session cookie when searching
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'q': query, 'b': self.page_num, 'n': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the page yields no subdomains
                break
            if not full_search:
                # stop when a page yields only already-known subdomains
                if subdomains.issubset(self.subdomains):
                    break
            # merge the page's results into the accumulated set
            self.subdomains = self.subdomains.union(subdomains)
            if '>Next</a>' not in resp.text:  # no next page
                break
            self.page_num += self.per_page_num
            if self.page_num >= self.limit_num:  # result count limit
                break

    def run(self):
        """Module entry point: search, filter, recurse, then persist results."""
        self.begin()

        self.search(self.domain, full_search=True)

        # exclude subdomains with too many results to surface new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next layer of subdomains
        if self.recursive_search:
            # start at 1 because one layer is already done; actual depth is layer + 1
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # only search subdomains exactly at the current depth
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # uniform entry-point name, convenient for multithreaded callers
    """
    Module-level entry point: run a Yahoo search pass for one domain.

    :param str domain: domain to enumerate subdomains for
    """
    searcher = Yahoo(domain)
    searcher.run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,92 +0,0 @@
|
||||
import time
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
|
||||
|
||||
class Yandex(Search):
    """Subdomain collection module backed by Yandex web search."""

    def __init__(self, domain):
        # NOTE(review): search() reads self.per_page_num but it is never assigned
        # here (the Yahoo module sets its own) — presumably the Search base class
        # provides a default; TODO confirm.
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'YandexSearch'
        self.init = 'https://yandex.com/'
        self.addr = 'https://yandex.com/search'
        self.limit_num = 1000  # cap on the search volume
        self.delay = 5

    def search(self, domain, filtered_subdomain='', full_search=False):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search for
        :param str filtered_subdomain: exclusion clause appended to the query
        :param bool full_search: full (exhaustive) search
        """
        self.page_num = 0  # reset to 0 so repeated searches start at page one
        resp = self.get(self.init)
        if not resp:
            return
        self.cookie = resp.cookies  # grab the session cookie from the landing page
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = 'site:' + domain + filtered_subdomain
            params = {'text': query, 'p': self.page_num,
                      'numdoc': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(domain, resp.text)
            if not subdomains:  # stop when the search yields no subdomains
                break
            if not full_search:
                # Stop once a result page is a complete repeat of earlier results.
                if subdomains.issubset(self.subdomains):
                    break
            # Merge this page's subdomains into the accumulated result set.
            self.subdomains = self.subdomains.union(subdomains)
            if '>next</a>' not in resp.text:  # stop when there is no next-page link
                break
            self.page_num += 1
            # NOTE(review): page_num counts pages here (sibling modules advance by
            # result offset), so limit_num=1000 caps the page index — confirm intent.
            if self.page_num >= self.limit_num:  # search volume limit
                break

    def run(self):
        """
        Execution entry: full search, filtered re-search, then recursive search,
        followed by result persistence.
        """
        self.begin()

        self.search(self.domain, full_search=True)

        # Exclude over-represented subdomains from the query to surface new ones.
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # Recursively search the next layer of subdomains.
        if self.recursive_search:
            # Start at 1 because one layer was already searched above; the
            # effective recursion depth is therefore layer + 1.
            for layer_num in range(1, self.recursive_times):
                for subdomain in self.subdomains:
                    # Constraint selecting which names go into the next layer's search.
                    count = subdomain.count('.') - self.domain.count('.')
                    if count == layer_num:
                        self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # uniform entry-point name so every module can be driven the same way
    """
    Module-level entry point: run a Yandex search pass for one domain.

    :param str domain: domain to enumerate subdomains for
    """
    Yandex(domain).run()


if __name__ == '__main__':
    do('example.com')
|
@ -1,86 +0,0 @@
|
||||
import time
|
||||
import api
|
||||
from client.subdomain.oneforall.common.search import Search
|
||||
from client.subdomain.oneforall.config import logger
|
||||
|
||||
|
||||
class ZoomEyeAPI(Search):
    """Subdomain collection module backed by the ZoomEye web-search API."""

    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'ZoomEyeAPISearch'
        self.addr = 'https://api.zoomeye.org/web/search'
        self.delay = 2
        # Credentials come from the project-level api config module.
        self.user = api.zoomeye_api_usermail
        self.pwd = api.zoomeye_api_password

    def login(self):
        """
        Log in and obtain the query access token.

        Returns the JWT access token string on success.
        HACK: on any failure this calls exit(1), terminating the whole process —
        heavy-handed for a library module; flagged for review, behavior kept.
        """
        url = 'https://api.zoomeye.org/user/login'
        data = {'username': self.user, 'password': self.pwd}
        resp = self.post(url=url, json=data)
        if not resp:
            logger.log('FATAL', f'登录失败无法获取{self.source}的访问token')
            exit(1)
        data = resp.json()
        if resp.status_code == 200:
            logger.log('DEBUG', f'{self.source}模块登录成功')
            return data.get('access_token')
        else:
            logger.log('ALERT', data.get('message'))
            exit(1)

    def search(self):
        """
        Send search requests and match subdomains in the responses.
        """
        page_num = 1
        access_token = self.login()
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            # ZoomEye authenticates each request with a JWT bearer header.
            self.header.update({'Authorization': 'JWT ' + access_token})
            params = {'query': 'hostname:' + self.domain, 'page': page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            subdomains = self.match(self.domain, resp.text)
            if not subdomains:  # stop when the search yields no subdomains
                break
            self.subdomains = self.subdomains.union(subdomains)
            page_num += 1
            if page_num > 500:  # hard page cap (API pagination limit, presumably)
                break
            # NOTE(review): the 403 (token expired/forbidden) check runs after the
            # page has already been processed and page_num advanced — confirm that
            # ordering is intentional.
            if resp.status_code == 403:
                break

    def run(self):
        """
        Execution entry: verify credentials, search, then persist results.
        """
        if not self.check(self.user, self.pwd):
            # Missing/invalid credential config: skip this module silently.
            return
        self.begin()
        self.search()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
||||
|
||||
|
||||
def do(domain):  # uniform entry-point name, convenient for multithreaded dispatch
    """
    Module-level entry point: run a ZoomEye API search pass for one domain.

    :param str domain: domain to enumerate subdomains for
    """
    runner = ZoomEyeAPI(domain)
    runner.run()


if __name__ == '__main__':
    do('mi.com')
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue