|
|
|
|
|
|
|
|
|
# encoding: utf-8
|
|
|
|
|
|
|
|
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
|
|
|
|
|
|
|
|
|
|
import json
|
|
|
|
|
import os
|
|
|
|
|
import re
|
|
|
|
|
import shutil
|
|
|
|
|
import threading
|
|
|
|
|
import warnings
|
|
|
|
|
|
|
|
|
|
import six
|
|
|
|
|
from django.conf import settings
|
|
|
|
|
from django.core.exceptions import ImproperlyConfigured
|
|
|
|
|
from datetime import datetime
|
|
|
|
|
from django.utils.encoding import force_str
|
|
|
|
|
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, EmptyResults, log_query
|
|
|
|
|
from haystack.constants import DJANGO_CT, DJANGO_ID, ID
|
|
|
|
|
from haystack.exceptions import MissingDependency, SearchBackendError, SkipDocument
|
|
|
|
|
from haystack.inputs import Clean, Exact, PythonData, Raw
|
|
|
|
|
from haystack.models import SearchResult
|
|
|
|
|
from haystack.utils import get_identifier, get_model_ct
|
|
|
|
|
from haystack.utils import log as logging
|
|
|
|
|
from haystack.utils.app_loading import haystack_get_model
|
|
|
|
|
from jieba.analyse import ChineseAnalyzer # 导入jieba中文分词器
|
|
|
|
|
from whoosh import index
|
|
|
|
|
from whoosh.analysis import StemmingAnalyzer
|
|
|
|
|
from whoosh.fields import BOOLEAN, DATETIME, IDLIST, KEYWORD, NGRAM, NGRAMWORDS, NUMERIC, Schema, TEXT
|
|
|
|
|
from whoosh.fields import ID as WHOOSH_ID
|
|
|
|
|
from whoosh.filedb.filestore import FileStorage, RamStorage
|
|
|
|
|
from whoosh.highlight import ContextFragmenter, HtmlFormatter
|
|
|
|
|
from whoosh.highlight import highlight as whoosh_highlight
|
|
|
|
|
from whoosh.qparser import QueryParser
|
|
|
|
|
from whoosh.searching import ResultsPage
|
|
|
|
|
from whoosh.writing import AsyncWriter
|
|
|
|
|
|
|
|
|
|
# Verify that the optional 'whoosh' dependency is importable before the
# backend is used; fail fast with haystack's MissingDependency otherwise.
try:
    import whoosh
except ImportError:
    raise MissingDependency(
        "The 'whoosh' backend requires the installation of 'Whoosh'. Please refer to the documentation.")
|
|
|
|
|
|
|
|
|
|
# Enforce the minimum supported Whoosh version (2.5.0+). Whoosh exposes
# __version__ as a tuple, so the tuple comparison below is intentional.
if not hasattr(whoosh, '__version__') or whoosh.__version__ < (2, 5, 0):
    raise MissingDependency(
        "The 'whoosh' backend requires version 2.5.0 or greater.")
|
|
|
|
|
|
|
|
|
|
# Regex recognising the ISO-8601-ish datetime strings stored in the index
# (e.g. "2009-05-09T16:14:00", optionally with a ".123456"/"Z" suffix) so
# they can be converted back to datetime objects in _to_python().
# BUGFIX: raw strings are required here -- '\d' inside a plain string
# literal is an invalid escape sequence and warns/errors on modern Python.
DATETIME_REGEX = re.compile(
    r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T'
    r'(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d{3,6}Z?)?$')
|
|
|
|
|
|
|
|
|
|
# Thread-local storage holding the shared RamStorage instance when the
# backend is configured with in-memory (non-file) storage.
LOCALS = threading.local()
LOCALS.RAM_STORE = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class WhooshHtmlFormatter(HtmlFormatter):
    """
    Simple HTML formatter used to highlight matched terms in results.

    Intentionally plainer than whoosh's stock HtmlFormatter so the
    highlight markup stays consistent across haystack backends.
    """
    template = '<%(tag)s>%(t)s</%(tag)s>'  # wraps each matched token in the tag
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class WhooshSearchBackend(BaseSearchBackend):
    """Whoosh search backend implementation for Haystack."""

    # Words the Whoosh query language treats as operators.
    RESERVED_WORDS = (
        'AND', 'NOT', 'OR', 'TO',
    )

    # Characters with special meaning in Whoosh's query syntax.
    RESERVED_CHARACTERS = (
        '\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
        '[', ']', '^', '"', '~', '*', '?', ':', '.',
    )
|
|
|
|
|
|
|
|
|
|
def __init__(self, connection_alias, **connection_options):
|
|
|
|
|
"""初始化Whoosh后端
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
connection_alias: 连接别名
|
|
|
|
|
**connection_options: 连接选项,包括PATH、STORAGE等
|
|
|
|
|
"""
|
|
|
|
|
super(WhooshSearchBackend, self).__init__(connection_alias, **connection_options)
|
|
|
|
|
self.setup_complete = False
|
|
|
|
|
self.use_file_storage = True
|
|
|
|
|
self.post_limit = getattr(connection_options, 'POST_LIMIT', 128 * 1024 * 1024) # 帖子大小限制
|
|
|
|
|
self.path = connection_options.get('PATH') # 索引文件路径
|
|
|
|
|
|
|
|
|
|
# 判断使用文件存储还是内存存储
|
|
|
|
|
if connection_options.get('STORAGE', 'file') != 'file':
|
|
|
|
|
self.use_file_storage = False
|
|
|
|
|
|
|
|
|
|
# 文件存储必须指定路径
|
|
|
|
|
if self.use_file_storage and not self.path:
|
|
|
|
|
raise ImproperlyConfigured(
|
|
|
|
|
"You must specify a 'PATH' in your settings for connection '%s'." % connection_alias)
|
|
|
|
|
|
|
|
|
|
self.log = logging.getLogger('haystack')
|
|
|
|
|
|
|
|
|
|
    def setup(self):
        """Defer the (potentially expensive) index setup until first use.

        Creates the index directory/storage, builds the schema from the
        unified index, prepares the query parser, and opens (or creates)
        the Whoosh index.
        """
        from haystack import connections
        new_index = False

        # Make sure the index directory exists when using file storage.
        if self.use_file_storage and not os.path.exists(self.path):
            os.makedirs(self.path)
            new_index = True

        # The directory must be writable or indexing will fail later.
        if self.use_file_storage and not os.access(self.path, os.W_OK):
            raise IOError("The path to your Whoosh index '%s' is not writable for the current user/group." % self.path)

        # Pick the storage backend.
        if self.use_file_storage:
            self.storage = FileStorage(self.path)  # on-disk storage
        else:
            global LOCALS
            # RAM storage is shared per-thread via the module-level LOCALS.
            if getattr(LOCALS, 'RAM_STORE', None) is None:
                LOCALS.RAM_STORE = RamStorage()
            self.storage = LOCALS.RAM_STORE

        # Build the schema and remember the document content field's name.
        self.content_field_name, self.schema = self.build_schema(
            connections[self.connection_alias].get_unified_index().all_searchfields())
        self.parser = QueryParser(self.content_field_name, schema=self.schema)

        # Create a brand-new index, or open the existing one (creating it
        # if the storage turns out to be empty).
        if new_index is True:
            self.index = self.storage.create_index(self.schema)
        else:
            try:
                self.index = self.storage.open_index(schema=self.schema)
            except index.EmptyIndexError:
                self.index = self.storage.create_index(self.schema)

        self.setup_complete = True
|
|
|
|
|
|
|
|
|
|
def build_schema(self, fields):
|
|
|
|
|
"""构建Whoosh的schema(表结构)
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
fields: 搜索字段字典
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
tuple: (内容字段名, schema对象)
|
|
|
|
|
"""
|
|
|
|
|
# 基础字段定义
|
|
|
|
|
schema_fields = {
|
|
|
|
|
ID: WHOOSH_ID(stored=True, unique=True), # 文档ID
|
|
|
|
|
DJANGO_CT: WHOOSH_ID(stored=True), # Django内容类型
|
|
|
|
|
DJANGO_ID: WHOOSH_ID(stored=True), # Django对象ID
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
initial_key_count = len(schema_fields) # 初始字段数量
|
|
|
|
|
content_field_name = '' # 内容字段名
|
|
|
|
|
|
|
|
|
|
# 遍历所有字段进行类型映射
|
|
|
|
|
for field_name, field_class in fields.items():
|
|
|
|
|
if field_class.is_multivalued: # 多值字段
|
|
|
|
|
if field_class.indexed is False:
|
|
|
|
|
schema_fields[field_class.index_fieldname] = IDLIST(
|
|
|
|
|
stored=True, field_boost=field_class.boost)
|
|
|
|
|
else:
|
|
|
|
|
schema_fields[field_class.index_fieldname] = KEYWORD(
|
|
|
|
|
stored=True, commas=True, scorable=True, field_boost=field_class.boost)
|
|
|
|
|
elif field_class.field_type in ['date', 'datetime']: # 日期时间字段
|
|
|
|
|
schema_fields[field_class.index_fieldname] = DATETIME(
|
|
|
|
|
stored=field_class.stored, sortable=True)
|
|
|
|
|
elif field_class.field_type == 'integer': # 整数字段
|
|
|
|
|
schema_fields[field_class.index_fieldname] = NUMERIC(
|
|
|
|
|
stored=field_class.stored, numtype=int, field_boost=field_class.boost)
|
|
|
|
|
elif field_class.field_type == 'float': # 浮点数字段
|
|
|
|
|
schema_fields[field_class.index_fieldname] = NUMERIC(
|
|
|
|
|
stored=field_class.stored, numtype=float, field_boost=field_class.boost)
|
|
|
|
|
elif field_class.field_type == 'boolean': # 布尔字段
|
|
|
|
|
schema_fields[field_class.index_fieldname] = BOOLEAN(stored=field_class.stored)
|
|
|
|
|
elif field_class.field_type == 'ngram': # N-gram字段
|
|
|
|
|
schema_fields[field_class.index_fieldname] = NGRAM(
|
|
|
|
|
minsize=3, maxsize=15, stored=field_class.stored, field_boost=field_class.boost)
|
|
|
|
|
elif field_class.field_type == 'edge_ngram': # 边缘N-gram字段
|
|
|
|
|
schema_fields[field_class.index_fieldname] = NGRAMWORDS(
|
|
|
|
|
minsize=2, maxsize=15, at='start', stored=field_class.stored, field_boost=field_class.boost)
|
|
|
|
|
else: # 文本字段,使用中文分词器[1,3,6](@ref)
|
|
|
|
|
schema_fields[field_class.index_fieldname] = TEXT(
|
|
|
|
|
stored=True, analyzer=ChineseAnalyzer(), field_boost=field_class.boost, sortable=True)
|
|
|
|
|
|
|
|
|
|
# 标记文档主字段
|
|
|
|
|
if field_class.document is True:
|
|
|
|
|
content_field_name = field_class.index_fieldname
|
|
|
|
|
schema_fields[field_class.index_fieldname].spelling = True # 启用拼写检查
|
|
|
|
|
|
|
|
|
|
# 检查是否有有效字段
|
|
|
|
|
if len(schema_fields) <= initial_key_count:
|
|
|
|
|
raise SearchBackendError(
|
|
|
|
|
"No fields were found in any search_indexes. Please correct this before attempting to search.")
|
|
|
|
|
|
|
|
|
|
return (content_field_name, Schema(**schema_fields))
|
|
|
|
|
|
|
|
|
|
def update(self, index, iterable, commit=True):
|
|
|
|
|
"""更新索引文档
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
index: 索引对象
|
|
|
|
|
iterable: 可迭代的对象集合
|
|
|
|
|
commit: 是否提交更改
|
|
|
|
|
"""
|
|
|
|
|
if not self.setup_complete:
|
|
|
|
|
self.setup()
|
|
|
|
|
|
|
|
|
|
self.index = self.index.refresh()
|
|
|
|
|
writer = AsyncWriter(self.index) # 异步写入器
|
|
|
|
|
|
|
|
|
|
# 遍历所有对象并更新索引
|
|
|
|
|
for obj in iterable:
|
|
|
|
|
try:
|
|
|
|
|
doc = index.full_prepare(obj) # 准备文档数据
|
|
|
|
|
except SkipDocument:
|
|
|
|
|
self.log.debug(u"Indexing for object `%s` skipped", obj)
|
|
|
|
|
else:
|
|
|
|
|
# 确保所有值为unicode格式
|
|
|
|
|
for key in doc:
|
|
|
|
|
doc[key] = self._from_python(doc[key])
|
|
|
|
|
|
|
|
|
|
# Whoosh 2.5.0+不支持文档boost
|
|
|
|
|
if 'boost' in doc:
|
|
|
|
|
del doc['boost']
|
|
|
|
|
|
|
|
|
|
# 更新文档
|
|
|
|
|
try:
|
|
|
|
|
writer.update_document(**doc)
|
|
|
|
|
except Exception as e:
|
|
|
|
|
if not self.silently_fail:
|
|
|
|
|
raise
|
|
|
|
|
self.log.error(u"%s while preparing object for update" % e.__class__.__name__,
|
|
|
|
|
exc_info=True, extra={"data": {"index": index, "object": get_identifier(obj)}})
|
|
|
|
|
|
|
|
|
|
# 提交更改
|
|
|
|
|
if len(iterable) > 0:
|
|
|
|
|
writer.commit()
|
|
|
|
|
|
|
|
|
|
def remove(self, obj_or_string, commit=True):
|
|
|
|
|
"""从索引中移除文档
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
obj_or_string: 对象或标识符
|
|
|
|
|
commit: 是否提交更改
|
|
|
|
|
"""
|
|
|
|
|
if not self.setup_complete:
|
|
|
|
|
self.setup()
|
|
|
|
|
|
|
|
|
|
self.index = self.index.refresh()
|
|
|
|
|
whoosh_id = get_identifier(obj_or_string)
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
# 通过ID删除文档
|
|
|
|
|
self.index.delete_by_query(q=self.parser.parse(u'%s:"%s"' % (ID, whoosh_id)))
|
|
|
|
|
except Exception as e:
|
|
|
|
|
if not self.silently_fail:
|
|
|
|
|
raise
|
|
|
|
|
self.log.error("Failed to remove document '%s' from Whoosh: %s", whoosh_id, e, exc_info=True)
|
|
|
|
|
|
|
|
|
|
def clear(self, models=None, commit=True):
|
|
|
|
|
"""清空索引
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
models: 要清空的模型列表,None表示清空所有
|
|
|
|
|
commit: 是否提交更改
|
|
|
|
|
"""
|
|
|
|
|
if not self.setup_complete:
|
|
|
|
|
self.setup()
|
|
|
|
|
|
|
|
|
|
self.index = self.index.refresh()
|
|
|
|
|
|
|
|
|
|
if models is not None:
|
|
|
|
|
assert isinstance(models, (list, tuple))
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
if models is None: # 清空整个索引
|
|
|
|
|
self.delete_index()
|
|
|
|
|
else: # 只清空指定模型的索引
|
|
|
|
|
models_to_delete = []
|
|
|
|
|
for model in models:
|
|
|
|
|
models_to_delete.append(u"%s:%s" % (DJANGO_CT, get_model_ct(model)))
|
|
|
|
|
self.index.delete_by_query(q=self.parser.parse(u" OR ".join(models_to_delete)))
|
|
|
|
|
except Exception as e:
|
|
|
|
|
if not self.silently_fail:
|
|
|
|
|
raise
|
|
|
|
|
if models is not None:
|
|
|
|
|
self.log.error("Failed to clear Whoosh index of models '%s': %s",
|
|
|
|
|
','.join(models_to_delete), e, exc_info=True)
|
|
|
|
|
else:
|
|
|
|
|
self.log.error("Failed to clear Whoosh index: %s", e, exc_info=True)
|
|
|
|
|
|
|
|
|
|
def delete_index(self):
|
|
|
|
|
"""删除整个索引(高效方式)"""
|
|
|
|
|
# 文件存储:直接删除目录[3,8](@ref)
|
|
|
|
|
if self.use_file_storage and os.path.exists(self.path):
|
|
|
|
|
shutil.rmtree(self.path)
|
|
|
|
|
elif not self.use_file_storage: # 内存存储:清理存储
|
|
|
|
|
self.storage.clean()
|
|
|
|
|
|
|
|
|
|
# 重新创建索引
|
|
|
|
|
self.setup()
|
|
|
|
|
|
|
|
|
|
    def optimize(self):
        """Compact the index segments for faster searching."""
        if not self.setup_complete:
            self.setup()

        self.index = self.index.refresh()
        self.index.optimize()
|
|
|
|
|
|
|
|
|
|
def calculate_page(self, start_offset=0, end_offset=None):
|
|
|
|
|
"""计算分页信息
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
start_offset: 起始偏移量
|
|
|
|
|
end_offset: 结束偏移量
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
tuple: (页码, 页大小)
|
|
|
|
|
"""
|
|
|
|
|
# 防止Whoosh错误,需要end_offset大于0
|
|
|
|
|
if end_offset is not None and end_offset <= 0:
|
|
|
|
|
end_offset = 1
|
|
|
|
|
|
|
|
|
|
# 计算页码
|
|
|
|
|
page_num = 0
|
|
|
|
|
if end_offset is None:
|
|
|
|
|
end_offset = 1000000
|
|
|
|
|
if start_offset is None:
|
|
|
|
|
start_offset = 0
|
|
|
|
|
|
|
|
|
|
page_length = end_offset - start_offset
|
|
|
|
|
if page_length and page_length > 0:
|
|
|
|
|
page_num = int(start_offset / page_length)
|
|
|
|
|
|
|
|
|
|
# Whoosh使用1-based页码
|
|
|
|
|
page_num += 1
|
|
|
|
|
return page_num, page_length
|
|
|
|
|
|
|
|
|
|
    @log_query
    def search(self, query_string, sort_by=None, start_offset=0, end_offset=None, fields='',
               highlight=False, facets=None, date_facets=None, query_facets=None, narrow_queries=None,
               spelling_query=None, within=None, dwithin=None, distance_point=None, models=None,
               limit_to_registered_models=None, result_class=None, **kwargs):
        """Run a search against the Whoosh index.

        Args:
            query_string: the raw query to parse and execute.
            sort_by: iterable of field names, '-' prefix for descending.
                Whoosh requires every field to share one direction.
            start_offset: start of the result slice.
            end_offset: end of the result slice.
            highlight: attach highlighted snippets when True.
            facets/date_facets/query_facets: unsupported by Whoosh; a
                warning is emitted when supplied.
            narrow_queries: extra queries used to filter the result set.
            spelling_query: alternate text for spelling suggestions.
            models: restrict results to these model classes.
            limit_to_registered_models: falls back to the
                HAYSTACK_LIMIT_TO_REGISTERED_MODELS setting when None.
            result_class: class used to wrap each hit.

        Returns:
            dict: {'results': [...], 'hits': int, ...} payload.
        """
        if not self.setup_complete:
            self.setup()

        # An empty query cannot match anything.
        if len(query_string) == 0:
            return {'results': [], 'hits': 0}

        query_string = force_str(query_string)

        # A single non-wildcard character would be eaten by the stopword
        # filter anyway, so short-circuit to an empty result.
        if len(query_string) <= 1 and query_string != u'*':
            return {'results': [], 'hits': 0}

        # Work out the sort direction; Whoosh supports only one direction
        # shared by all order_by fields.
        reverse = False
        if sort_by is not None:
            sort_by_list = []
            reverse_counter = 0

            for order_by in sort_by:
                if order_by.startswith('-'):
                    reverse_counter += 1

            if reverse_counter and reverse_counter != len(sort_by):
                raise SearchBackendError("Whoosh requires all order_by fields to use the same sort direction")

            # Strip the '-' prefix; the first field decides the direction.
            for order_by in sort_by:
                if order_by.startswith('-'):
                    sort_by_list.append(order_by[1:])
                    if len(sort_by_list) == 1:
                        reverse = True
                else:
                    sort_by_list.append(order_by)
                    if len(sort_by_list) == 1:
                        reverse = False

            # NOTE(review): only the first order_by field is passed to
            # Whoosh here -- confirm multi-field ordering is not expected.
            sort_by = sort_by_list[0]

        # Whoosh has no faceting support -- warn and ignore.
        if facets is not None:
            warnings.warn("Whoosh does not handle faceting.", Warning, stacklevel=2)
        if date_facets is not None:
            warnings.warn("Whoosh does not handle date faceting.", Warning, stacklevel=2)
        if query_facets is not None:
            warnings.warn("Whoosh does not handle query faceting.", Warning, stacklevel=2)

        narrowed_results = None
        self.index = self.index.refresh()

        # Decide which models may appear in the results.
        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            model_choices = self.build_models_list()
        else:
            model_choices = []

        # Restrict by model via an extra narrow query on DJANGO_CT.
        if len(model_choices) > 0:
            if narrow_queries is None:
                narrow_queries = set()

            narrow_queries.add(' OR '.join(['%s:%s' % (DJANGO_CT, rm) for rm in model_choices]))

        # Run the narrowing queries and intersect their result sets.
        narrow_searcher = None
        if narrow_queries is not None:
            narrow_searcher = self.index.searcher()

            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(
                    self.parser.parse(force_str(nq)), limit=None)

                # An empty narrowing set means nothing can match at all.
                if len(recent_narrowed_results) <= 0:
                    return {'results': [], 'hits': 0}

                if narrowed_results:
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results

        self.index = self.index.refresh()

        # Main query execution.
        if self.index.doc_count():
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query_string)

            # The parser can return None for degenerate queries.
            if parsed_query is None:
                return {'results': [], 'hits': 0}

            page_num, page_length = self.calculate_page(start_offset, end_offset)
            search_kwargs = {
                'pagelen': page_length,
                'sortedby': sort_by,
                'reverse': reverse,
            }

            # Apply the combined narrowing filter, if any.
            if narrowed_results is not None:
                search_kwargs['filter'] = narrowed_results

            try:
                raw_page = searcher.search_page(parsed_query, page_num, **search_kwargs)
            except ValueError:
                if not self.silently_fail:
                    raise

                return {'results': [], 'hits': 0}

            # Whoosh clamps out-of-range pages; treat that as "no results".
            if raw_page.pagenum < page_num:
                return {'results': [], 'hits': 0}

            results = self._process_results(raw_page, highlight=highlight, query_string=query_string,
                                            spelling_query=spelling_query, result_class=result_class)
            searcher.close()

            if hasattr(narrow_searcher, 'close'):
                narrow_searcher.close()

            return results
        else:
            # Empty index: optionally still provide a spelling suggestion.
            if self.include_spelling:
                spelling_suggestion = self.create_spelling_suggestion(
                    spelling_query if spelling_query else query_string)
            else:
                spelling_suggestion = None

            return {
                'results': [],
                'hits': 0,
                'spelling_suggestion': spelling_suggestion,
            }
|
|
|
|
|
|
|
|
|
|
    def more_like_this(self, model_instance, additional_query_string=None, start_offset=0, end_offset=None,
                       models=None, limit_to_registered_models=None, result_class=None, **kwargs):
        """Find documents similar to the one indexed for *model_instance*.

        Args:
            model_instance: the model instance to find neighbours of.
            additional_query_string: extra query used to narrow candidates.
            start_offset: start of the result slice.
            end_offset: end of the result slice (also caps candidates).
            models: restrict results to these model classes.
            limit_to_registered_models: falls back to the
                HAYSTACK_LIMIT_TO_REGISTERED_MODELS setting when None.
            result_class: class used to wrap each hit.

        Returns:
            dict: {'results': [...], 'hits': int} payload.
        """
        if not self.setup_complete:
            self.setup()

        # Similarity is computed over the main content field.
        model_klass = model_instance._meta.concrete_model
        field_name = self.content_field_name
        narrow_queries = set()
        narrowed_results = None
        self.index = self.index.refresh()

        # Decide which models may appear in the results.
        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            model_choices = self.build_models_list()
        else:
            model_choices = []

        # Restrict by model via an extra narrow query on DJANGO_CT.
        if len(model_choices) > 0:
            if narrow_queries is None:
                narrow_queries = set()

            narrow_queries.add(' OR '.join(['%s:%s' % (DJANGO_CT, rm) for rm in model_choices]))

        # Optional extra narrowing supplied by the caller.
        if additional_query_string and additional_query_string != '*':
            narrow_queries.add(additional_query_string)

        # Run the narrowing queries and intersect their result sets.
        narrow_searcher = None
        if narrow_queries is not None:
            narrow_searcher = self.index.searcher()

            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(self.parser.parse(force_str(nq)), limit=None)

                if len(recent_narrowed_results) <= 0:
                    return {'results': [], 'hits': 0}

                if narrowed_results:
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results

        # Work out the paging parameters up front.
        page_num, page_length = self.calculate_page(start_offset, end_offset)
        self.index = self.index.refresh()
        raw_results = EmptyResults()

        # Locate the source document, then ask Whoosh for its neighbours.
        if self.index.doc_count():
            query = "%s:%s" % (ID, get_identifier(model_instance))
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query)
            results = searcher.search(parsed_query)

            if len(results):
                raw_results = results[0].more_like_this(field_name, top=end_offset)

        # Apply the combined narrowing filter, if any.
        if narrowed_results is not None and hasattr(raw_results, 'filter'):
            raw_results.filter(narrowed_results)

        # Page the raw results like a normal search.
        try:
            raw_page = ResultsPage(raw_results, page_num, page_length)
        except ValueError:
            if not self.silently_fail:
                raise

            return {'results': [], 'hits': 0}

        if raw_page.pagenum < page_num:
            return {'results': [], 'hits': 0}

        results = self._process_results(raw_page, result_class=result_class)
        # NOTE(review): `searcher` is unbound when the index is empty --
        # confirm this path is unreachable (doc_count()==0 returns earlier
        # only via the narrowing branch).
        searcher.close()

        if hasattr(narrow_searcher, 'close'):
            narrow_searcher.close()

        return results
|
|
|
|
|
|
|
|
|
|
    def _process_results(self, raw_page, highlight=False, query_string='', spelling_query=None, result_class=None):
        """Convert a Whoosh results page into haystack's result payload.

        Args:
            raw_page: the Whoosh ResultsPage to convert.
            highlight: attach highlighted snippets when True.
            query_string: the original query (used for highlighting and
                spelling suggestions).
            spelling_query: alternate text for spelling suggestions.
            result_class: wrapper class for each hit (default SearchResult).

        Returns:
            dict: {'results', 'hits', 'facets', 'spelling_suggestion'}.
        """
        from haystack import connections
        results = []
        hits = len(raw_page)  # hit count, adjusted below for stale docs

        if result_class is None:
            result_class = SearchResult

        facets = {}  # always empty: Whoosh has no faceting support
        spelling_suggestion = None
        unified_index = connections[self.connection_alias].get_unified_index()
        indexed_models = unified_index.get_indexed_models()

        for doc_offset, raw_result in enumerate(raw_page):
            score = raw_page.score(doc_offset) or 0
            app_label, model_name = raw_result[DJANGO_CT].split('.')
            additional_fields = {}
            model = haystack_get_model(app_label, model_name)

            if model and model in indexed_models:
                # Convert every stored value back to a Python value.
                for key, value in raw_result.items():
                    index = unified_index.get_index(model)
                    string_key = str(key)

                    if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
                        # Multi-valued fields were stored comma-joined.
                        if index.fields[string_key].is_multivalued:
                            if value is None or len(value) == 0:
                                additional_fields[string_key] = []
                            else:
                                additional_fields[string_key] = value.split(',')
                        else:
                            additional_fields[string_key] = index.fields[string_key].convert(value)
                    else:
                        additional_fields[string_key] = self._to_python(value)

                # Bookkeeping fields are not part of the public result.
                del (additional_fields[DJANGO_CT])
                del (additional_fields[DJANGO_ID])

                # Re-analyze the query and highlight the content field.
                if highlight:
                    sa = StemmingAnalyzer()
                    formatter = WhooshHtmlFormatter('em')
                    terms = [token.text for token in sa(query_string)]

                    whoosh_result = whoosh_highlight(
                        additional_fields.get(self.content_field_name), terms, sa,
                        ContextFragmenter(), formatter
                    )
                    additional_fields['highlighted'] = {self.content_field_name: [whoosh_result]}

                # Wrap the hit in the requested result class.
                result = result_class(app_label, model_name, raw_result[DJANGO_ID], score, **additional_fields)
                results.append(result)
            else:
                # Stale document for an unregistered model: drop it.
                hits -= 1

        if self.include_spelling:
            spelling_suggestion = self.create_spelling_suggestion(
                spelling_query if spelling_query else query_string)

        return {
            'results': results,
            'hits': hits,
            'facets': facets,
            'spelling_suggestion': spelling_suggestion,
        }
|
|
|
|
|
|
|
|
|
|
def create_spelling_suggestion(self, query_string):
|
|
|
|
|
"""创建拼写建议
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
query_string: 查询字符串
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
str: 拼写建议
|
|
|
|
|
"""
|
|
|
|
|
spelling_suggestion = None
|
|
|
|
|
reader = self.index.reader()
|
|
|
|
|
corrector = reader.corrector(self.content_field_name)
|
|
|
|
|
cleaned_query = force_str(query_string)
|
|
|
|
|
|
|
|
|
|
if not query_string:
|
|
|
|
|
return spelling_suggestion
|
|
|
|
|
|
|
|
|
|
# 清理查询字符串中的保留字
|
|
|
|
|
for rev_word in self.RESERVED_WORDS:
|
|
|
|
|
cleaned_query = cleaned_query.replace(rev_word, '')
|
|
|
|
|
|
|
|
|
|
for rev_char in self.RESERVED_CHARACTERS:
|
|
|
|
|
cleaned_query = cleaned_query.replace(rev_char, '')
|
|
|
|
|
|
|
|
|
|
# 分词并获取建议
|
|
|
|
|
query_words = cleaned_query.split()
|
|
|
|
|
suggested_words = []
|
|
|
|
|
|
|
|
|
|
for word in query_words:
|
|
|
|
|
suggestions = corrector.suggest(word, limit=1)
|
|
|
|
|
if len(suggestions) > 0:
|
|
|
|
|
suggested_words.append(suggestions[0])
|
|
|
|
|
|
|
|
|
|
spelling_suggestion = ' '.join(suggested_words)
|
|
|
|
|
return spelling_suggestion
|
|
|
|
|
|
|
|
|
|
def _from_python(self, value):
|
|
|
|
|
"""将Python值转换为Whoosh字符串格式
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
value: Python值
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
str: Whoosh格式字符串
|
|
|
|
|
"""
|
|
|
|
|
if hasattr(value, 'strftime'): # 日期时间处理
|
|
|
|
|
if not hasattr(value, 'hour'):
|
|
|
|
|
value = datetime(value.year, value.month, value.day, 0, 0, 0)
|
|
|
|
|
elif isinstance(value, bool): # 布尔值处理
|
|
|
|
|
value = 'true' if value else 'false'
|
|
|
|
|
elif isinstance(value, (list, tuple)): # 列表元组处理
|
|
|
|
|
value = u','.join([force_str(v) for v in value])
|
|
|
|
|
elif isinstance(value, (six.integer_types, float)): # 数字保持原样
|
|
|
|
|
pass
|
|
|
|
|
else: # 其他转为字符串
|
|
|
|
|
value = force_str(value)
|
|
|
|
|
return value
|
|
|
|
|
|
|
|
|
|
def _to_python(self, value):
|
|
|
|
|
"""将Whoosh值转换为Python原生值
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
value: Whoosh值
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
object: Python值
|
|
|
|
|
"""
|
|
|
|
|
if value == 'true':
|
|
|
|
|
return True
|
|
|
|
|
elif value == 'false':
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
# 日期时间解析
|
|
|
|
|
if value and isinstance(value, six.string_types):
|
|
|
|
|
possible_datetime = DATETIME_REGEX.search(value)
|
|
|
|
|
if possible_datetime:
|
|
|
|
|
date_values = possible_datetime.groupdict()
|
|
|
|
|
for dk, dv in date_values.items():
|
|
|
|
|
date_values[dk] = int(dv)
|
|
|
|
|
return datetime(date_values['year'], date_values['month'], date_values['day'],
|
|
|
|
|
date_values['hour'], date_values['minute'], date_values['second'])
|
|
|
|
|
|
|
|
|
|
# JSON解析尝试
|
|
|
|
|
try:
|
|
|
|
|
converted_value = json.loads(value)
|
|
|
|
|
if isinstance(converted_value, (list, tuple, set, dict, six.integer_types, float, complex)):
|
|
|
|
|
return converted_value
|
|
|
|
|
except:
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
return value
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class WhooshSearchQuery(BaseSearchQuery):
    """Query builder for the Whoosh backend; renders haystack filters as
    Whoosh query-language fragments."""
|
|
|
|
|
def _convert_datetime(self, date):
|
|
|
|
|
"""转换日期时间格式"""
|
|
|
|
|
if hasattr(date, 'hour'):
|
|
|
|
|
return force_str(date.strftime('%Y%m%d%H%M%S'))
|
|
|
|
|
else:
|
|
|
|
|
return force_str(date.strftime('%Y%m%d000000'))
|
|
|
|
|
|
|
|
|
|
def clean(self, query_fragment):
|
|
|
|
|
"""清理查询片段,转义保留字符[8](@ref)
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
query_fragment: 查询片段
|
|
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
|
str: 清理后的查询
|
|
|
|
|
"""
|
|
|
|
|
words = query_fragment.split()
|
|
|
|
|
cleaned_words = []
|
|
|
|
|
|
|
|
|
|
for word in words:
|
|
|
|
|
if word in self.backend.RESERVED_WORDS: # 保留字转小写
|
|
|
|
|
word = word.replace(word, word.lower())
|
|
|
|
|
|
|
|
|
|
for char in self.backend.RESERVED_CHARACTERS: # 保留字符加引号
|
|
|
|
|
if char in word:
|
|
|
|
|
word = "'%s'" % word
|
|
|
|
|
break
|
|
|
|
|
|
|
|
|
|
cleaned_words.append(word)
|
|
|
|
|
|
|
|
|
|
return ' '.join(cleaned_words)
|
|
|
|
|
|
|
|
|
|
    def build_query_fragment(self, field, filter_type, value):
        """Render one (field, filter, value) triple as Whoosh query syntax.

        Args:
            field: haystack field name ('content' targets the default field).
            filter_type: one of content/contains/startswith/endswith/exact/
                gt/gte/lt/lte/fuzzy/in/range.
            value: raw value or a haystack input object (Clean, Exact, Raw...).

        Returns:
            str: the query fragment, e.g. u'title:(foo*)'.
        """
        from haystack import connections
        query_frag = ''
        is_datetime = False

        # Wrap bare values in the appropriate haystack input type first.
        if not hasattr(value, 'input_type_name'):
            if hasattr(value, 'strftime'):
                is_datetime = True

            if isinstance(value, six.string_types) and value != ' ':
                value = Clean(value)  # escape/clean plain text
            else:
                value = PythonData(value)  # pass through native data

        prepared_value = value.prepare(self)

        if not isinstance(prepared_value, (set, list, tuple)):
            prepared_value = self.backend._from_python(prepared_value)

        # 'content' means the schema's default field -- no prefix needed.
        if field == 'content':
            index_fieldname = ''
        else:
            index_fieldname = u'%s:' % connections[self._using].get_unified_index().get_index_fieldname(field)

        # Templates for each supported filter type.
        filter_types = {
            'content': '%s',
            'contains': '*%s*',
            'endswith': "*%s",
            'startswith': "%s*",
            'exact': '%s',
            'gt': "{%s to}",
            'gte': "[%s to]",
            'lt': "{to %s}",
            'lte': "[to %s]",
            'fuzzy': u'%s~',
        }

        if value.post_process is False:
            # Raw-style inputs skip template handling entirely.
            query_frag = prepared_value
        else:
            if filter_type in ['content', 'contains', 'startswith', 'endswith', 'fuzzy']:
                if value.input_type_name == 'exact':
                    query_frag = prepared_value
                else:
                    # Split multi-word input and AND the per-word fragments.
                    terms = []

                    if isinstance(prepared_value, six.string_types):
                        possible_values = prepared_value.split(' ')
                    else:
                        if is_datetime is True:
                            prepared_value = self._convert_datetime(prepared_value)

                        possible_values = [prepared_value]

                    for possible_value in possible_values:
                        terms.append(filter_types[filter_type] % self.backend._from_python(possible_value))

                    if len(terms) == 1:
                        query_frag = terms[0]
                    else:
                        query_frag = u"(%s)" % " AND ".join(terms)
            elif filter_type == 'in':
                # OR together every candidate; plain strings get quoted.
                in_options = []

                for possible_value in prepared_value:
                    is_datetime = False

                    if hasattr(possible_value, 'strftime'):
                        is_datetime = True

                    pv = self.backend._from_python(possible_value)

                    if is_datetime is True:
                        pv = self._convert_datetime(pv)

                    if isinstance(pv, six.string_types) and not is_datetime:
                        in_options.append('"%s"' % pv)
                    else:
                        in_options.append('%s' % pv)

                query_frag = "(%s)" % " OR ".join(in_options)
            elif filter_type == 'range':
                # Inclusive range between the two prepared endpoints.
                start = self.backend._from_python(prepared_value[0])
                end = self.backend._from_python(prepared_value[1])

                if hasattr(prepared_value[0], 'strftime'):
                    start = self._convert_datetime(start)

                if hasattr(prepared_value[1], 'strftime'):
                    end = self._convert_datetime(end)

                query_frag = u"[%s to %s]" % (start, end)
            elif filter_type == 'exact':
                if value.input_type_name == 'exact':
                    query_frag = prepared_value
                else:
                    prepared_value = Exact(prepared_value).prepare(self)
                    query_frag = filter_types[filter_type] % prepared_value
            else:
                # gt/gte/lt/lte and anything else with a simple template.
                if is_datetime is True:
                    prepared_value = self._convert_datetime(prepared_value)

                query_frag = filter_types[filter_type] % prepared_value

        # Parenthesise non-raw fragments so they compose safely.
        if len(query_frag) and not isinstance(value, Raw):
            if not query_frag.startswith('(') and not query_frag.endswith(')'):
                query_frag = "(%s)" % query_frag

        return u"%s%s" % (index_fieldname, query_frag)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class WhooshEngine(BaseEngine):
    """Engine wiring the Whoosh backend and query classes into haystack."""
    backend = WhooshSearchBackend  # backend implementation class
    query = WhooshSearchQuery  # query builder class
|