main
qweasdzxc227 6 months ago
parent d737bee569
commit 31d6d15c69

@@ -1,115 +1,115 @@
# Scrapy settings for ArticleSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
import sys
import scrapy.downloadermiddlewares.useragent
import ArticleSpider.pipelines

BOT_NAME = "ArticleSpider"

SPIDER_MODULES = ["ArticleSpider.spiders"]
NEWSPIDER_MODULE = "ArticleSpider.spiders"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = True
COOKIES_DEBUG = True

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#     "Accept-Language": "en",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     "ArticleSpider.middlewares.ArticlespiderSpiderMiddleware": 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # "ArticleSpider.middlewares.ArticlespiderDownloaderMiddleware": 543,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 2,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     "scrapy.extensions.telnet.TelnetConsole": None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'scrapy.pipelines.images.ImagesPipeline': 1,
    # 'ArticleSpider.pipelines.JsonWithEncodingPipeline': 2,
    # 'ArticleSpider.pipelines.JsonExporterPipeline': 3,
    # 'ArticleSpider.pipelines.MysqlPipeline': 4,
    # 'ArticleSpider.pipelines.MysqlTwistedPipline': 5,
    'ArticleSpider.pipelines.ElasticsearchPipeline': 6,
    'ArticleSpider.pipelines.ArticlespiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"

# IMAGES_URLS_FIELD = 'front_image_url'
project_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(project_dir, 'images')

MYSQL_HOST = '127.0.0.1'
MYSQL_DBNAME = 'article_spider'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'qweasdzxc227'
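ITEM_PIPELINES above enables 'ArticleSpider.pipelines.ElasticsearchPipeline', whose implementation is not part of this commit. The following is only a minimal sketch of what such a pipeline could look like, assuming the Scrapy project defines an elasticsearch_dsl document type similar to the ArticleType added in search/models.py below; the module path ArticleSpider.models.es_types and the field names are assumptions, not the author's actual code.

# pipelines.py (sketch only; the real ElasticsearchPipeline is not shown in this diff)
from ArticleSpider.models.es_types import ArticleType  # assumed module path; it is expected to call connections.create_connection()


class ElasticsearchPipeline(object):
    # Write every scraped item into Elasticsearch via elasticsearch_dsl
    def process_item(self, item, spider):
        article = ArticleType()
        article.title = item.get('title', '')
        article.content = item.get('content', '')
        article.url = item.get('url', '')
        article.meta.id = item.get('url_object_id', '')  # use the URL hash as the document id
        article.save()
        # Return the item so lower-priority pipelines (e.g. ArticlespiderPipeline at 300) still run
        return item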

@@ -1,128 +1,87 @@
import json
import re
import os
import requests
import scrapy
import pickle
import datetime
from scrapy.http import Request
from urllib import parse
from scrapy.loader import ItemLoader
from ArticleSpider.items import ArticleItemLoader
from ArticleSpider.items import JobBoleArticleItem
from ArticleSpider.utils import common
from ArticleSpider.utils.common import get_md5
from scrapy import signals
import time
from selenium import webdriver


class JobboleSpider(scrapy.Spider):
    name = "jobbole"
    allowed_domains = ["news.cnblogs.com"]
    start_urls = ["http://news.cnblogs.com/"]

    def start_requests(self):
        cookies = []
        if os.path.exists(r'C:\Users\10955\ArticleSpider\cookies\jobbole.cookie'):
            cookies = pickle.load(open(r'C:\Users\10955\ArticleSpider\cookies\jobbole.cookie', 'rb'))
        if not cookies:
            driver = webdriver.Chrome()
            driver.implicitly_wait(10)
            # Open the login page
            driver.get('https://account.cnblogs.com/signin')
            # Hide the webdriver flag so the click captcha does not detect automation
            driver.execute_script("Object.defineProperties(navigator,{webdriver:{get:()=>undefined}})")
            # Enter the account name
            driver.find_element_by_id('mat-input-0').send_keys('包包1')
            # Enter the password
            driver.find_element_by_id('mat-input-1').send_keys('qweasdzxc227')
            # Click the login button
            driver.find_element_by_css_selector('.mat-button-wrapper').click()
            # Click the captcha
            driver.find_element_by_xpath('//*[@id="Shape3"]').click()
            time.sleep(5)
            cookies = driver.get_cookies()
            pickle.dump(cookies, open(r'C:\Users\10955\ArticleSpider\cookies\jobbole.cookie', 'wb'))
        cookie_dict = {}
        for cookie in cookies:
            cookie_dict[cookie['name']] = cookie['value']
        for url in self.start_urls:
            yield scrapy.Request(url, dont_filter=True, cookies=cookie_dict)
    def parse(self, response):
        # 1. Extract article URLs from the news list page, hand them to Scrapy to download,
        #    then call the corresponding parse callback.
        #    extract_first() returns the first matched value.
        post_nodes = response.css('#news_list .news_block')[:100]
        for post_node in post_nodes:
            image_url = "https:" + post_node.css('.entry_summary a img::attr(src)').extract_first("")
            post_url = post_node.css('h2 a::attr(href)').extract_first("")
            yield Request(url=parse.urljoin(response.url, post_url), meta={'front_image_url': image_url}, callback=self.parse_detail, dont_filter=True)
        # 2. Extract the next-page URL, let Scrapy download it, and feed the result back into parse
        next_url = response.css('div.pager a:last-child::attr(href)').extract_first("")
        yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        match_re = re.match(r".*?(\d+)", response.url)
        if match_re:
            post_id = match_re.group(1)
            item_loader = ArticleItemLoader(item=JobBoleArticleItem(), response=response)
            item_loader.add_css('title', '#news_title a::text')
            item_loader.add_css('content', '#news_content')
            item_loader.add_css('tags', '.news_tags a::text')
            item_loader.add_css('create_date', '#news_info .time::text')
            item_loader.add_value('url', response.url)
            item_loader.add_value('front_image_url', response.meta.get('front_image_url', ''))
            yield Request(url=parse.urljoin(response.url, "/NewsAjax/GetAjaxNewsInfo?contentId={}".format(post_id)),
                          meta={'article_item': item_loader, 'url': response.url}, callback=self.parse_nums)

    def parse_nums(self, response):
        j_data = json.loads(response.text)
        item_loader = response.meta.get('article_item', "")
        item_loader.add_value('praise_nums', j_data['DiggCount'])
        item_loader.add_value('fav_nums', j_data['TotalView'])
        item_loader.add_value('comment_nums', j_data['CommentCount'])
        item_loader.add_value('url_object_id', common.get_md5(response.meta.get('url', '')))
        article_item = item_loader.load_item()
        yield article_item
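parse_nums builds url_object_id with common.get_md5 from ArticleSpider.utils.common, which is not part of this commit. A minimal sketch of a typical implementation, assuming the helper simply hex-digests the UTF-8 encoded URL:

# utils/common.py (sketch; the real helper is not shown in this diff)
import hashlib


def get_md5(url):
    # Accept both str and bytes and return the hex MD5 digest used as url_object_id
    if isinstance(url, str):
        url = url.encode('utf-8')
    return hashlib.md5(url).hexdigest()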

@@ -17,8 +17,11 @@ Including another URLconf
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
from search.views import SearchSuggest, SearchView

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', TemplateView.as_view(template_name='index.html'), name='index'),
    path('suggest/', SearchSuggest.as_view(), name='suggest'),
    path('search/', SearchView.as_view(), name='search'),
]

@@ -1,3 +1,47 @@
from django.db import models

# Create your models here.
# -*- coding: utf-8 -*-
__author__ = 'bobby'
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
    analyzer, InnerObjectWrapper, Completion, Keyword, Text, Integer
from elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer
from elasticsearch_dsl.connections import connections

connections.create_connection(hosts=["localhost"])


class CustomAnalyzer(_CustomAnalyzer):
    def get_analysis_definition(self):
        return {}


ik_analyzer = CustomAnalyzer("ik_max_word", filter=["lowercase"])


class ArticleType(DocType):
    # Document type for Jobbole (伯乐在线) articles
    suggest = Completion(analyzer=ik_analyzer)
    title = Text(analyzer="ik_max_word")
    create_date = Date()
    url = Keyword()
    url_object_id = Keyword()
    front_image_url = Keyword()
    front_image_path = Keyword()
    praise_nums = Integer()
    comment_nums = Integer()
    fav_nums = Integer()
    tags = Text(analyzer="ik_max_word")
    content = Text(analyzer="ik_max_word")

    class Meta:
        index = "jobbole"
        doc_type = "article"


if __name__ == "__main__":
    ArticleType.init()
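ArticleType declares a Completion field named suggest, but nothing in this hunk shows how it gets populated. The following is only a hedged sketch of one common approach: tokenize the title and tags with the ik_max_word analyzer and store the tokens with weights; gen_suggests is a hypothetical helper name, not code from this commit.

# Sketch: build completion-suggester input for one article (hypothetical helper)
from elasticsearch_dsl.connections import connections

es = connections.get_connection()  # reuse the connection created in search/models.py


def gen_suggests(index, info_tuple):
    # info_tuple: iterable of (text, weight) pairs; a higher weight ranks the suggestion higher
    used_words = set()
    suggests = []
    for text, weight in info_tuple:
        if not text:
            continue
        analyzed = es.indices.analyze(index=index,
                                      body={"text": text, "analyzer": "ik_max_word",
                                            "filter": ["lowercase"]})
        tokens = set(t["token"] for t in analyzed["tokens"] if len(t["token"]) > 1)
        new_words = tokens - used_words
        used_words.update(new_words)
        if new_words:
            suggests.append({"input": list(new_words), "weight": weight})
    return suggests


# Usage sketch: article.suggest = gen_suggests("jobbole", ((article.title, 10), (article.tags, 7)))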

@@ -1,3 +1,88 @@
from django.shortcuts import render
from django.views.generic.base import View
from search.models import ArticleType
from django.http import HttpResponse
import json
from elasticsearch import Elasticsearch
from datetime import datetime

client = Elasticsearch(hosts=['127.0.0.1'])


# Create your views here.
class SearchSuggest(View):
    # Search-suggestion (autocomplete) endpoint
    def get(self, request):
        key_words = request.GET.get('s', '')
        re_datas = []
        if key_words:
            s = ArticleType.search()
            s = s.suggest('my_suggest', key_words, completion={
                "field": "suggest", "fuzzy": {
                    "fuzziness": 2
                },
                "size": 10
            })
            suggestions = s.execute_suggest()
            for match in suggestions.my_suggest[0].options:
                source = match._source
                re_datas.append(source["title"])
        return HttpResponse(json.dumps(re_datas), content_type="application/json")


class SearchView(View):
    def get(self, request):
        key_words = request.GET.get("q", '')
        page = request.GET.get('p', '1')
        try:
            page = int(page)
        except ValueError:
            page = 1
        start_time = datetime.now()
        response = client.search(
            index="jobbole",
            body={
                "query": {
                    "multi_match": {
                        "query": key_words,
                        "fields": ["tags", "title", "content"]
                    }
                },
                "from": (page - 1) * 10,
                "size": 10,
                "highlight": {
                    "pre_tags": ['<span class="keyWord">'],
                    "post_tags": ['</span>'],
                    "fields": {
                        "title": {},
                        "content": {},
                    }
                }
            }
        )
        end_time = datetime.now()
        last_seconds = (end_time - start_time).total_seconds()
        total_nums = response['hits']['total']
        # Round the page count up when the total is not a multiple of the page size
        if (total_nums % 10) > 0:
            page_nums = int(total_nums / 10) + 1
        else:
            page_nums = int(total_nums / 10)
        # Build the result list, pulling each field's value out of the hit
        hit_list = []
        for hit in response['hits']['hits']:
            hit_dict = {}
            if 'title' in hit['highlight']:
                hit_dict['title'] = "".join(hit['highlight']['title'])
            else:
                hit_dict['title'] = hit['_source']['title']
            if 'content' in hit['highlight']:
                hit_dict['content'] = "".join(hit['highlight']['content'])[:500]
            else:
                hit_dict['content'] = hit['_source']['content'][:500]
            hit_dict["create_date"] = hit['_source']['create_date']
            hit_dict["url"] = hit['_source']['url']
            hit_dict["score"] = hit['_score']
            hit_list.append(hit_dict)
        return render(request, 'result.html',
                      {'page': page, 'total_nums': total_nums, 'all_hits': hit_list, 'key_words': key_words,
                       'page_nums': page_nums, 'last_seconds': last_seconds})
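With the suggest/ and search/ routes wired up in urls.py, the two views can be smoke-tested without a browser. A minimal sketch using the requests library against a local development server; the host, port, and sample query are assumptions, not part of this commit.

# Quick smoke test for the new endpoints (assumes ./manage.py runserver on 127.0.0.1:8000
# and a populated "jobbole" index in Elasticsearch)
import requests

BASE = "http://127.0.0.1:8000"

# SearchSuggest reads the 's' query parameter and returns a JSON list of suggested titles
print(requests.get(BASE + "/suggest/", params={"s": "python"}).json())

# SearchView reads 'q' (keywords) and 'p' (page) and renders result.html
resp = requests.get(BASE + "/search/", params={"q": "python", "p": 1})
print(resp.status_code, len(resp.text))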

@@ -66,8 +66,8 @@
<script type="text/javascript" src="{% static 'js/jquery.js' %}"></script>
<script type="text/javascript" src="{% static 'js/global.js' %}"></script>
<script type="text/javascript">
var suggest_url = "{% url "suggest" %}"
var search_url = "{% url "search" %}"
$('.searchList').on('click', '.searchItem', function(){

@@ -124,7 +124,7 @@
<script type="text/javascript" src="{% static 'js/global.js' %}"></script>
<script type="text/javascript" src="{% static 'js/pagination.js' %}"></script>
<script type="text/javascript">
var search_url = "{% url "search" %}"
$('.searchList').on('click', '.searchItem', function(){
$('.searchList .searchItem').removeClass('current');
