parent
b28724a877
commit
b449249b3e
@ -0,0 +1,53 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# @Time :2021/11/6 10:34
|
||||
# @Author :1900301218
|
||||
# @File :pythonpc.py
|
||||
# @Software :PyCharm
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
# Base URL of the douban Top-250 list; pagination paths are appended to it.
test_url = 'http://movie.douban.com/top250/'
|
||||
|
||||
|
||||
def download_page(url):
    """Fetch *url* and return the raw response body as bytes.

    Sends a desktop-browser User-Agent header because douban rejects
    the default python-requests User-Agent.

    :param url: absolute URL to download
    :return: response body as bytes (``response.content``)
    :raises requests.RequestException: on connection failure or timeout
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
    }

    # Bug fix: the original called requests.get(url, 'utf-8', headers=...),
    # which passes 'utf-8' as the second positional parameter `params`
    # (the query string), not an encoding.  A timeout is added so a
    # stalled server cannot hang the scraper forever.
    response = requests.get(url, headers=headers, timeout=10)
    return response.content
|
||||
|
||||
# Module-level accumulator of scraped movie titles; parse_html() appends
# to it across recursive pagination calls and returns it.
movie_name_list = []
|
||||
|
||||
|
||||
def parse_html(html):
    """Parse one Top-250 page, collect its titles, and follow pagination.

    Appends every movie title found in *html* to the module-level
    ``movie_name_list``, then recursively downloads and parses the
    "next" page if a pager link is present.

    :param html: page markup as bytes/str, as returned by download_page()
    :return: the shared ``movie_name_list`` with all titles collected so far
    """
    # Name the parser explicitly: bare BeautifulSoup(html) emits a
    # GuessedAtParserWarning and may pick different parsers (and thus
    # parse trees) on different machines.
    soup = BeautifulSoup(html, 'html.parser')

    movie_list_soup = soup.find('ol', attrs={'class': 'grid_view'})
    if movie_list_soup is not None:
        for movie_li in movie_list_soup.find_all('li'):
            detail = movie_li.find('div', attrs={'class': 'hd'})
            movie_name = detail.find(
                'span', attrs={'class': 'title'}).getText()
            movie_name_list.append(movie_name)

    # Bug fix: the original chained .find('a') straight onto
    # soup.find('span', ...), which raises AttributeError on pages where
    # the pager span is missing.  Guard against a None span first.
    next_span = soup.find('span', attrs={'class': 'next'})
    next_page = next_span.find('a') if next_span is not None else None
    if next_page:
        parse_html(download_page(test_url + next_page['href']))
    return movie_name_list
|
||||
|
||||
|
||||
def main():
    """Scrape all Top-250 titles and write them to pythonworkinfo.txt.

    Each title is written on its own line and echoed to stdout.
    """
    out_path = r"pythonworkinfo.txt"
    names = parse_html(download_page(test_url))
    # Use a context manager so the file is closed even if an exception
    # occurs mid-write (the original leaked the handle on error), and an
    # explicit UTF-8 encoding so Chinese titles don't raise
    # UnicodeEncodeError on platforms with a non-UTF-8 default codec.
    with open(out_path, "w", encoding="utf-8") as fp:
        if names is not None:
            for name in names:
                # Bug fix: the original wrote titles back-to-back with no
                # separator, producing one unreadable run of text.
                fp.write(name + "\n")
                print(name)
|
||||
|
||||
|
||||
# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
Loading…
Reference in new issue