guange 2019-01-21 10:17:09 +08:00
parent 2fcd8ba6e0
commit 5639278337
10 changed files with 136 additions and 45 deletions

.gitignore vendored

@@ -7,3 +7,4 @@ chapter1/crawler/datas/comments1/
 chapter1/crawler/datas/products/
 chapter1/crawler/taobao/settings.py
 chapter2/mysite/mysite/settings.py
+.env

.vscode/settings.json vendored Normal file

@@ -0,0 +1,3 @@
+{
+    "python.pythonPath": ".env/bin/python"
+}


@@ -1,7 +1,9 @@
-import re,requests,json
+import re
+import requests
+import json
 
 s = requests.session()
-url = 'https://club.jd.com/comment/productPageComments.action'
+url = "https://club.jd.com/comment/productPageComments.action"
 data = {
     'callback': 'fetchJSON_comment98vv61',
     'productId': '3888284',
@@ -12,20 +14,31 @@ data = {
     'page': 0
 }
 
 def main():
     while True:
         t = s.get(url, params=data).text
         try:
-            t = re.search(r'(?<=fetchJSON_comment98vv61\().*(?=\);)',t).group(0)
+            t = re.search(
+                r'(?<=fetchJSON_comment98vv61\().*(?=\);)', t).group(0)
         except Exception as e:
+            print(e)
             break
         j = json.loads(t)
-        commentSummary = j['comments']
-        for comment in commentSummary:
-            c_content = comment['content']
+        commentSummary = j["comments"]
+        for comment in commentSummary:
+            c_content = comment["content"]
             c_time = comment['referenceTime']
             c_name = comment['nickname']
             c_client = comment['userClientShow']
             print('{} {} {}\n{}\n'.format(c_name, c_time, c_client, c_content))
         data['page'] += 1
+
+if __name__ == "__main__":
+    import time
+    import datetime
+    oldtime = datetime.timedelta(days=1)
+    print(oldtime)
+    print(datetime.datetime.now()-oldtime)
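The new `__main__` block only computes and prints a one-day cutoff; nothing in this hunk consumes it yet. A minimal sketch of how such a cutoff could filter comments by their `referenceTime` field (the `is_recent` helper and the timestamp format are assumptions, not part of this commit):

```python
import datetime

def is_recent(reference_time, days=1):
    """True if a comment is newer than the cutoff (hypothetical helper).

    Assumes JD-style timestamps such as '2019-01-20 18:30:00'.
    """
    cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
    parsed = datetime.datetime.strptime(reference_time, '%Y-%m-%d %H:%M:%S')
    return parsed >= cutoff
```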


@@ -67,4 +67,3 @@ class JDCommentSummary(scrapy.Item):
     comment_count = scrapy.Field()
     default_good_count = scrapy.Field()
     good_rate = scrapy.Field()
-


@@ -55,7 +55,8 @@ class Jd1Spider(scrapy.Spider):
             c_name = comment['nickname']
             c_client = comment['userClientShow']
             c_id = "%s_%s" % (product_id, comment['id'])
-            print('{}, {} {} {}\n{}\n'.format(c_id, c_name, c_time, c_client, c_content))
+            print('{}, {} {} {}\n{}\n'.format(
+                c_id, c_name, c_time, c_client, c_content))
             yield JDCommentItem(
                 id=c_id,
                 user_name=c_name,
@@ -76,7 +77,6 @@ class Jd1Spider(scrapy.Spider):
                 good_rate=productCommentSummary['goodRate']
             )
             page = page + 1
-
             next_comment_url = self.make_comment_url(product_id, page)
@@ -96,20 +96,26 @@ class Jd1Spider(scrapy.Spider):
         for item in items:
             # pdb.set_trace()
-            name = item.xpath('.//div[contains(@class, "p-name")]/a/em/text()').extract_first()
+            name = item.xpath(
+                './/div[contains(@class, "p-name")]/a/em/text()').extract_first()
             print(name)
             price = item.xpath(
                 './/div[contains(@class, "p-price")]/strong/i/text()').extract_first()
             print(price)
-            url = item.xpath('.//div[contains(@class, "p-name")]/a/@href').extract_first()
+            url = item.xpath(
+                './/div[contains(@class, "p-name")]/a/@href').extract_first()
             print('https:' + url)
-            img = item.xpath('.//div[@class="p-img"]/a/img/@data-lazy-img').extract_first()
+            img = item.xpath(
+                './/div[@class="p-img"]/a/img/@data-lazy-img').extract_first()
             if not img:
-                img = item.xpath('.//div[@class="p-img"]/a/img/@src').extract_first()
+                img = item.xpath(
+                    './/div[@class="p-img"]/a/img/@src').extract_first()
-            comment_num = item.xpath('.//div[@class="p-commit"]/strong/a/text()').extract_first()
+            comment_num = item.xpath(
+                './/div[@class="p-commit"]/strong/a/text()').extract_first()
             print(comment_num)
-            shop = item.xpath('.//div[@class="p-shop"]/span/a/@title').extract_first()
+            shop = item.xpath(
+                './/div[@class="p-shop"]/span/a/@title').extract_first()
             print(shop)
             yield JDProductItem(
@@ -132,7 +138,8 @@ class Jd1Spider(scrapy.Spider):
                     priority=100)
         # Fetch the next page
-        next_page = response.xpath('//a[@class="pn-next"]/@href').extract_first()
+        next_page = response.xpath(
+            '//a[@class="pn-next"]/@href').extract_first()
         if next_page:
             page = 1
             m = re.match(r'.+page=(\d+).+', next_page)
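The pagination branch above recovers the current page number from the `pn-next` link before building the next request. A standalone sketch of that regex extraction, assuming a JD-style search href (the sample URL is illustrative):

```python
import re

# Illustrative href shaped like the links the spider follows.
next_page = '/Search?keyword=phone&page=5&click=0'

page = 1
m = re.match(r'.+page=(\d+).+', next_page)
if m:
    # group(1) holds the digits captured after 'page='
    page = int(m.group(1))
print(page)  # -> 5
```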


@@ -158,6 +158,7 @@ function init(){
         ]
     });
+    if(document.getElementById('histogramChart')){
     var histogramChart = echarts.init(document.getElementById('histogramChart'));
     histogramChart.setOption({
@@ -255,6 +256,8 @@ function init(){
         ]
     });
+    }
+
     var lineChart2 = echarts.init(document.getElementById('lineChart2'));
     lineChart2.setOption({


@@ -1042,3 +1042,44 @@ border-radius: 3px;
 .popBox .ttBox{height: 30px; line-height: 30px; padding: 14px 30px; border-bottom: solid 1px #eef0f1;text-align: center;-webkit-box-sizing: content-box; -moz-box-sizing: content-box;box-sizing: content-box;}
 .popBox .ttBox .tt{font-size: 20px; display: inline-block; height: 30px;}
 .popBox .txtBox{height: calc(100% - 80px);overflow: auto;padding: 10px 0;}
+.percentChart{
+    font-size: 14px;
+    padding: 90px 40px 0px 0px;
+}
+.percentChart li{
+    margin-bottom: 25px;
+    clear: both;
+    line-height: 35px;
+    display: flex;
+}
+.percentChart li span.leftTitle{
+    text-align: right;
+    display: block;
+    color: #ffffff;
+    width: 130px;
+}
+.percentChart li .rightPercent{
+    flex: 1;
+    position: relative;
+    background-color: #32CD32;
+    height: 35px;
+    margin-left: 10px;
+}
+.percentChart li .rightPercent span{
+    position: absolute;
+    color: #fff;
+    right: 5px;
+    font-size: 12px;
+    line-height: 35px;
+}
+.percentChart li .rightPercent p{
+    background-color: #FF7F50;
+    width: 50%;
+    text-align: right;
+    color: #ffffff;
+    line-height: 35px;
+    font-size: 12px;
+    padding-right: 5px;
+    box-sizing: border-box;
+}


@@ -139,7 +139,29 @@
             </div>
             <div class="div_any_child">
                 <div class="div_any_title"><img src="{% static "images/title_4.png" %}">Product data collection (today)</div>
-                <p id="lineChart2" class="p_chart"></p>
+                <div class="percentChart">
+                    <li>
+                        <span class="leftTitle">JD Android</span>
+                        <div class="rightPercent">
+                            <span>120</span>
+                            <p style="width:30%">40</p>
+                        </div>
+                    </li>
+                    <li>
+                        <span class="leftTitle">JD iPhone</span>
+                        <div class="rightPercent">
+                            <span>500</span>
+                            <p style="width:60%">300</p>
+                        </div>
+                    </li>
+                    <li>
+                        <span class="leftTitle">WeChat Shopping</span>
+                        <div class="rightPercent">
+                            <span>300</span>
+                            <p style="width:50%">150</p>
+                        </div>
+                    </li>
+                </div>
             </div>
         </div>
     </div>
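In the new markup, the outer `span` shows the total for a row while the inner `p`'s width expresses the inner value as a share of it (300/500 → 60%). The numbers here are hardcoded samples; a sketch of computing the width in the Django view before rendering (the `bars` context variable and its fields are assumptions, not part of this commit):

```python
def percent_width(part, total):
    """Inner-bar width as an integer percentage, clamped to 0-100."""
    if total <= 0:
        return 0
    return max(0, min(100, round(part * 100 / total)))

# Hypothetical context data mirroring the three hardcoded rows.
bars = [
    {'label': 'JD Android', 'total': 120, 'part': 40},
    {'label': 'JD iPhone', 'total': 500, 'part': 300},
    {'label': 'WeChat Shopping', 'total': 300, 'part': 150},
]
for bar in bars:
    bar['width'] = percent_width(bar['part'], bar['total'])  # 33, 60, 50
```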


@@ -89,6 +89,7 @@ def collect_crawl_info(spark):
 def collect_news(spark):
     """Fetch the 20 most recent crawl records."""
     df = spark.sql("select * from jd_comment order by created_at desc limit 20")
     for row in df.rdd.collect():
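`collect_news` pulls the 20 newest comments back to the driver with `df.rdd.collect()`; the loop body is not shown in this hunk. Since the same commit adds `mysqlclient` to requirements.txt, one plausible (but assumed) destination is a MySQL table; a minimal sketch using MySQLdb with hypothetical credentials and a hypothetical `news` table:

```python
import MySQLdb

def collect_news(spark):
    """Sketch: copy the 20 newest comments into MySQL."""
    df = spark.sql(
        "select * from jd_comment order by created_at desc limit 20")
    # Connection settings are placeholders; real ones would come from config.
    conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='secret',
                           db='mysite', charset='utf8mb4')
    cur = conn.cursor()
    for row in df.rdd.collect():
        # Column names are assumed; the jd_comment schema is not shown here.
        cur.execute("insert into news (content, created_at) values (%s, %s)",
                    (row['content'], row['created_at']))
    conn.commit()
    conn.close()
```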


@@ -33,6 +33,7 @@ jupyter-core==4.4.0
 lxml==4.3.0
 MarkupSafe==1.1.0
 mistune==0.8.4
+mysqlclient==1.3.14
 nbconvert==5.4.0
 nbformat==4.4.0
 notebook==5.7.4