How to crawl JD product information and comments with Python and store them in MySQL

Building the MySQL tables. Problem: with SQLAlchemy, a column that is not part of the primary key cannot be made auto-incrementing. I want this column purely as an auto-incrementing index, but autoincrement=True has no effect on it. How can I get it to auto-increment? (SQLAlchemy only emits AUTO_INCREMENT for an integer column that belongs to the primary key, so the model below works around this with a composite primary key: autoincrement=True on the integer id, autoincrement=False on sku_id.)
from sqlalchemy import String, Integer, Text, Column
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine(
    "mysql+pymysql://root:root@127.0.0.1:3306/jdcrawl?charset=utf8",
    pool_size=200,
    max_overflow=300,
    echo=False
)

Base = declarative_base()  # instantiate the declarative base

class Goods(Base):
    __tablename__ = 'goods'
    # Composite primary key: autoincrement only takes effect on the integer id
    id = Column(Integer(), primary_key=True, autoincrement=True)
    sku_id = Column(String(200), primary_key=True, autoincrement=False)
    name = Column(String(200))
    price = Column(String(200))
    comments_num = Column(Integer)
    shop = Column(String(200))
    link = Column(String(200))

class Comments(Base):
    __tablename__ = 'comments'
    id = Column(Integer(), primary_key=True, autoincrement=True, nullable=False)
    sku_id = Column(String(200), primary_key=True, autoincrement=False)
    comments = Column(Text())

Base.metadata.create_all(engine)
Session = sessionmaker(engine)
sess_db = scoped_session(Session)
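If the goal is simply a unique product key plus a separate auto-increment counter, an alternative is to flip the roles: let the integer id be the sole primary key (so SQLAlchemy emits AUTO_INCREMENT for it) and enforce uniqueness on sku_id with a unique index. A minimal sketch with a hypothetical class and table name, not the code used in this article:

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class GoodsAlt(Base):
    # Hypothetical variant of the Goods model above
    __tablename__ = 'goods_alt'
    id = Column(Integer, primary_key=True, autoincrement=True)  # sole PK, auto-increments
    sku_id = Column(String(200), unique=True, nullable=False)   # natural key via a unique index

MySQL only requires an AUTO_INCREMENT column to be part of some key, which a sole primary key satisfies; the unique index on sku_id still prevents duplicate products.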
Version 1. Problem: after crawling a few pages of comments, the crawler starts receiving blank pages; adding a Referer header does not help.
Attempted fix: switch the comment-fetching thread pool to a single thread and add a 1-second delay after each page of comments (this becomes version 2 below).
# Don't crawl too fast!!! Otherwise the comments won't be returned.
from bs4 import BeautifulSoup
import requests
from urllib import parse
import csv, json, re
import threadpool
import time
from jd_mysqldb import Goods, Comments, sess_db

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'Cookie': '__jdv=76161171|baidu|-|organic|%25e4%25ba%25ac%25e4%25b8%259c|1613711947911; __jdu=16137119479101182770449; areaid=7; iploc-djd=7-458-466-0; pcsycityid=cn_410000_0_0; shshshfpa=07383463-032f-3f99-9d40-639cb57c6e28-1613711950; shshshfpb=u8s9uvxk66gfibm1munriog%3d%3d; user-key=153f6b4d-0704-4e56-82b6-8646f3f0dad4; cn=0; shshshfp=9a88944b34cb0ff3631a0a95907b75eb; __jdc=122270672; 3ab9d23f7a4b3c9b=seelvnxbpu7oaa3ux5jtkr5lqadm5yfjrky23z6hdbu4ot2nwygx525ckffvhtrdj7q5djrmrzqiqjow5gvby43xvi; jwotest_product=99; __jda=122270672.16137119479101182770449.1613711948.1613738165.1613748918.4; jsessionid=c06ec8d2e9384d2628ae22b1a6f9f8fc.s1; shshshsid=ab2ca3143928b1b01f6c5b71a15fcebe_5_1613750374847; __jdb=122270672.5.16137119479101182770449|4.1613748918',
    'Referer': 'https://www.jd.com/'
}

num = 0           # number of products
comments_num = 0  # number of comments

# Get product info and the skuId
def getindex(url):
    session = requests.Session()
    session.headers = headers
    global num
    res = session.get(url, headers=headers)
    print(res.status_code)
    res.encoding = res.apparent_encoding
    soup = BeautifulSoup(res.text, 'lxml')
    items = soup.select('li.gl-item')
    for item in items[:3]:  # crawl 3 products as a test
        title = item.select_one('.p-name a em').text.strip().replace(' ', '')
        price = item.select_one('.p-price strong').text.strip().replace('¥', '')
        try:
            shop = item.select_one('.p-shopnum a').text.strip()  # shop selector for books
        except:
            shop = item.select_one('.p-shop a').text.strip()  # shop selector for other products
        link = parse.urljoin('https://', item.select_one('.p-img a').get('href'))
        skuid = re.search(r'\d+', link).group()
        comments_num = getcommentsnum(skuid, session)
        print(skuid, title, price, shop, link, comments_num)
        print("Saving the product to the database...")
        try:
            intogoods(skuid, title, price, shop, link, comments_num)
        except Exception as e:
            print(e)
            sess_db.rollback()
        num += 1
        print("Fetching comments...")
        # Get the total number of comment pages
        url1 = f'https://club.jd.com/comment/productPageComments.action?productId={skuid}&score=0&sortType=5&page=0&pageSize=10'
        headers['Referer'] = f'https://item.jd.com/{skuid}.html'
        headers['Connection'] = 'keep-alive'
        res2 = session.get(url1, headers=headers)
        res2.encoding = res2.apparent_encoding
        json_data = json.loads(res2.text)
        max_page = json_data['maxPage']  # in tests at most 100 pages of comments are available, 10 comments per page
        args = []
        for i in range(0, max_page):
            # This URL returns the comments as plain JSON
            url2 = f'https://club.jd.com/comment/productPageComments.action?productId={skuid}&score=0&sortType=5&page={i}&pageSize=10'
            # This URL returns JSONP instead, which would need extracting:
            # url2_2 = f'https://club.jd.com/comment/productPageComments.action?callback=jquery9287224&productId={skuid}&score=0&sortType=5&page={i}&pageSize=10'
            args.append(([session, skuid, url2], None))
        pool2 = threadpool.ThreadPool(2)  # 2 threads
        reque2 = threadpool.makeRequests(getcomments, args)  # create the tasks
        for r in reque2:
            pool2.putRequest(r)  # submit the tasks to the pool
        pool2.wait()

# Get the total number of comments
def getcommentsnum(skuid, sess):
    headers['Referer'] = f'https://item.jd.com/{skuid}.html'
    url = f'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={skuid}'
    res = sess.get(url, headers=headers)
    try:
        res.encoding = res.apparent_encoding
        json_data = json.loads(res.text)  # parse the JSON into a dict
        num = json_data['CommentsCount'][0]['CommentCount']
        return num
    except:
        return 'error'

# Fetch one page of comments
def getcomments(sess, skuid, url2):
    global comments_num
    print(url2)
    headers['Referer'] = f'https://item.jd.com/{skuid}.html'
    res2 = sess.get(url2, headers=headers)
    res2.encoding = 'gbk'
    json_data = res2.text
    '''
    # If url2_2 is used, extract the JSON from the JSONP wrapper first:
    start = res2.text.find('jquery9287224(') + len('jquery9287224(')
    end = res2.text.find(');')
    json_data = res2.text[start:end]
    '''
    dict_data = json.loads(json_data)
    try:
        comments = dict_data['comments']
        for item in comments:
            comment = item['content'].replace('\n', '')
            # print(comment)
            comments_num += 1
            try:
                intocomments(skuid, comment)
            except Exception as e:
                print(e)
                sess_db.rollback()
    except:
        pass

# Save product info to the database
def intogoods(skuid, title, price, shop, link, comments_num):
    goods_data = Goods(
        sku_id=skuid,
        name=title,
        price=price,
        comments_num=comments_num,
        shop=shop,
        link=link
    )
    sess_db.add(goods_data)
    sess_db.commit()

# Save a comment to the database
def intocomments(skuid, comment):
    comments_data = Comments(
        sku_id=skuid,
        comments=comment
    )
    sess_db.add(comments_data)
    sess_db.commit()

if __name__ == '__main__':
    start_time = time.time()
    urls = []
    keyword = parse.quote(input("Enter a search keyword: "))
    for i in range(1, 2):  # crawl one search page as a test
        url = f'https://search.jd.com/Search?keyword={keyword}&wq={keyword}&page={i}'
        urls.append(([url, ], None))  # the argument format threadpool requires
    pool = threadpool.ThreadPool(2)  # thread pool with 2 threads
    reque = threadpool.makeRequests(getindex, urls)  # create the tasks
    for r in reque:
        pool.putRequest(r)  # submit the tasks to the pool
    pool.wait()  # wait for all tasks to finish
    print("Crawled {} products and {} comments in {}s".format(num, comments_num, time.time() - start_time))
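A side note on the dispatch mechanism: the third-party threadpool module used here is old and requires the awkward ([args], None) tuple format. The same fan-out can be written with the standard library's concurrent.futures; a rough equivalent sketch, with function and variable names of my own rather than from the original:

from concurrent.futures import ThreadPoolExecutor

def run_in_pool(func, arg_lists, workers=2):
    # arg_lists holds plain argument lists, e.g. [[url1], [url2]],
    # instead of threadpool's ([args], None) tuples.
    with ThreadPoolExecutor(max_workers=workers) as executor:
        futures = [executor.submit(func, *args) for args in arg_lists]
        for f in futures:
            f.result()  # block until done; re-raises worker exceptions

The main block would then become run_in_pool(getindex, [[u] for u in url_strings], workers=2), with url_strings a plain list of search-page URLs.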
Version 2. Tested: the blank pages no longer appear.
Next optimization: fetch the comments of two or more products at the same time.
# Don't crawl too fast!!! Otherwise the comments won't be returned.
from bs4 import BeautifulSoup
import requests
from urllib import parse
import csv, json, re
import threadpool
import time
from jd_mysqldb import Goods, Comments, sess_db

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'Cookie': '__jdv=76161171|baidu|-|organic|%25e4%25ba%25ac%25e4%25b8%259c|1613711947911; __jdu=16137119479101182770449; areaid=7; iploc-djd=7-458-466-0; pcsycityid=cn_410000_0_0; shshshfpa=07383463-032f-3f99-9d40-639cb57c6e28-1613711950; shshshfpb=u8s9uvxk66gfibm1munriog%3d%3d; user-key=153f6b4d-0704-4e56-82b6-8646f3f0dad4; cn=0; shshshfp=9a88944b34cb0ff3631a0a95907b75eb; __jdc=122270672; 3ab9d23f7a4b3c9b=seelvnxbpu7oaa3ux5jtkr5lqadm5yfjrky23z6hdbu4ot2nwygx525ckffvhtrdj7q5djrmrzqiqjow5gvby43xvi; jwotest_product=99; __jda=122270672.16137119479101182770449.1613711948.1613738165.1613748918.4; jsessionid=c06ec8d2e9384d2628ae22b1a6f9f8fc.s1; shshshsid=ab2ca3143928b1b01f6c5b71a15fcebe_5_1613750374847; __jdb=122270672.5.16137119479101182770449|4.1613748918',
    'Referer': 'https://www.jd.com/'
}

num = 0           # number of products
comments_num = 0  # number of comments

# Get product info and the skuId
def getindex(url):
    session = requests.Session()
    session.headers = headers
    global num
    res = session.get(url, headers=headers)
    print(res.status_code)
    res.encoding = res.apparent_encoding
    soup = BeautifulSoup(res.text, 'lxml')
    items = soup.select('li.gl-item')
    for item in items[:2]:  # crawl 2 products as a test
        title = item.select_one('.p-name a em').text.strip().replace(' ', '')
        price = item.select_one('.p-price strong').text.strip().replace('¥', '')
        try:
            shop = item.select_one('.p-shopnum a').text.strip()  # shop selector for books
        except:
            shop = item.select_one('.p-shop a').text.strip()  # shop selector for other products
        link = parse.urljoin('https://', item.select_one('.p-img a').get('href'))
        skuid = re.search(r'\d+', link).group()
        headers['Referer'] = f'https://item.jd.com/{skuid}.html'
        headers['Connection'] = 'keep-alive'
        comments_num = getcommentsnum(skuid, session)
        print(skuid, title, price, shop, link, comments_num)
        print("Saving the product to the database...")
        try:
            intogoods(skuid, title, price, shop, link, comments_num)
        except Exception as e:
            print(e)
            sess_db.rollback()
        num += 1
        print("Fetching comments...")
        # Get the total number of comment pages
        url1 = f'https://club.jd.com/comment/productPageComments.action?productId={skuid}&score=0&sortType=5&page=0&pageSize=10'
        res2 = session.get(url1, headers=headers)
        res2.encoding = res2.apparent_encoding
        json_data = json.loads(res2.text)
        max_page = json_data['maxPage']  # in tests at most 100 pages of comments are available, 10 comments per page
        print("{} has {} pages of comments".format(skuid, max_page))
        if max_page == 0:
            intocomments(skuid, '0')
        else:
            for i in range(0, max_page):
                # This URL returns the comments as plain JSON
                url2 = f'https://club.jd.com/comment/productPageComments.action?productId={skuid}&score=0&sortType=5&page={i}&pageSize=10'
                # This URL returns JSONP instead, which would need extracting:
                # url2_2 = f'https://club.jd.com/comment/productPageComments.action?callback=jquery9287224&productId={skuid}&score=0&sortType=5&page={i}&pageSize=10'
                print("Fetching comment page {}: {}".format(i + 1, url2))
                getcomments(session, skuid, url2)
                time.sleep(1)  # single-threaded, one page per second

# Get the total number of comments
def getcommentsnum(skuid, sess):
    url = f'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={skuid}'
    res = sess.get(url)
    try:
        res.encoding = res.apparent_encoding
        json_data = json.loads(res.text)  # parse the JSON into a dict
        num = json_data['CommentsCount'][0]['CommentCount']
        return num
    except:
        return 'error'

# Fetch one page of comments
def getcomments(sess, skuid, url2):
    global comments_num
    res2 = sess.get(url2)
    res2.encoding = res2.apparent_encoding
    json_data = res2.text
    '''
    # If url2_2 is used, extract the JSON from the JSONP wrapper first:
    start = res2.text.find('jquery9287224(') + len('jquery9287224(')
    end = res2.text.find(');')
    json_data = res2.text[start:end]
    '''
    dict_data = json.loads(json_data)
    comments = dict_data['comments']
    for item in comments:
        comment = item['content'].replace('\n', '')
        # print(comment)
        comments_num += 1
        try:
            intocomments(skuid, comment)
        except Exception as e:
            print(e)
            sess_db.rollback()

# Save product info to the database
def intogoods(skuid, title, price, shop, link, comments_num):
    goods_data = Goods(
        sku_id=skuid,
        name=title,
        price=price,
        comments_num=comments_num,
        shop=shop,
        link=link
    )
    sess_db.add(goods_data)
    sess_db.commit()

# Save a comment to the database
def intocomments(skuid, comment):
    comments_data = Comments(
        sku_id=skuid,
        comments=comment
    )
    sess_db.add(comments_data)
    sess_db.commit()

if __name__ == '__main__':
    start_time = time.time()
    urls = []
    keyword = parse.quote(input("Enter a search keyword: "))
    for i in range(1, 2):  # crawl one search page as a test
        url = f'https://search.jd.com/Search?keyword={keyword}&wq={keyword}&page={i}'
        urls.append(([url, ], None))  # the argument format threadpool requires
    pool = threadpool.ThreadPool(2)  # thread pool with 2 threads
    reque = threadpool.makeRequests(getindex, urls)  # create the tasks
    for r in reque:
        pool.putRequest(r)  # submit the tasks to the pool
    pool.wait()  # wait for all tasks to finish
    print("Crawled {} products and {} comments in {}s".format(num, comments_num, time.time() - start_time))
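Since the blank pages behave like rate limiting (an empty or anti-bot body that fails to parse as JSON), one common mitigation is to retry each comment page with exponential backoff instead of failing outright. A sketch; the helper name and delays are my own, not part of the original code:

import json
import time

def get_json_with_retry(sess, url, retries=3, base_delay=1.0):
    # Retry a comment-page request, backing off 1s, 2s, 4s, ...
    # Returns the parsed dict, or None if every attempt came back blank.
    for attempt in range(retries):
        res = sess.get(url)
        res.encoding = res.apparent_encoding
        if res.text.strip():
            try:
                return json.loads(res.text)
            except json.JSONDecodeError:
                pass  # blank/anti-bot page; fall through and retry
        time.sleep(base_delay * (2 ** attempt))
    return None  # caller should skip this page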
Version 3: ... no good, the blank pages are back.
# Don't crawl too fast!!! Otherwise the comments won't be returned.
from bs4 import BeautifulSoup
import requests
from urllib import parse
import csv, json, re
import threadpool
import time
from jd_mysqldb import Goods, Comments, sess_db

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'Cookie': '__jdv=76161171|baidu|-|organic|%25e4%25ba%25ac%25e4%25b8%259c|1613711947911; __jdu=16137119479101182770449; areaid=7; iploc-djd=7-458-466-0; pcsycityid=cn_410000_0_0; shshshfpa=07383463-032f-3f99-9d40-639cb57c6e28-1613711950; shshshfpb=u8s9uvxk66gfibm1munriog%3d%3d; user-key=153f6b4d-0704-4e56-82b6-8646f3f0dad4; cn=0; shshshfp=9a88944b34cb0ff3631a0a95907b75eb; __jdc=122270672; 3ab9d23f7a4b3c9b=seelvnxbpu7oaa3ux5jtkr5lqadm5yfjrky23z6hdbu4ot2nwygx525ckffvhtrdj7q5djrmrzqiqjow5gvby43xvi; jwotest_product=99; __jda=122270672.16137119479101182770449.1613711948.1613738165.1613748918.4; jsessionid=c06ec8d2e9384d2628ae22b1a6f9f8fc.s1; shshshsid=ab2ca3143928b1b01f6c5b71a15fcebe_5_1613750374847; __jdb=122270672.5.16137119479101182770449|4.1613748918',
    'Referer': 'https://www.jd.com/'
}

num = 0           # number of products
comments_num = 0  # number of comments

# Get product info and the skuId
def getindex(url):
    global num
    skuids = []
    session = requests.Session()
    session.headers = headers
    res = session.get(url, headers=headers)
    print(res.status_code)
    res.encoding = res.apparent_encoding
    soup = BeautifulSoup(res.text, 'lxml')
    items = soup.select('li.gl-item')
    for item in items[:3]:  # crawl 3 products as a test
        title = item.select_one('.p-name a em').text.strip().replace(' ', '')
        price = item.select_one('.p-price strong').text.strip().replace('¥', '')
        try:
            shop = item.select_one('.p-shopnum a').text.strip()  # shop selector for books
        except:
            shop = item.select_one('.p-shop a').text.strip()  # shop selector for other products
        link = parse.urljoin('https://', item.select_one('.p-img a').get('href'))
        skuid = re.search(r'\d+', link).group()
        skuids.append(([skuid, session], None))
        headers['Referer'] = f'https://item.jd.com/{skuid}.html'
        headers['Connection'] = 'keep-alive'
        comments_num = getcommentsnum(skuid, session)  # number of comments
        print(skuid, title, price, shop, link, comments_num)
        print("Saving the product to the database...")
        try:
            intogoods(skuid, title, price, shop, link, comments_num)
        except Exception as e:
            print(e)
            sess_db.rollback()
        num += 1
    print("Fetching comments and saving them to the database...")
    pool2 = threadpool.ThreadPool(3)  # fetch comments of up to 3 products concurrently
    task = threadpool.makeRequests(getcomments, skuids)
    for r in task:
        pool2.putRequest(r)
    pool2.wait()

# Fetch all comments for one product
def getcomments(skuid, sess):
    # Get the total number of comment pages
    url1 = f'https://club.jd.com/comment/productPageComments.action?productId={skuid}&score=0&sortType=5&page=0&pageSize=10'
    res2 = sess.get(url1, headers=headers)
    res2.encoding = res2.apparent_encoding
    json_data = json.loads(res2.text)
    max_page = json_data['maxPage']  # in tests at most 100 pages of comments are available, 10 comments per page
    print("{} has {} pages of comments".format(skuid, max_page))
    if max_page == 0:
        intocomments(skuid, '0')
    else:
        for i in range(0, max_page):
            # This URL returns the comments as plain JSON
            url2 = f'https://club.jd.com/comment/productPageComments.action?productId={skuid}&score=0&sortType=5&page={i}&pageSize=10'
            # This URL returns JSONP instead, which would need extracting:
            # url2_2 = f'https://club.jd.com/comment/productPageComments.action?callback=jquery9287224&productId={skuid}&score=0&sortType=5&page={i}&pageSize=10'
            print("Fetching comment page {}: {}".format(i + 1, url2))
            getcomments_one(sess, skuid, url2)
            time.sleep(1)

# Get the total number of comments
def getcommentsnum(skuid, sess):
    url = f'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={skuid}'
    res = sess.get(url)
    try:
        res.encoding = res.apparent_encoding
        json_data = json.loads(res.text)  # parse the JSON into a dict
        num = json_data['CommentsCount'][0]['CommentCount']
        return num
    except:
        return 'error'

# Fetch one page of comments
def getcomments_one(sess, skuid, url2):
    global comments_num
    res2 = sess.get(url2)
    res2.encoding = res2.apparent_encoding
    json_data = res2.text
    '''
    # If url2_2 is used, extract the JSON from the JSONP wrapper first:
    start = res2.text.find('jquery9287224(') + len('jquery9287224(')
    end = res2.text.find(');')
    json_data = res2.text[start:end]
    '''
    dict_data = json.loads(json_data)
    comments = dict_data['comments']
    for item in comments:
        comment = item['content'].replace('\n', '')
        # print(comment)
        comments_num += 1
        try:
            intocomments(skuid, comment)
        except Exception as e:
            print(e)
            print("rollback!")
            sess_db.rollback()

# Save product info to the database
def intogoods(skuid, title, price, shop, link, comments_num):
    goods_data = Goods(
        sku_id=skuid,
        name=title,
        price=price,
        comments_num=comments_num,
        shop=shop,
        link=link
    )
    sess_db.add(goods_data)
    sess_db.commit()

# Save a comment to the database
def intocomments(skuid, comment):
    comments_data = Comments(
        sku_id=skuid,
        comments=comment
    )
    sess_db.add(comments_data)
    sess_db.commit()

if __name__ == '__main__':
    start_time = time.time()
    urls = []
    keyword = parse.quote(input("Enter a search keyword: "))
    for i in range(1, 2):  # crawl one search page as a test
        url = f'https://search.jd.com/Search?keyword={keyword}&wq={keyword}&page={i}'
        urls.append(([url, ], None))  # the argument format threadpool requires
    pool = threadpool.ThreadPool(2)  # thread pool with 2 threads
    reque = threadpool.makeRequests(getindex, urls)  # create the tasks
    for r in reque:
        pool.putRequest(r)  # submit the tasks to the pool
    pool.wait()  # wait for all tasks to finish
    print("Crawled {} products and {} comments in {}s".format(num, comments_num, time.time() - start_time))
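A plausible explanation for the regression: each of the three worker threads sleeps 1 second per page, but their requests interleave, so the aggregate rate is roughly three comment pages per second, above whatever threshold version 2 stayed under. One way to keep the concurrency while capping the global rate is a shared throttle that enforces a minimum gap between any two requests across all threads. A sketch with invented names, not the author's code:

import threading
import time

MIN_INTERVAL = 1.0        # seconds between any two comment requests, pool-wide
_last_request = 0.0
_throttle_lock = threading.Lock()

def throttled_get(sess, url, **kwargs):
    # Serialize the pacing decision so the whole pool, not each thread,
    # respects MIN_INTERVAL; the request itself still runs concurrently.
    global _last_request
    with _throttle_lock:
        wait = MIN_INTERVAL - (time.time() - _last_request)
        if wait > 0:
            time.sleep(wait)
        _last_request = time.time()
    return sess.get(url, **kwargs)

Replacing the sess.get calls in getcomments_one with throttled_get would let the time.sleep(1) in getcomments be dropped.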
That concludes this walkthrough of how to crawl JD product information and comments with Python and store them in MySQL.