您好,欢迎访问一九零五行业门户网

在Python中使用CasperJS获取JS渲染生成的HTML内容的教程

文章摘要:其实这里 CasperJS 与 Python 没有直接关系,主要是依赖 CasperJS 调用 PhantomJS 的 WebKit 引擎来获取 HTML 文件内容。长期以来,爬虫抓取客户端 JavaScript 渲染生成的 HTML 页面都极为困难:Java 里有 HtmlUnit,而在 Python 里,我们可以使用独立、跨平台的 CasperJS。
    创建site.js(接口文件,输入:url,输出:html file)  
// site.js — interface script: input a --url, output the rendered HTML to --outputfile.
// usage: casperjs site.js --url=http://spys.ru/free-proxy-list/ie/ --outputfile=temp.html
// Relies on CasperJS driving the PhantomJS WebKit engine, so the HTML captured
// includes content generated by client-side JavaScript.
var fs = require('fs');
var casper = require('casper').create({
    pageSettings: {
        loadImages: false,   // skip images — we only need the DOM
        loadPlugins: false,
        userAgent: 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER'
    },
    logLevel: 'debug',       // log level (must be a string, not a bare identifier)
    verbose: true            // echo log messages to the console
});

var url = casper.cli.raw.get('url');
var outputfile = casper.cli.raw.get('outputfile');

// Request the page, then dump the fully rendered HTML to the output file.
casper.start(url, function () {
    fs.write(outputfile, this.getHTML(), 'w');
});

casper.run();
python 代码, checkout_proxy.py
"""checkout_proxy.py -- harvest HTTP proxies from a JS-rendered page.

Spawns CasperJS (via site.js) to render http://spys.ru/free-proxy-list/ie/
with a real WebKit engine, then parses the saved HTML with BeautifulSoup
and appends the discovered ip:port pairs to proxy.txt.

Usage: python checkout_proxy.py <casperjs path>
"""
import json
import sys
from bs4 import BeautifulSoup
import os.path, os
import threading
from datetime import datetime
import traceback
import logging
import re, random
import subprocess
import shutil
import platform

# Collected proxies are persisted next to this script.
output_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'proxy.txt')

# One log file per day under ./logs/.
global_log = 'http_proxy' + datetime.now().strftime('%Y-%m-%d') + '.log'
if not os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs')):
    os.mkdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs'))
global_log = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs', global_log)

logging.basicConfig(
    level=logging.DEBUG,
    format='[%(asctime)s] [%(levelname)s] [%(module)s] [%(funcName)s] [%(lineno)d] %(message)s',
    filename=global_log,
    filemode='a')
log = logging.getLogger(__name__)

# Guards proxy_list against concurrent append (one worker thread today,
# but the structure allows more site threads later).
mutex = threading.Lock()
proxy_list = []


def iswindows():
    """Return True when running on Windows, else False."""
    return 'Windows' in str(platform.uname())


def gettagsbyattrs(tagname, pagecontent, attrname, attrregvalue):
    """Find all `tagname` tags in an HTML string whose `attrname` matches the regex."""
    soup = BeautifulSoup(pagecontent)
    return soup.find_all(tagname, {attrname: re.compile(attrregvalue)})


def gettagsbyattrsext(tagname, filename, attrname, attrregvalue):
    """Like gettagsbyattrs(), but reads the HTML from `filename`.

    Returns None when the file does not exist.
    """
    if not os.path.isfile(filename):
        return None
    with open(filename, 'r') as f:
        soup = BeautifulSoup(f)
    return soup.find_all(tagname, {attrname: re.compile(attrregvalue)})


class site1thread(threading.Thread):
    """Worker: run CasperJS against spys.ru and parse proxies out of the result."""

    def __init__(self, outputfilepath):
        threading.Thread.__init__(self)
        # Directory holding the casperjs executable (and site.js).
        self.outputfilepath = outputfilepath
        # Random temp name so concurrent runs do not collide.
        self.filename = str(random.randint(100, 1000)) + '.html'
        self.setName('site1thread')

    def run(self):
        # Make sure site.js sits next to the casperjs binary.
        site1_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'site.js')
        site2_file = os.path.join(self.outputfilepath, 'site.js')
        if not os.path.isfile(site2_file) and os.path.isfile(site1_file):
            shutil.copy(site1_file, site2_file)

        if iswindows():
            proc = subprocess.Popen(
                ['cmd', '/c',
                 '%s/casperjs site.js --url=http://spys.ru/free-proxy-list/ie/ --outputfile=%s'
                 % (self.outputfilepath, self.filename)],
                stdout=subprocess.PIPE)
        else:
            proc = subprocess.Popen(
                ['bash', '-c',
                 'cd %s && ./casperjs site.js --url=http://spys.ru/free-proxy-list/ie/ --outputfile=%s'
                 % (self.outputfilepath, self.filename)],
                stdout=subprocess.PIPE)
        out = proc.communicate()[0]

        # On Windows the working directory of the output file is not
        # deterministic, so probe every plausible location.
        htmlfilename = ''
        if os.path.isfile(self.filename):
            htmlfilename = self.filename
        elif os.path.isfile(os.path.join(self.outputfilepath, self.filename)):
            htmlfilename = os.path.join(self.outputfilepath, self.filename)
        elif os.path.isfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), self.filename)):
            htmlfilename = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.filename)
        if not os.path.isfile(htmlfilename):
            print('failed to get html content from http://spys.ru/free-proxy-list/ie/')
            print(out)
            sys.exit(3)

        mutex.acquire()
        try:
            # spys.ru marks proxy cells with <font class="spy14">.
            proxylist = gettagsbyattrsext('font', htmlfilename, 'class', 'spy14$')
            for proxy in proxylist:
                # renderContents() yields bytes under Python 3 / bs4.
                tdcontent = proxy.renderContents().decode('utf-8', 'ignore')
                # NOTE(review): the original split pattern was garbled by the
                # article scraper; splitting on tag brackets matches the
                # ip-in-first / port-in-last usage below — confirm on live HTML.
                lineelems = re.split('[<>]', tdcontent)
                if (re.compile(r'\d+').search(lineelems[-1])
                        and re.compile(r'(\d+\.\d+\.\d+)').search(lineelems[0])):
                    print(lineelems[0], lineelems[-1])
                    proxy_list.append('%s:%s' % (lineelems[0], lineelems[-1]))
        finally:
            mutex.release()

        # Best-effort cleanup of the temporary HTML dump.
        try:
            if os.path.isfile(htmlfilename):
                os.remove(htmlfilename)
        except OSError:
            pass


if __name__ == '__main__':
    try:
        if len(sys.argv) < 2:
            print('usage: %s [casperjs path]' % sys.argv[0])
            sys.exit(1)
        if not os.path.exists(sys.argv[1]):
            print('casperjs path: %s does not exist!' % sys.argv[1])
            sys.exit(2)

        # Seed with proxies collected on previous runs so the set() below
        # de-duplicates across runs.  (Original called f.close without
        # parentheses and never closed the file.)
        if os.path.isfile(output_file):
            with open(output_file) as f:
                for line in f:
                    proxy_list.append(line.strip())

        thread1 = site1thread(sys.argv[1])
        thread1.start()
        thread1.join()

        with open(output_file, 'w') as f:
            for proxy in set(proxy_list):
                f.write(proxy + '\n')
        print('done!')
    except SystemExit:
        pass
    except Exception:
        # Top-level boundary: log the traceback instead of dying silently.
        errmsg = traceback.format_exc()
        print(errmsg)
        log.error(errmsg)
其它类似信息

推荐信息