urllib2 turns out to be remarkably powerful.
I tried logging in through a proxy, pulling cookies, and following redirects to grab images... (a small image-saving sketch follows the demo below).
Docs: http://docs.python.org/library/urllib2.html
Jumping straight to the demo code.
It covers: direct fetching, using Request (GET/POST), using a proxy, cookies, and redirect handling.
#!/usr/bin/python
# -*- coding:utf-8 -*-
# urllib2_test.py
# author: wklken
# 2012-03-17 wklken@yeah.net

import urllib, urllib2, cookielib, socket

url = "http://www.testurl....."  # change this yourself

# The simplest way
def use_urllib2():
    try:
        f = urllib2.urlopen(url, timeout=5).read()
        print len(f)
    except urllib2.URLError, e:
        print e.reason

# Using Request
def get_request():
    # A timeout can be set
    socket.setdefaulttimeout(5)
    # Parameters can be added [no params = GET; passing data as below = POST]
    params = {"wd": "a", "b": "2"}
    # Request headers can be added so the client is identifiable
    i_headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5",
                 "Accept": "text/plain"}
    # Use POST: params are posted to the server; if the server doesn't support it, an exception is thrown
    #req = urllib2.Request(url, data=urllib.urlencode(params), headers=i_headers)
    req = urllib2.Request(url, headers=i_headers)
    # Headers can still be added after the Request is created; on duplicate keys, the later one wins
    #req.add_header('Accept', 'application/json')
    # The HTTP method can be overridden
    #req.get_method = lambda: 'PUT'
    try:
        page = urllib2.urlopen(req)
        print len(page.read())
        # GET with query params looks like this:
        #url_params = urllib.urlencode({"a": "1", "b": "2"})
        #final_url = url + "?" + url_params
        #print final_url
        #data = urllib2.urlopen(final_url).read()
        #print "method:get ", len(data)
    except urllib2.HTTPError, e:
        print "Error Code:", e.code
    except urllib2.URLError, e:
        print "Error Reason:", e.reason

def use_proxy():
    enable_proxy = False
    proxy_handler = urllib2.ProxyHandler({"http": "http://proxyurlXXXX.com:8080"})
    null_proxy_handler = urllib2.ProxyHandler({})
    if enable_proxy:
        opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
    else:
        opener = urllib2.build_opener(null_proxy_handler, urllib2.HTTPHandler)
    # This installs the opener as urllib2's global opener
    urllib2.install_opener(opener)
    content = urllib2.urlopen(url).read()
    print "proxy len:", len(content)

class NoExceptionCookieProcesser(urllib2.HTTPCookieProcessor):
    # Swallow 4xx/5xx errors so the response (and its cookies) is still returned
    def http_error_403(self, req, fp, code, msg, hdrs):
        return fp
    def http_error_400(self, req, fp, code, msg, hdrs):
        return fp
    def http_error_500(self, req, fp, code, msg, hdrs):
        return fp

def hand_cookie():
    cookie = cookielib.CookieJar()
    #cookie_handler = urllib2.HTTPCookieProcessor(cookie)
    # The same handler, but with the error handling added above
    cookie_handler = NoExceptionCookieProcesser(cookie)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPHandler)
    url_login = "https://www.yourwebsite/?login"
    params = {"username": "user", "password": "111111"}
    opener.open(url_login, urllib.urlencode(params))
    for item in cookie:
        print item.name, item.value
    #urllib2.install_opener(opener)
    #content = urllib2.urlopen(url).read()
    #print len(content)

# Get the final page URL after N redirects
def get_request_direct():
    import httplib
    httplib.HTTPConnection.debuglevel = 1
    request = urllib2.Request("http://www.google.com")
    request.add_header("Accept", "text/html,*/*")
    request.add_header("Connection", "Keep-Alive")
    opener = urllib2.build_opener()
    f = opener.open(request)
    print f.url
    print f.headers.dict
    print len(f.read())

if __name__ == "__main__":
    use_urllib2()
    get_request()
    get_request_direct()
    use_proxy()
    hand_cookie()
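
For the "follow redirects and grab an image" part mentioned at the top, the demo only prints the final URL. Here is a minimal sketch of actually saving the fetched bytes; save_image, img_url, and out_path are placeholder names of mine, not from the demo above.

import urllib2

def save_image(img_url, out_path):
    # The default opener already follows 301/302 redirects,
    # so f.url is the URL the image was finally served from
    f = urllib2.urlopen(img_url, timeout=5)
    print "final url:", f.url
    data = f.read()
    out = open(out_path, "wb")
    out.write(data)
    out.close()
    print "saved %d bytes" % len(data)

#save_image("http://.../logo.png", "logo.png")  # fill in a real image URL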
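And if you want to see each redirect hop rather than just the destination that get_request_direct() prints, one way (my own sketch, not from the demo) is to subclass urllib2.HTTPRedirectHandler and log before delegating to the default behavior:

import urllib2

class LoggingRedirectHandler(urllib2.HTTPRedirectHandler):
    def http_error_302(self, req, fp, code, msg, headers):
        # Print the hop, then let the default handler actually follow it
        print "redirect %s -> %s" % (code, headers.get("Location"))
        return urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
    http_error_301 = http_error_303 = http_error_307 = http_error_302

opener = urllib2.build_opener(LoggingRedirectHandler)
f = opener.open("http://www.google.com")
print "landed on:", f.url

Passing the subclass to build_opener replaces the stock redirect handler, so everything else (headers, cookies, proxies) keeps working as in the demo.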