# Simple crawler example (synchronous download to a file)
from urllib import request
def f(url):
    """Download *url* synchronously and save the response body to url.html.

    Prints the requested URL before fetching and the number of bytes
    received afterwards.
    """
    print('GET: %s' % url)
    # urlopen's response object is a context manager; close it deterministically.
    with request.urlopen(url) as resp:
        data = resp.read()
    # Use a distinct name for the file handle -- the original rebound `f`,
    # shadowing this function's own name -- and `with` guarantees the file
    # is closed even if write() raises.
    with open("url.html", 'wb') as out:
        out.write(data)
    print('%d bytes received from %s.' % (len(data), url))
# Run one synchronous download; the page body is written to url.html.
f("https://www.cnblogs.com/alex3714/articles/5248247.html")
# Crawler example: synchronous (serial) vs. gevent coroutine downloads
from urllib import request
import gevent,time
from gevent import monkey
monkey.patch_all() # Patch the stdlib's blocking I/O so gevent can detect every blocking call and switch greenlets -- effectively marking each I/O wait like an implicit sleep.
def f(url):
    """Fetch *url* and print how many bytes were received.

    Fetch-only variant: reads the whole body but does not save it.
    """
    print('GET: %s' % url)
    # Close the HTTP response deterministically (urlopen returns a
    # context manager).
    with request.urlopen(url) as resp:
        data = resp.read()
    print('%d bytes received from %s.' % (len(data), url))
# Target URLs for the synchronous-vs-coroutine timing comparison below.
urls=['https://www.python.org/',
'https://www.qq.com/',
'https://github.com/']
# Synchronous / serial execution: fetch each URL one after another
# and time the total.
start_time = time.time()
for url in urls:
    f(url)
print("同步cost:", time.time() - start_time)
# Asynchronous execution with gevent coroutines, timed for comparison.
async_time_start=time.time()
# Spawn 3 greenlets, each running f with its URL argument, then block
# until all of them finish.
gevent.joinall([
gevent.spawn(f, 'https://www.python.org/'),
gevent.spawn(f, 'https://www.qq.com/'),
gevent.spawn(f, 'https://github.com/'),
])
print("异步cost:",time.time()-async_time_start)