借助拉手网的开放API接口,获取特定城市的当日团购数据
# -*- coding: utf-8 -*-
"""Fetch today's group-buy deals for a city via the Lashou open API feed."""
from __future__ import print_function

import tornado.httpclient
from bs4 import BeautifulSoup as BS


def fetch(www):
    """Download the XML deal feed at *www* and print, for each deal,
    the shop's short title and the number of purchases.

    www: URL of a Lashou open-API XML feed
         (e.g. http://open.lashou.com/opendeals/lashou/649.xml).
    """
    # BUG FIX: the header key was misspelled 'User-Agentg', so no
    # User-Agent header was actually being sent.
    http_header = {'User-Agent': 'Chrome'}
    http_request = tornado.httpclient.HTTPRequest(
        url=www,
        method="GET",
        headers=http_header,
        connect_timeout=20,
        request_timeout=600,
    )
    http_client = tornado.httpclient.HTTPClient()
    try:
        http_response = http_client.fetch(http_request)
        # Print the HTTP response status code.
        print(http_response.code)
        content = BS(http_response.body, "xml")
        for each in content.findChildren('url'):
            # Print each shop's short title and its purchase count.
            print(each.data.display.shortTitle.text,
                  each.data.display.bought.text)
    finally:
        # FIX: the synchronous client owns an IOLoop; release it so the
        # process does not leak the loop and its file descriptors.
        http_client.close()


if __name__ == "__main__":
    fetch("http://open.lashou.com/opendeals/lashou/649.xml")

# NOTE (original author's remarks, translated): no comparison was made
# against xml.etree.ElementTree; BeautifulSoup is said to be somewhat
# slow at parsing XML. This was only an experiment — it turned out to be
# quite simple, so it is recorded here for now. One caveat: the XML
# contains a shop-name tag <name>, which clashes with BeautifulSoup's
# own `name` attribute on tags, so check that the XML tag names are
# valid before accessing them as attributes.
# Original link: https://www.f2er.com/xml/298263.html