python 结构化抓取指定url的ICP备案信息示例

前端之家收集整理的这篇文章主要介绍了python 结构化抓取指定url的ICP备案信息示例前端之家小编觉得挺不错的,现在分享给大家,也给大家做个参考。
对 Python 这门高级语言感兴趣的小伙伴,下面一起跟随编程之家 jb51.cc 的小编来看看吧!
  1. # @param python 爬取指定url的ICP备案信息(结构化抓取)
  2. # @author 编程之家 jb51.cc|jb51.cc
  3. #coding=gbk
  4. import os
  5. import sys
  6. import re
  7. import time
  8. import urllib2
  9. def perror_and_exit(message,status = -1):
  10. sys.stderr.write(message + '\n')
  11. sys.exit(status)
  12. def get_text_from_html_tag(html):
  13. pattern_text = re.compile(r">.*? return pattern_text.findall(html)[0][1:-2].strip()
  14. def parse_alexa(url):
  15. url_alexa = "http://icp.alexa.cn/index.PHP?q=%s" % url
  16. print url_alexa
  17. #handle exception
  18. times = 0
  19. while times < 5000: #等待有一定次数限制
  20. try:
  21. alexa = urllib2.urlopen(url_alexa).read()
  22. pattern_table = re.compile(r".*?",re.DOTALL | re.MULTILINE)
  23. match_table = pattern_table.search(alexa)
  24. if not match_table:
  25. raise BaseException("No table in HTML")
  26. break
  27. except:
  28. print "try %s times:sleep %s seconds" % (times,2**times)
  29. times += 1
  30. time.sleep(2**times)
  31. continue
  32. table = match_table.group()
  33. pattern_tr = re.compile(r".*?",re.DOTALL | re.MULTILINE)
  34. match_tr = pattern_tr.findall(table)
  35. if len(match_tr) != 2:
  36. perror_and_exit("table format is incorrect")
  37. icp_tr = match_tr[1]
  38. pattern_td = re.compile(r".*?",re.DOTALL | re.MULTILINE)
  39. match_td = pattern_td.findall(icp_tr)
  40. #print match_td
  41. company_name = get_text_from_html_tag(match_td[1])
  42. company_properties = get_text_from_html_tag(match_td[2])
  43. company_icp = get_text_from_html_tag(match_td[3])
  44. company_icp = company_icp[company_icp.find(">") + 1:]
  45. company_website_name = get_text_from_html_tag(match_td[4])
  46. company_website_home_page = get_text_from_html_tag(match_td[5])
  47. company_website_home_page = company_website_home_page[company_website_home_page.rfind(">") + 1:]
  48. company_detail_url = get_text_from_html_tag(match_td[7])
  49. pattern_href = re.compile(r"href=\".*?\"",re.DOTALL | re.MULTILINE)
  50. match_href = pattern_href.findall(company_detail_url)
  51. if len(match_href) == 0:
  52. company_detail_url = ""
  53. else:
  54. company_detail_url = match_href[0][len("href=\""):-1]
  55. return [url,company_name,company_properties,company_icp,company_website_name,company_website_home_page,company_detail_url]
  56. pass
  57. if __name__ == "__main__":
  58. fw = file("out.txt","w")
  59. for url in sys.stdin:
  60. fw.write("\t".join(parse_alexa(url)) + "\n")
  61. #coding=gbk
  62. import os
  63. import sys
  64. import re
  65. import time
  66. import urllib2
  67. def perror_and_exit(message,status = -1):
  68. sys.stderr.write(message + '\n')
  69. sys.exit(status)
  70. def get_text_from_html_tag(html):
  71. pattern_text = re.compile(r">.*? return pattern_text.findall(html)[0][1:-2].strip()
  72. def parse_alexa(url):
  73. url_alexa = "http://icp.alexa.cn/index.PHP?q=%s" % url
  74. print url_alexa
  75. #handle exception
  76. times = 0
  77. while times < 5000: #等待有一定次数限制
  78. try:
  79. alexa = urllib2.urlopen(url_alexa).read()
  80. pattern_table = re.compile(r".*?",re.DOTALL | re.MULTILINE)
  81. match_table = pattern_table.search(alexa)
  82. if not match_table:
  83. raise BaseException("No table in HTML")
  84. break
  85. except:
  86. print "try %s times:sleep %s seconds" % (times,2**times)
  87. times += 1
  88. time.sleep(2**times)
  89. continue
  90. table = match_table.group()
  91. pattern_tr = re.compile(r".*?",re.DOTALL | re.MULTILINE)
  92. match_tr = pattern_tr.findall(table)
  93. if len(match_tr) != 2:
  94. perror_and_exit("table format is incorrect")
  95. icp_tr = match_tr[1]
  96. pattern_td = re.compile(r".*?",re.DOTALL | re.MULTILINE)
  97. match_td = pattern_td.findall(icp_tr)
  98. #print match_td
  99. company_name = get_text_from_html_tag(match_td[1])
  100. company_properties = get_text_from_html_tag(match_td[2])
  101. company_icp = get_text_from_html_tag(match_td[3])
  102. company_icp = company_icp[company_icp.find(">") + 1:]
  103. company_website_name = get_text_from_html_tag(match_td[4])
  104. company_website_home_page = get_text_from_html_tag(match_td[5])
  105. company_website_home_page = company_website_home_page[company_website_home_page.rfind(">") + 1:]
  106. company_detail_url = get_text_from_html_tag(match_td[7])
  107. pattern_href = re.compile(r"href=\".*?\"",re.DOTALL | re.MULTILINE)
  108. match_href = pattern_href.findall(company_detail_url)
  109. if len(match_href) == 0:
  110. company_detail_url = ""
  111. else:
  112. company_detail_url = match_href[0][len("href=\""):-1]
  113. return [url,company_detail_url]
  114. pass
  115. if __name__ == "__main__":
  116. fw = file("out.txt","w")
  117. for url in sys.stdin:
  118. fw.write("\t".join(parse_alexa(url)) + "\n")[python] view plaincopyprint? time.sleep(2)
  119. pass
  120. time.sleep(2)
  121. pass
  122. # End www.jb51.cc

 

每次抓取后都会 sleep 2 秒,以降低 IP 被封的风险;但实际上即使加了 sleep,IP 过一段时间仍可能被封。

由于是结构化抓取,当网站格式变化此程序将无法使用

猜你在找的Python相关文章