wq_jieshu.py

# -*- coding: utf-8 -*-
import datetime
import re

import scrapy

from ..items import Wangjieguo


class HgjieshuSpider(scrapy.Spider):
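    """Collect the ids of finished matches from the hg3535z results pages:
    pair each match id with its final score and yield only the ids of
    matches that already have a score."""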
    name = 'wq_jieshu'
    to_day = datetime.datetime.now()
    allowed_domains = ['hg3535z.com']
    custom_settings = {
        'ITEM_PIPELINES': {
            'hg3535.pipeline.Wangjieshuqiupipeline': 300,
        },
        'LOG_LEVEL': 'DEBUG',
        'LOG_FILE': "../hg3535/log/wq_jieshu_{}_{}_{}.log".format(
            to_day.year, to_day.month, to_day.day),
    }
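    # NOTE: the dotted pipeline path above assumes the project package is
    # `hg3535` with a `pipeline.py` module that defines
    # `Wangjieshuqiupipeline`; if your project uses Scrapy's default
    # `pipelines.py`, adjust the path accordingly.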
    start_urls = ['https://hg3535z.com/zh-cn/info-centre/sportsbook-info/results/3/normal/1']
    def parse(self, response):
        if response.status == 200:
            # All match nodes (earlier attempt, kept for reference):
            # tema = response.xpath('//div[@class="rt-event"]//span[@class="pt"]/text()')
            # print(tema)
            # All match team names:
            # tema_name = [i.extract() for i in tema]
            # All score nodes (earlier attempt):
            # tema_score = response.xpath('//div[contains(@class,"rt-set")]')
            tema_score = response.xpath('//div[@class="flex-wrap"]/../div[5]')
            # All match-id nodes
            tema_id = response.xpath('//div[@class="flex-wrap"]/../div[1]/@id')
            # All match ids, with the 'e-' prefix stripped from the @id value
            temaid_list = [i.extract().replace('e-', "") for i in tema_id]
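            # e.g. an @id attribute of "e-1234567" yields the bare match id
            # "1234567" (illustrative value; only the "e-" prefix is known
            # from the markup).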
            temascore_list = []
            # Regex for final scores such as "2-1" or "10-3"
            pattern1 = re.compile(r"\d{1,3}-\d{1,3}")
            for score in tema_score:
                try:
                    # Take the first score found in this node's markup
                    c = pattern1.findall(score.extract())[0]
                except IndexError:
                    # No score present (match not finished yet)
                    c = ""
                temascore_list.append(c)
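            # At this point temascore_list mirrors tema_score one-to-one,
            # e.g. ["3-1", "", "2-2"], where "" marks a match without a
            # final score (illustrative values).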
            # Set of (match id, match score) tuples
            tema_tupe = {(temaid_list[i], temascore_list[i])
                         for i in range(len(temaid_list))}
            id_list = []
            item = Wangjieguo()
            for y in tema_tupe:
                # Keep only matches that already have a final score
                if y[1]:
                    id_list.append(y[0])
            item['id_score'] = id_list
            yield item
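
# The `Wangjieguo` item imported above is assumed to look roughly like the
# sketch below; the real definition lives in ../items.py and may carry more
# fields:
#
#     import scrapy
#
#     class Wangjieguo(scrapy.Item):
#         id_score = scrapy.Field()
#
# With the project configured, run the spider from the project root as usual:
#
#     scrapy crawl wq_jieshu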