# sportslst.py

# -*- coding: utf-8 -*-
import importlib
import json

import scrapy
import pycomm

from collectSports.items import *
# from mcollect.hg0088 import Resolver
from biz.zqleague import zqLeague


class SportslstSpider(scrapy.Spider):
    curSrc = None  # name of the currently selected collection source
    name = 'sportslst'
    allowed_domains = ['hg0088.com']
    # start_urls = ['http://hg0088.com/']
    custom_settings = {
        # Bare header names are not Scrapy settings; the request headers
        # belong under DEFAULT_REQUEST_HEADERS, where the downloader
        # actually reads them.
        "DEFAULT_REQUEST_HEADERS": {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Cookie": "OddType@21627573=H; protocolstr=http; gamePoint_21627573=2019-05-10%2A0%2A0; _ga=GA1.4.601418716.1557495256; _gid=GA1.4.1118061739.1557495256",
            "Host": "199.26.100.178",
        },
        # Scrapy's cookie middleware replaces a statically set Cookie
        # header; disable it so the header above is sent as-is.
        "COOKIES_ENABLED": False,
        "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4098.3 Safari/537.36",
        "ITEM_PIPELINES": {
            "collectSports.pipelines.sportslst.SportslstPipeline": 200,
        },
    }
    # start_url = 'http://199.26.100.178/app/member/get_game_allbets.php'
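    # A source module (e.g. mcollect.hg0088) is assumed to expose roughly
    # the following module-level attributes; this shape is inferred from
    # the usage in start_requests below, not from the mcollect package:
    #
    #   uid = '21627573'
    #   links = [
    #       {'url': 'http://199.26.100.178/...?uid={uid}&page={page}',
    #        'cb': 'allbets'},  # 'cb' names a Resolver/Storage method
    #   ]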
    def start_requests(self):
        # Load the configured source module, e.g. mcollect.hg0088.
        self.curSrc = source = self.getCurrentSource()
        srcObj = importlib.import_module('mcollect.' + source)
        # zl = zqLeague()
        # zl.update({'league_id': 1})
        for item in srcObj.links:
            url = item['url'].format(uid=srcObj.uid, page=1)
            # 'cb' selects the Resolver/Storage handler for this link;
            # fall back to the 'default' handler when none is set.
            params = str(item['cb']) if item['cb'] else 'default'
            yield scrapy.FormRequest(url, callback=self.parse,
                                     meta={'cb': params, 'subdel': 0})
    def getCurrentSource(self):
        # 'currentSource' in the shared conf cache names the active
        # mcollect sub-module; returns None when it is not configured.
        conf = pycomm.getCache('conf')
        if 'currentSource' in conf:
            return conf['currentSource']
        return None
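    # pycomm.getCache is assumed to return a dict-like configuration
    # blob keyed by name; a hypothetical stand-in would be:
    #
    #   def getCache(key):
    #       return {'conf': {'currentSource': 'hg0088'}}.get(key, {})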
    def parse(self, response):
        cb = response.meta['cb']
        subdel = response.meta['subdel']
        # Resolve the raw body into a JSON string via the source's
        # Resolver class, then decode it.
        mc = importlib.import_module('mcollect.%s.Resolver' % self.curSrc)
        res = mc.Resolver()
        cbk = getattr(res, cb)
        data = json.loads(cbk(response.body))
        # Only the first page (subdel == 0) fans out into per-page
        # sub-requests; the pages it spawns carry subdel == 1.
        if subdel == 0 and 'total_page' in data:
            # subStart_request is a generator: its requests must be
            # re-yielded here, or they are never scheduled.
            yield from self.subStart_request(data['total_page'],
                                             response.url, cb, 'page_no')
        # Turn the decoded payload into an item via the source's Storage.
        mcs = importlib.import_module('mcollect.%s.Storage' % self.curSrc)
        ress = mcs.Storage()
        cbks = getattr(ress, cb)
        yield cbks(data)
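    # Assumed contract, inferred from parse above: for every callback
    # name cb, mcollect.<source>.Resolver.Resolver exposes cb(body) ->
    # JSON string, and mcollect.<source>.Storage.Storage exposes
    # cb(dict) -> item, e.g.:
    #
    #   class Resolver:
    #       def default(self, body):
    #           return body.decode('utf-8')      # body is raw bytes
    #
    #   class Storage:
    #       def default(self, data):
    #           return SportslstItem(data=data)  # hypothetical item class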
    def subStart_request(self, total_page, url, cb, page_name='page'):
        # Re-issue the first-page URL for pages 2..total_page by
        # rewriting its page parameter.
        while total_page > 1:
            page = str(total_page)  # total_page is an int; stringify before concatenating
            newurl = url.replace(page_name + '=0', page_name + '=' + page)
            newurl = newurl.replace(page_name + '=1', page_name + '=' + page)
            params = str(cb) if cb else 'default'
            yield scrapy.FormRequest(newurl, callback=self.parse,
                                     meta={'cb': params, 'subdel': 1})
            total_page = total_page - 1
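
# Run with: scrapy crawl sportslst
# The spider expects a 'currentSource' entry in the shared conf cache
# (see getCurrentSource) naming an mcollect sub-module to drive.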