# -*- coding: utf-8 -*-

# Scrapy settings for hg3535 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'hg3535'

SPIDER_MODULES = ['hg3535.spiders']
NEWSPIDER_MODULE = 'hg3535.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent.
# USER_AGENT must be a plain string; wrapping it in braces created a set,
# which Scrapy cannot use as a header value.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20120813 Firefox/16.0'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 16

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
DOWNLOAD_DELAY = 0
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 0

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# (DeltaFetch comes from the scrapy_deltafetch package)
# SPIDER_MIDDLEWARES = {
#     # 'scrapy_deltafetch.DeltaFetch': 100,
#     'hg3535.middlewares.Hg3535SpiderMiddleware': 543,
# }
# DELTAFETCH_ENABLED = True

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # 'hg3535.middlewares.Hg3535DownloaderMiddleware': 200,
    'hg3535.middlewares.Hg3535timeoutDownloaderMiddleware': 200,
    'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 500,
    # The old 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware' path
    # is deprecated; the current one is:
    # 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 502,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipeline
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# DOWNLOAD_TIMEOUT = 180
REACTOR_THREADPOOL_MAXSIZE = 40
# LOG_LEVEL = 'INFO'
COOKIES_ENABLED = False
RETRY_ENABLED = False
DOWNLOAD_TIMEOUT = 10
REDIRECT_ENABLED = False
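# DOWNLOADER_MIDDLEWARES above enables
# 'hg3535.middlewares.Hg3535timeoutDownloaderMiddleware', whose source lives
# in hg3535/middlewares.py and is not part of this file. A minimal sketch of
# what a per-request timeout middleware like it could look like -- an
# assumption for illustration, not the project's actual implementation --
# kept commented out because this module only holds settings:
#
# class Hg3535timeoutDownloaderMiddleware(object):
#     """Stamp a download timeout onto every outgoing request."""
#
#     def __init__(self, timeout):
#         self.timeout = timeout
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         return cls(crawler.settings.getfloat('DOWNLOAD_TIMEOUT', 10))
#
#     def process_request(self, request, spider):
#         # 'download_timeout' in request.meta is honored by Scrapy's downloader
#         request.meta.setdefault('download_timeout', self.timeout)
#         return None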
CONCURRENT_ITEMS = 1000

# SCHEDULER_PERSIST = False  # keep the scheduler queue and dedup records on close: True = keep, False = clear
# SCHEDULER_FLUSH_ON_START = False
# TELNETCONSOLE_PORT = None
# TELNETCONSOLE_ENABLED = False  # AttributeError: 'TelnetConsole' object has no attribute 'port'
# RETRY_ENABLED = True
# RETRY_TIMES = 2
# RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]
# LOG_LEVEL = 'DEBUG'
# SCHEDULER_PERSIST = False
# LOG_FILE = './log/'

# MongoDB connection settings ('M_POST' holds the port number; the name is
# likely a typo for M_PORT but is kept so code that already reads it keeps working)
M_HOST = '192.168.2.200'
# M_HOST = '127.0.0.1'
M_POST = 27017
M_USER = 'kaiyou'
M_DB = 'kaiyou'
M_PASSWORD = 'kaiyou'
# M_PASSWORD = '123456'

# Endpoints of the admin service that crawled data is pushed to
LEAGUE_URL = 'http://stadmin.bocai108.com:19093/setLeague'
# LEAGUE_URL = 'http://stadmin.bocai108.com/setLeague'
MATCH_URL = 'http://stadmin.bocai108.com:19093/setMatch'
# MATCH_URL = 'http://stadmin.bocai108.com/setMatch'
ODDS_URL = 'http://stadmin.bocai108.com:19093/setOdds'
# ODDS_URL = 'http://stadmin.bocai108.com/setOdds'
TOKEN_URL = 'http://stadmin.bocai108.com/getToken'
MATCH_RESULT = 'http://stadmin.bocai108.com:19093/setMatchResult'
MATCH_STATUS = 'http://stadmin.bocai108.com:19093/upMatch'  # upMatch
ODDSCH = 'http://stadmin.bocai108.com:19093/setOddsCH'
# SAIGUO_RESULT = 'http://stadmin.bocai108.com:19093/setResultExpress'
SAIGUO_RESULT = 'http://stadmin.bocai108.com/setResultExpress'
MATCHWARN = 'http://stadmin.bocai108.com:19093/setMatchWarn'

# Redis connection settings ('R_POST' likewise holds the port number).
R_HOST = '192.168.2.200'
R_POST = 6379
R_DB = 1
# The password should be a string; the bare integer 123456 only works by
# accident of redis-py's encoding.
R_PASSWORD = '123456'

# POST_HOST = 'localhost'
# POST_DATABASE = 'kaiyou'
# POST_USER = 'kaiyou'
# POST_PORT = '10432'
# POST_PASSWORD = '123456'

# scrapy-redis distributed scheduling (currently disabled):
# SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"
# Don't clear the redis queue, so crawls can be paused/resumed.
# SCHEDULER_PERSIST = False
# Schedule requests with a priority queue (the default).
# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
# REDIS_HOST = '192.168.2.200'
# REDIS_PORT = 6379
# REDIS_PARAMS = {'password': '123456', 'db': 1}

# Randomize the download delay
# RANDOMIZE_DOWNLOAD_DELAY = True

# AutoThrottle extension
# AUTOTHROTTLE_ENABLED = True
# AUTOTHROTTLE_DEBUG = True
# AUTOTHROTTLE_TARGET_CONCURRENCY = 0.25
# AUTOTHROTTLE_MAX_DELAY = 5

# SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# REDIS_URL = 'redis://:123456@192.168.2.200:6379'
# SCHEDULER_PERSIST = False
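# The M_* and R_* values above are plain project settings, not Scrapy
# built-ins, so a pipeline or spider has to read them itself. A minimal
# sketch of that wiring, kept commented out because this is a settings
# module; the class name and the 'matches' collection are illustrative
# assumptions, not the project's actual code (ITEM_PIPELINES is empty here):
#
# import pymongo
# import redis
#
# class KaiyouStoragePipeline(object):
#     """Hypothetical pipeline wiring MongoDB and Redis from these settings."""
#
#     @classmethod
#     def from_crawler(cls, crawler):
#         s = crawler.settings
#         pipeline = cls()
#         # M_POST / R_POST hold port numbers despite the names
#         client = pymongo.MongoClient(host=s.get('M_HOST'), port=s.getint('M_POST'),
#                                      username=s.get('M_USER'), password=s.get('M_PASSWORD'))
#         pipeline.db = client[s.get('M_DB')]
#         pipeline.redis = redis.StrictRedis(host=s.get('R_HOST'), port=s.getint('R_POST'),
#                                            db=s.getint('R_DB'), password=s.get('R_PASSWORD'))
#         return pipeline
#
#     def process_item(self, item, spider):
#         self.db['matches'].insert_one(dict(item))  # 'matches' is an assumed collection name
#         return item
#
# The *_URL settings look like POST endpoints, with TOKEN_URL issuing an
# auth token first. A hedged sketch of that flow -- the payload shape and
# the 'token' header name are assumptions:
#
# import requests
#
# def push_match(settings, payload):
#     """Fetch a token, then POST one match record to the admin service."""
#     token = requests.get(settings.get('TOKEN_URL'), timeout=10).text
#     resp = requests.post(settings.get('MATCH_URL'), json=payload,
#                          headers={'token': token},  # assumed header name
#                          timeout=10)
#     resp.raise_for_status()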