HabitSystemV3: ethiopian_spiderling_config.py

File ethiopian_spiderling_config.py, 11.1 KB (added by xsuchom2, 7 years ago)
#===============================================================================
#   SpiderLing -- A web spider/crawler for linguistic purpose.
#   Copyright (c) 2011-2017  Vit Suchomel
#   http://corpus.tools/wiki/SpiderLing
#
#   This program is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.
#===============================================================================

#===============================================================================
# SPIDERLING CONFIGURATION FILE
# Set the most important configuration options here
# Sections: general, connection, domains, processing, debug
#===============================================================================


#== general ==

#file directories
RUN_DIR = u'run'
LOG_DIR = RUN_DIR
PIPE_DIR = u'%s/pipes' % RUN_DIR #interprocess communication
URL_DIR = u'%s/url' % RUN_DIR #URLs to download
ROBOTS_DIR = u'%s/robots' % RUN_DIR #robots.txt
REDIRECT_DIR = u'%s/redirect' % RUN_DIR #HTTP redirections
WPAGE_DIR = u'%s/wpage' % RUN_DIR #raw encoded downloaded documents
ARC_DIR = u'%s/arc' % RUN_DIR #raw encoded downloaded documents in the arc format
DOC_META_DIR = u'%s/docmeta' % RUN_DIR #document metadata and extracted URLs
PREVERT_DIR = u'%s/prevert' % RUN_DIR #processed documents in the prevertical format
IGNORED_DIR = u'%s/ignored' % RUN_DIR #URLs excluded from further processing
SAVE_DIR = u'%s/save' % RUN_DIR #saved states
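
#A minimal sketch (not part of SpiderLing itself) of creating the directory
#layout above before a crawl; _d is a throwaway name introduced here.
if __name__ == '__main__':
    import os
    for _d in (PIPE_DIR, URL_DIR, ROBOTS_DIR, REDIRECT_DIR, WPAGE_DIR, ARC_DIR,
               DOC_META_DIR, PREVERT_DIR, IGNORED_DIR, SAVE_DIR):
        if not os.path.isdir(_d):
            os.makedirs(_d) #avoids exist_ok for Python 2 compatibility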

#max crawling time [s] (None for no limit)
MAX_RUN_TIME = None

#The number of downloaded data processing threads (process.py) to run,
#2 is enough for very small languages, 16 is enough for huge English crawls.
#Do not set to more than (<number of your processors> - 2).
#A single document processor can deal with a raw data download speed of
#approx. 1000 MB/hour (a rough estimate, depends on CPU).
DOC_PROCESSOR_COUNT = 4

#Interpreter and priority of subprocesses opened by the main script
DOWNLOADER_EXEC = ['python', 'crawl.py']
PROCESSOR_EXEC = ['ionice', '-c3', 'nice', '-n10', 'python', 'process.py']
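
#Illustration only (not SpiderLing's actual launch code): the lists above are
#argv-style commands, e.g. suitable for subprocess.Popen(PROCESSOR_EXEC).
if __name__ == '__main__':
    print('downloader command: %s' % ' '.join(DOWNLOADER_EXEC))
    print('processor command: %s' % ' '.join(PROCESSOR_EXEC))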


#== connection ==

#You are responsible for properly identifying your crawler.
#Some sites use the '+' in AGENT_URL to reject bots => it is polite to keep it.
#The crawler's behaviour and possible masking is __your__ responsibility.
AGENT = 'SpiderLing'
AGENT_URL = '+http://nlp.fi.muni.cz/projects/biwec/'
USER_AGENT = 'Mozilla/5.0 (compatible; SpiderLing (a SPIDER for LINGuistic research); %s)' % AGENT_URL
#Ignore the robots protocol if the crawler failed to fetch or parse robots.txt.
IGNORE_ROBOTS_WHEN_FAILED = True #False
#Give up waiting for a domain's robots.txt after this time [s] (None to turn off)
#and apply the ignore decision above.
MAX_ROBOT_WAITING_TIME = 3*3600

#https support, fall back to http if disabled
#Used ssl.SSLContext functions require Python >= 2.7.9.
#Disabling results in ignoring websites requiring SSL.
SSL_ENABLED = True

#http response size constraints
DOC_SIZE_MIN = 200
DOC_SIZE_MAX = 10*1024*1024 #do not set to less than 10000

#max number of open connections (max: ulimit -n), must be >> OPEN_AT_ONCE
MAX_OPEN_CONNS = 8000
#max number of connections opened at once
OPEN_AT_ONCE = 800
#abandon an open connection when it has not responded for this time [s]
NO_ACTIVITY_TIMEOUT = 80 #40

#max number of urls waiting to be downloaded, decrease to save RAM
MAX_URL_QUEUE = 5000000
#max number of documents waiting to be sent
MAX_WPAGE_QUEUE = 5000

#period [s] of connecting to the same IP
IP_CONN_INTERVAL = 1
#period [s] of connecting to the same host =~ crawl delay (20 means 3/min)
HOST_CONN_INTERVAL = 20
#The crawler's behaviour is __your__ responsibility;
#setting these values too low will get your IP banned by target sites
#and can lead to your ISP blocking your internet connection!
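
#A worked example (illustration only) of what these intervals imply:
#HOST_CONN_INTERVAL = 20 means at most 3 requests/min to a single host, so the
#overall request rate is bounded by the number of concurrently crawled hosts.
if __name__ == '__main__':
    reqs_per_min_per_host = 60.0 / HOST_CONN_INTERVAL
    print('per-host rate: %.1f requests/min' % reqs_per_min_per_host)
    #e.g. an upper bound with 1000 concurrently selected hosts:
    print('1000 hosts => at most %.0f requests/min in total'
        % (1000 * reqs_per_min_per_host))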


#== domains ==

#min docs downloaded from a domain before yield rate threshold applies
MIN_DOCS_DOWNLOADED = 6
#min bytes downloaded from a domain before yield rate threshold applies
MIN_BYTES_DOWNLOADED = 256*1024
#max (soft limit) cleaned non-empty docs from a domain (None for no limit)
MAX_DOCS_CLEANED = None

"""
The yield rate threshold function sets the threshold for stopping the crawling
of a domain when it is no longer efficient.
"""
from math import log
def yield_rate_threshold(doc_count):
    #standard threshold: 10: 0 %, 100: 1 %, 1000: 2 %, 10000: 3 %, 100000: 4 %
    #return 0.01 * (log(doc_count, 10) - 1)
    return 0.001 * (log(doc_count, 10) - 1) #small languages
    #return 0.0 #threshold disabled
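
#A quick check (illustration only) of the thresholds produced by the active
#small-language variant: 100 docs => 0.1 %, 1000 => 0.2 %, 10000 => 0.3 %.
if __name__ == '__main__':
    for _docs in (10, 100, 1000, 10000, 100000):
        print('%6d docs -> threshold %.4f' % (_docs, yield_rate_threshold(_docs)))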

#The web domain data (paths within the domain) is the most memory consuming
#structure. A rough estimate is 500 MB - 5 GB per million domains, depending
#on the number of urls within a stored domain. The more domains the crawler
#has to store, the more memory is allocated. After reaching MAX_DOMS_READY
#domains, additional domains in state "ready" are written into the
#domains_oversize file and never examined again. Therefore do not set
#MAX_DOMS_READY to a low value. Domains in state "new" (IP not determined yet)
#consume memory in the same way. The size of that queue is not limited, so
#"new" domains can deplete your free memory even though "ready" domains are limited.

#max number of domains ready for crawling kept in memory
#1M is enough for small languages (expect ~2 GB RAM required),
#50M should be enough for huge English crawls (allocates up to ~200 GB RAM).
MAX_DOMS_READY = 5*1000*1000
#max domains selected for crawling at the same time
MAX_DOMS_SELECTED = 500*1000
#max time [s] no data received from a selected domain
MAX_IDLE_TIME = 6*3600 #1800
#period [s] of domain refresh (adding new, removing old, evaluating efficiency)
UPDATE_DOMAINS_PERIOD = 600
#max number of domains with the same IP selected for crawling at the same time
MAX_IP_REPEAT = 50

#Number of urls sent at once by the scheduler to the crawler. Decrease the
#minimum value to 10 if crawling a small language. Set the minimum starting
#value to 1 if crawling a single web domain. Increase the maximum value to
#allow wider domain coverage in the downloader's url queue.
#Must be < MAX_DOMS_SELECTED.
MIN_URL_SELECT_START = 100
MIN_URL_SELECT = 1000
MAX_URL_SELECT = 100000
#Decrease to improve domain variety and thus crawling speed; useful only when
#there are more than ~500 domains crawled concurrently (big languages)
MAX_URL_SELECT_PER_DOMAIN = 100

#hostname -> IP mapping file (two space separated columns), None by default
#Use recent DNS mappings only since hosts tend to move occasionally.
DNS_TABLE_PATH = None #u'url/dns_table' #None
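
#Illustration only: the expected format is one "hostname IP" pair per line,
#e.g. "www.example.et 203.0.113.7" (hypothetical values); it could be read as:
if __name__ == '__main__' and DNS_TABLE_PATH is not None:
    dns_table = {}
    with open(DNS_TABLE_PATH) as dns_file:
        for dns_line in dns_file:
            hostname, ip_addr = dns_line.split()
            dns_table[hostname] = ip_addr
    print('%d DNS mappings loaded' % len(dns_table))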


#== processing ==

#Set the languages of documents to recognise, e.g. ['Norwegian', 'Danish'].
#This enables recognising unwanted languages similar to the target languages.
#Requires, for each such language:
# - a plaintext sample in that language: ./util/lang_samples/Norwegian
# - a jusText wordlist for that language: ./util/justext_wordlists/Norwegian
# - a chared model for that language: ./util/chared_models/Norwegian
#The most similar language is selected (if below the lang diff thresholds).
LANGUAGES = ['Amharic', 'Oromo', 'Somali', 'Tigrinya', 'English', 'Arabic']
#Set which of the recognised languages to accept, e.g. ['Norwegian'].
LANGUAGES_ACCEPT = ['Amharic', 'Oromo', 'Somali', 'Tigrinya']
#max allowed difference from the lang model for documents and paragraphs
#(0.0 = same as the model); it does not work well with paragraphs, where the
#reported similarity is usually low
LANG_DIFF_THRESHOLD_DOC = 0.7
LANG_DIFF_THRESHOLD_PAR = 0.99
#disable encoding detection and force an encoding (e.g. 'utf-8'), None by default
FORCE_ENCODING = None
#switch to unigram models (useful for Chinese, Japanese, Korean)
UNIGRAM_MODELS = False

#Allowed top level domains regexp -- crawling is restricted to these, e.g.:
#English + general web: u'\.(?:uk|us|au|ca|com|org|net|info|biz|edu|name)$'
#German + general web: u'\.(?:de|at|ch|com|org|net|info|eu|edu|name)$'
#Czech only: u'\.cz$'
#no restriction (all): u''
from re import compile
TLD_WHITELIST_RE = compile(u'')
#Country/language native TLDs, e.g. u'\.(?:uk|us|au|ca)$' or u'\.cz$' or u''.
#If EXTRACT_EMPTY_PAGE_EXTERNAL_LINKS == True, a matched cross domain link
#will be followed. u'' is the default and results in following all links.
TLD_NATIVE_RE = compile(u'\.(?:et|er|so|dj)$')
#Use e.g. compile(u'\.dk$') to block the Danish TLD,
#TLD blacklist > TLD whitelist, None by default.
TLD_BLACKLIST_RE = None #compile(u'')

#file containing unwanted web domains (one per line), None by default
DOMAIN_BLACKLIST_PATH = u'url/domain_blacklist' #None
#file containing allowed web domains (one per line), None by default
#priority: blacklist > whitelist > allowed TLDs
DOMAIN_WHITELIST_PATH = None
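
#A minimal sketch (hypothetical helper, not SpiderLing's actual code) of the
#priority stated above: domain blacklist first, then domain whitelist,
#then the TLD blacklist/whitelist regexps.
if __name__ == '__main__':
    def _host_allowed(host, blacklisted_doms=(), whitelisted_doms=()):
        if host in blacklisted_doms:
            return False
        if host in whitelisted_doms:
            return True
        if TLD_BLACKLIST_RE is not None and TLD_BLACKLIST_RE.search(host):
            return False
        return bool(TLD_WHITELIST_RE.search(host))
    #with TLD_WHITELIST_RE = compile(u'') every TLD is allowed:
    print(_host_allowed(u'www.example.et')) #True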

#extract links from web pages containing no clean text;
#useful to decrease crawling of domains in unwanted languages,
#which are dealt with by the trigram model
#disabling this may increase crawling efficiency (yield rate)
#at the cost of omitting some web domains (or visiting them later)
EXTRACT_EMPTY_PAGE_INTERNAL_LINKS = True
#extract links pointing out of the domain from pages with no clean text
#priority: EXTRACT_EMPTY_PAGE_INTERNAL_LINKS > EXTRACT_EMPTY_PAGE_EXTERNAL_LINKS
EXTRACT_EMPTY_PAGE_EXTERNAL_LINKS = False

#justext paragraph heuristic configuration
#character count < length_low => bad or short
JUSTEXT_LENGTH_LOW = 50 #justext default = 70
#character count > length_high => good
JUSTEXT_LENGTH_HIGH = 100 #justext default = 200
#proportion of words frequent in the language >= stopwords_low => neargood
JUSTEXT_STOPWORDS_LOW = 0.2 #justext default = 0.3
#proportion of words frequent in the language >= stopwords_high => good or neargood
JUSTEXT_STOPWORDS_HIGH = 0.3 #justext default = 0.32
#density of link words (words inside the <a> tag) > max_link_density => bad
JUSTEXT_MAX_LINK_DENSITY = 0.4 #justext default = 0.2
#short/neargood headings within <value> characters before a good paragraph => good
JUSTEXT_MAX_HEADING_DISTANCE = 200 #justext default = 200
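
#Illustration only: these values mirror the like-named parameters of the
#standalone justext package (SpiderLing applies them via its bundled jusText);
#a hypothetical direct call with a toy document:
if __name__ == '__main__':
    try:
        import justext
        paragraphs = justext.justext(
            '<html><body><p>Example paragraph text.</p></body></html>',
            justext.get_stoplist('English'),
            length_low=JUSTEXT_LENGTH_LOW, length_high=JUSTEXT_LENGTH_HIGH,
            stopwords_low=JUSTEXT_STOPWORDS_LOW,
            stopwords_high=JUSTEXT_STOPWORDS_HIGH,
            max_link_density=JUSTEXT_MAX_LINK_DENSITY,
            max_heading_distance=JUSTEXT_MAX_HEADING_DISTANCE)
        for par in paragraphs:
            print('%s: %s' % (par.class_type, par.text))
    except ImportError:
        pass #the standalone justext package is not installed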

#path to justext wordlists, use None for the justext default
#JUSTEXT_WORDLIST_DIR/LANGUAGE must exist for all LANGUAGES
JUSTEXT_WORDLIST_DIR = u'util/justext_wordlists'
#path to chared models
#CHARED_MODEL_DIR/LANGUAGE must exist for all LANGUAGES
CHARED_MODEL_DIR = u'util/chared_models'
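
#A small pre-flight check (illustration only) that the per-language resources
#required above exist; u'util/lang_samples' follows the path mentioned in the
#comment at the top of this section.
if __name__ == '__main__':
    import os
    for _lang in LANGUAGES:
        for _dir in (u'util/lang_samples', JUSTEXT_WORDLIST_DIR, CHARED_MODEL_DIR):
            _path = os.path.join(_dir, _lang)
            if not os.path.exists(_path):
                print('MISSING %s' % _path)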
#allow conversion of binary formats (pdf, doc) to text (not tested;
#the conversion is not reliable and may result in garbage text)
CONVERSION_ENABLED = False


#== debug ==

import logging
LOG_LEVEL = logging.DEBUG #DEBUG|INFO|WARNING|ERROR
LOG_FORMAT = u'%(asctime)-15s %(threadName)s %(message)s'
#period [s] of writing out some debug info, 0 to disable
INFO_PERIOD = 3600
#period [s] of saving state of the crawler (all domain data), 0 to disable
SAVE_PERIOD = 3600*24
#log stream buffering (0 = none, 1 = line, -1 = fully/system, n = n bytes)
LOG_BUFFERING = 1
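
#Optional sanity checks (illustration only) of constraints stated in the
#comments above; ">>" is interpreted here as a factor of 10, an assumption.
if __name__ == '__main__':
    import multiprocessing
    assert DOC_SIZE_MAX >= 10000, 'do not set DOC_SIZE_MAX below 10000'
    assert MAX_URL_SELECT < MAX_DOMS_SELECTED, \
        'MAX_URL_SELECT must be < MAX_DOMS_SELECTED'
    assert OPEN_AT_ONCE * 10 <= MAX_OPEN_CONNS, \
        'MAX_OPEN_CONNS must be much greater than OPEN_AT_ONCE'
    if DOC_PROCESSOR_COUNT > multiprocessing.cpu_count() - 2:
        print('warning: DOC_PROCESSOR_COUNT exceeds CPU count - 2')
    #how a consumer of this config might initialise logging from these values:
    logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT)
    logging.info('configuration looks sane')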