# -*- coding: utf-8 -*-
# noinspection PyCompatibility
import sys
import math
from difflib import SequenceMatcher
from urllib.parse import urlparse, unquote_plus
from itertools import chain
from collections import Counter
from datetime import datetime
from string import punctuation
import time
import os
import os.path as path
import threading
import regex
# noinspection PyPackageRequirements
import tld
# noinspection PyPackageRequirements
from tld.utils import TldDomainNotFound
import phonenumbers
import dns.resolver
import requests
import chatcommunicate
from helpers import log, regex_compile_no_cache, strip_pre_and_code_elements, strip_code_elements, \
get_bookended_keyword_regex_text_from_entries, keyword_bookend_regex_text, KEYWORD_BOOKENDING_START, \
get_non_bookended_keyword_regex_text_from_entries, chunk_list
import metasmoke_cache
from globalvars import GlobalVars
import blacklists
import phone_numbers
if tuple(int(x) for x in regex.__version__.split('.')) < (2, 5, 82):
raise ImportError(
'Need regex >= 2020.6.8 (internal version number 2.5.82; got %s)' %
regex.__version__)
LINK_CACHE = dict()
LINK_CACHE_lock = threading.RLock()
LEVEN_DOMAIN_DISTANCE = 3
SIMILAR_THRESHOLD = 0.95
SIMILAR_ANSWER_THRESHOLD = 0.7
BODY_TITLE_SIMILAR_RATIO = 0.90
CHARACTER_USE_RATIO = 0.42
PUNCTUATION_RATIO = 0.42
REPEATED_CHARACTER_RATIO = 0.20
IMG_TXT_R_THRES = 0.7
EXCEPTION_RE = r"^Domain (.*) didn't .*!$"
RE_COMPILE = regex_compile_no_cache(EXCEPTION_RE)
COMMON_MALFORMED_PROTOCOLS = [
('httl://', 'http://'),
]
# These types of files frequently get caught as "misleading link"
SAFE_EXTENSIONS = {'htm', 'py', 'java', 'sh'}
SE_SITES_RE = r'(?:{sites})'.format(
sites='|'.join([
r'(?:[a-z]+\.)*stackoverflow\.com',
r'(?:{doms})\.com'.format(doms='|'.join(
[r'askubuntu', r'superuser', r'serverfault', r'stackapps', r'imgur'])),
r'mathoverflow\.net',
r'i\.sstatic\.net',
r'(?:[a-z]+\.)*stackexchange\.com']))
SE_SITES_DOMAINS = ['stackoverflow.com', 'askubuntu.com', 'superuser.com', 'serverfault.com',
                    'mathoverflow.net', 'stackapps.com', 'stackexchange.com', 'sstatic.net',
                    'imgur.com']  # Frequently catching FP
WHITELISTED_WEBSITES_REGEX = regex_compile_no_cache(r"(?i)upload|\b(?:{})\b".format("|".join([
"yfrog", "gfycat", "tinypic", "sendvid", "ctrlv", "prntscr", "gyazo", r"youtu\.?be", "past[ie]", "dropbox",
"microsoft", "newegg", "cnet", "regex101", r"(?<!plus\.)google", "localhost", "ubuntu", "getbootstrap",
r"jsfiddle\.net", r"codepen\.io", "pastebin", r"nltk\.org", r"xahlee\.info", r"ergoemacs\.org", "regexr",
r"doi\.org"
] + [se_dom.replace(".", r"\.") for se_dom in SE_SITES_DOMAINS])))
URL_SHORTENER_REGEX_FRAGMENT = r"(?:{})".format('|'.join(regex.escape(site) for site in (
'0i.is', '1b.yt', '1th.me', '92q.com', '9nl.me', 'adf.ly', 'adfoc.us', 'adyou.co',
'alturl.com', 'amzn.to', 'bfy.tw', 'bit.do', 'bit.ly', 'bluenik.com', 'buff.ly',
'ckk.ai', 'cl.ly', 'clk.ink', 'clk.sh', 'clkmein.com', 'cu2.io', 'cutt.us', 'dyo.gs',
'etsy.me', 'fb.me', 'g3t.nl',
'goo.gl', # doctored; see below
'inro.in', 'is.gd', 'j.mp', 'jfi.uno', 'mex.su', 'n9.cl', 'numl.org', 'ovo.fyi',
'ow.ly', 'pdf.ac', 'post.ly', 'qrf.in', 'rave.dj', 'rplg.co', 'rurl.us', 'sco.lt',
'snip.ly', 'surl.cn.com', 't.co', 't.ly', 'tez.kr', 'tgig.ir', 'tgw.onl', 'tiny.cc',
'tinyurl.com', 'tr.im', 'wicc.me', 'wn.nr', 'wp.me', 'x4up.org', 'xurl.es', 'zee.gl',
'zee.im'
)))
# Special case for goo.gl; update the escaped regex with some actual non-escaped regex
# to exclude anything like goo.gl/maps/...
URL_SHORTENER_REGEX_FRAGMENT = URL_SHORTENER_REGEX_FRAGMENT.replace(
r'goo\.gl', r'goo\.gl(?![?&/]maps/)')
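# For example, 'goo.gl/abc123' is still treated as a shortener here, while
# 'goo.gl/maps/xyz' is skipped by the negative lookahead.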
ASN_WHITELISTED_WEBSITES = [
"unity3d.com", "ffmpeg.org", "bitcoincore.org", "latex.codecogs.com",
"advancedcustomfields.com", "name.com", "businessbloomer.com",
"wkhtmltopdf.org", "thefreedictionary.com", "ruby-doc.org",
"site.com.br", "test.ooo-pnu.ru", "swift.org", "site2.com",
"rxweb.io", "tenforums.com", "rhydolabz.com", "javatpoint.com",
# ^^ top 20 FP hosts that get reported due to 'bad ASN', collated by regex parsing
# https://metasmoke.erwaysoftware.com/data/sql/queries/164-bad-asn-in-false-positives
# As of 2019-04-18, the following had >=6 ASN detections which were FP
"ampps.com", "bintray.com", "config.ru", "datetime.date", "myexample.com",
"mywiki.wooledge.org", "sevenforums.com", "ultimatefreehost.in", "wa.me", "web.com",
# As of 2019-04-18, the following had 5 ASN detections which were FP
"androidforums.com", "getclayton.com", "indeed.com", "math.net",
"mobilefirstplatform.ibmcloud.com", "ss.ms", "table.to",
# As of 2019-04-18, the following had 4 ASN detections which were FP
"code.kx.com", "daniweb.com", "files.catbox.moe", "greatrecipetips.com", "html5up.net",
"irc.freenode.net", "italian-stresser.online", "meteocaldas.com", "techpowerup.com",
"unit-conversion.info", "wicked.io",
# not being added: bitcoinofficial.org, learn-neural-networks.com
# As of 2019-04-18, the following had 3 ASN detections which were FP
"bankingifsccodes.com", "bethelp.byethost3.com", "calculator.net", "change.by",
"clover.com", "cognimem.com", "csde.epizy.com", "docker.bintray.io", "domaine.com",
"dreamstime.com", "embedded101.com", "emildeveloping.com", "extjs.eu", "fixer.io",
"fondation-zeitgeist.com", "form.media", "formcontact.esy.es", "godandscience.org",
"kajariaceramics.com", "macappstore.org", "maven.ibiblio.org", "nayuki.io", "nxos.org",
"pdf.datasheetcatalog.com", "php.info", "pool.sks-keyservers.net", "reg.ru", "scaan.in",
"shop.btownmedia.com", "smtp.sendgrid.net", "sysengineering.ru", "techspot.com",
"testking.com", "thecollegeroar.com", "ursuscode.com", "vwo.com",
# Not added:
# 000webhostapp.com This domain would be reasonable to add if it didn't also whitelist all subdomains.
# As of 2019-04-18, the following had 2 ASN detections which were FP, and appeared to be common.
"wikileaks.org",
# Additional added not as part of a systematic investigation:
"ntp.org", "cpu-world.com", "caniuse.com", "guru99.com", "fontawesome.com",
"nirsoft.net",
# Added to prevent having 3 detections on just the domain.
"writingexplained.org", "eitren.com"]
# Hostname whitelist for the "*bad IP for hostname in {}" detections (i.e. for ip_for_url_host)
# Hostnames should be all lowercase, as the hostnames are obtained from
# urlparse().hostname, which lowercases the hostname.
WHITELISTED_IP_HOSTNAMES = [
"alfa.com.tw",
"angular.io",
"api.flutter.dev",
"bot.run",
"build.ninja",
"data.gov.sg",
"docker.com",
"ergoemacs.org",
"flutter.dev",
"form.as",
"godoc.org",
"image.network",
"log.info",
"maindomain.com",
"material.angular.io",
"model.fit",
"newsite.com",
"nextjs.org",
"ourdomain.com",
"paint.net",
"pipeline.fit",
"portforward.com",
"pub.dev",
"reactjs.org",
"shop.pimoroni.com",
"size.my",
"socket.io",
"talkerscode.com",
"terrytao.wordpress.com",
"thepihut.com",
"tips4java.wordpress.com",
"ufile.io",
"usenix.org",
"whatever.com",
"www.docker.com",
"www.sefaria.org",
"www.usenix.org",
"xahlee.info",
# Some which are in "bad IP for hostname in {}", but have only FP:
"oceansyrup.com",
"madainproject.com",
]
# Hostname whitelist for the "*bad NS for domain in {}" detections (i.e. for ns_for_url_domain)
# Hostnames should be all lowercase, as the hostnames are obtained from
# urlparse().hostname, which lowercases the hostname.
# This was seeded with those in the query:
# https://metasmoke.erwaysoftware.com/data/sql/queries/264-domains-in-bad-ns-for-domain-detections-by-fp
# which had 0 TP and > 5 FP.
# In addition, some which had a TP or two were also included, as those TP were individually determined to be
# detected by other things, TP for some reason other than what was detected, or otherwise determined to not
# need detection by this reason:
# paint.net, caniuse.com, nesbot.com, mail.com, dev47apps.com, 2.to, rudrastyh.com, joxi.ru
WHITELISTED_NS_HOSTNAMES = [
"1.run",
"123.com",
"2.to",
"4309.co.uk",
"4gtricks.com",
"5.run",
"7-zip.org",
"a2hosting.com",
"aaai.org",
"aiohttp.org",
"algoseek.com",
"ametsoc.org",
"anbox.io",
"antongerdelan.net",
"aspnetcore.app",
"automationtesting.in",
"bitsrc.io",
"bot.run",
"caniuse.com",
"ccel.org",
"cmake.org",
"cmder.net",
"coder.work",
"coinmarketcap.com",
"coinscious.io",
"convertcsv.com",
"cvedetails.com",
"datasciencemadesimple.com",
"deniz-tasarim.site",
"deploy.sh",
"desertbot.io",
"dev47apps.com",
"displaylink.com",
"duply.net",
"elecrow.com",
"engineeringtoolbox.com",
"ergoemacs.org",
"euclideanspace.com",
"everythingfonts.com",
"example2.com",
"expert-advice.org",
"flashrom.org",
"ftdichip.com",
"gitmemory.com",
"graphene-python.org",
"grymoire.com",
"hackingwithswift.com",
"harrisgeospatial.com",
"hashicorp.com",
"hopechurch.xyz",
"hostname.com",
"howtomechatronics.com",
"image.network",
"inmotionhosting.com",
"internaldomain.com",
"ionos.com",
"islamiqate.com",
"item.name",
"itsfoss.com",
"jazz-soft.net",
"johndcook.com",
"joxi.ru",
"json.org",
"json2csharp.com",
"jwork.org",
"key.properties",
"keyboard.press",
"killernetworking.com",
"kitgram.cn",
"kitware.com",
"ledsupply.com",
"liberty-development.net",
"linfo.org",
"ludwig.guru",
"mail.com",
"mastertheboss.com",
"melpa.org",
"messagebox.show",
"myhost.com",
"mypage.com",
"nesbot.com",
"newtonsoft.com",
"nextjs.org",
"nltk.org",
"orgmode.org",
"otexts.com",
"paint.net",
"pcpartpicker.com",
"pingcap.com",
"pointclouds.org",
"programming.vip",
"qgistutorials.com",
"qt.io",
"quasar.dev",
"raspberrytips.com",
"rcompanion.org",
"reactivex.io",
"readme.md",
"regexstorm.net",
"relevantcodes.com",
"repairwin.com",
"requirejs.org",
"robjhyndman.com",
"rudrastyh.com",
"sane-project.org",
"scintilla.org",
"simos.info",
"smartmontools.org",
"socket.io",
"springdoc.org",
"sqlitebrowser.org",
"sqlteam.com",
"stepik.org",
"substrate.io",
"sunfounder.cc",
"tampermonkey.net",
"techmikael.com",
"terraform.io",
"this.how",
"this.id",
"tiamet3d.com",
"tinyapps.org",
"ubuntuupdates.org",
"uipath.com",
"user.email",
"uubyte.com",
"webslesson.info",
"wikidevi.com",
"wintips.org",
"wphierarchy.com",
"wpshout.com",
"x.com",
"xahlee.info",
"xda-developers.com",
"yoursite.com",
"zlib.net",
# Some which are blacklisted, but have FP
"mastertheboss.com",
"xenarmor.com",
]
if GlobalVars.perspective_key:
PERSPECTIVE = "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key=" + GlobalVars.perspective_key
PERSPECTIVE_THRESHOLD = 0.85 # conservative
# Flee before the ugly URL validator regex!
# We are using this, instead of a nice library like BeautifulSoup, because spammers are
# stupid and don't always know how to actually *link* their web site. BeautifulSoup misses
# those plain text URLs.
# https://gist.github.com/dperini/729294#gistcomment-1296121
URL_REGEX = regex_compile_no_cache(
r"""((?:(?:https?|ftp)://)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)"""
r"""(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2}))"""
r"""(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"""
r"""(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))"""
r"""|\b(?:(?:[A-Za-z\u00a1-\uffff0-9]-?)*[A-Za-z\u00a1-\uffff0-9]+)(?:\.(?:[A-Za-z\u00a1-\uffff0-9]-?)"""
r"""*[A-Za-z\u00a1-\uffff0-9]+)*(?:\.(?:[A-Za-z\u00a1-\uffff]{2,})))(?::\d{2,5})?(?:/\S*)?""", regex.U)
TAG_REGEX = regex_compile_no_cache(r"</?[abcdehiklopsu][^>]*?>|\w+://", regex.U)
UNIFORM = math.log(1 / 36)
UNIFORM_PRIOR = math.log(1 / 5)
ENGLISH = {
'a': -2.56940287968626,
'e': -2.6325365263400786,
'o': -2.9482912667071903,
'r': -2.9867566750238046,
'i': -3.043195438576378,
's': -3.053589802306065,
'n': -3.0696364572432233,
'1': -3.134872509228817,
't': -3.230441879550407,
'l': -3.2558408400221905,
'2': -3.4663376838336166,
'm': -3.4810979044444426,
'd': -3.5635447023561517,
'0': -3.5958227205042967,
'c': -3.6348280308631855,
'p': -3.6771505079154236,
'3': -3.7158848391017765,
'h': -3.7019152926538648,
'b': -3.74138548356748,
'u': -3.8457967842578014,
'k': -3.9048726800430713,
'4': -3.9411171656325226,
'5': -3.9708339604329925,
'g': -3.961715896933319,
'9': -4.019842096462643,
'6': -4.041864072829501,
'8': -4.096998079687665,
'7': -4.122126943234552,
'y': -4.1666976658279635,
'f': -4.351040269361279,
'w': -4.360690517108493,
'j': -4.741006747760368,
'v': -4.759276833451455,
'z': -5.036594538526155,
'x': -5.137009730369897,
'q': -5.624531280146579
}
ENGLISH_PRIOR = math.log(4 / 5)
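# UNIFORM is the per-character log-likelihood under a uniform distribution over
# 36 symbols (26 letters + 10 digits); ENGLISH holds per-character log
# frequencies in English text; the two priors encode 4:1 odds in favor of
# English. A sketch of the intended two-class scoring (illustrative only; the
# code that consumes these constants is further down the file, outside this
# excerpt):
#     english_score = ENGLISH_PRIOR + sum(ENGLISH.get(c, UNIFORM) for c in text)
#     uniform_score = UNIFORM_PRIOR + UNIFORM * len(text)
#     looks_random = uniform_score > english_score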
class PostFilter:
"""
General filter for SE posts
"""
def __init__(self, all_sites=True, sites=None, max_rep=1, max_score=0, question=True, answer=True):
self.all_sites = all_sites
self.sites = set(sites) if sites is not None else set()
self.max_rep = max_rep
self.max_score = max_score
self.question = question
self.answer = answer
def match(self, post):
"""
See if a post matches this filter
"""
if (post.is_answer and not self.answer) or (not post.is_answer and not self.question):
# Wrong post type
return False
elif self.all_sites == (post.post_site in self.sites):
# Post is on wrong site
return False
elif (post.owner_rep > self.max_rep) or (post.post_score > self.max_score):
# High score or high rep
return False
else:
return True
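    # Illustrative example (not executed): with all_sites=True (the default),
    # `sites` acts as an exclusion list, so posts from listed sites fail the
    # filter; with all_sites=False it is an inclusion list. For instance,
    # PostFilter(all_sites=False, sites=["stackoverflow.com"], max_rep=11)
    # matches only Stack Overflow posts whose author has at most 11 reputation.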
class Rule:
"""
A single spam-checking rule
"""
default_filter = PostFilter()
def __init__(self, item, reason, title=True, body=True, body_summary=True, username=True, filter=None,
stripcodeblocks=False, whole_post=False, skip_creation_sanity_check=False, rule_id=None,
elapsed_time_reporting=None):
self.regex = None
self.func = None
if isinstance(item, (str, URL_REGEX.__class__)):
self.regex = item
else:
self.func = item
self.reason = reason
self.title = title
self.body = body
self.body_summary = body_summary
self.username = username
self.filter = filter or Rule.default_filter
self.stripcodeblocks = stripcodeblocks
self.whole_post = whole_post
self.rule_id = rule_id
self.elapsed_time_reporting = elapsed_time_reporting
if not skip_creation_sanity_check:
self.sanity_check()
def sanity_check(self):
if not self.func and not self.regex:
raise TypeError("A rule must have either 'func' or 'regex' valid! : {}".format(self.reason))
def match(self, post):
"""
Run this rule against a post.
Returns a list of 3 tuples for [result_title, result_username, result_body],
each in (match, reason, why) format
"""
if not self.filter.match(post):
# Post not matching the filter
return [(False, "", "")] * 3
body_to_check = post.body.replace("&nsbp;", "").replace("\xAD", "") \
.replace("\u200B", "").replace("\u200C", "")
body_type = "body" if not post.is_answer else "answer"
reason = self.reason
reason_title = reason.replace("{}", "title")
reason_username = reason.replace("{}", "username")
reason_body = reason.replace("{}", body_type)
if self.stripcodeblocks:
# use a placeholder to avoid triggering "linked punctuation" on code-only links
body_to_check = strip_pre_and_code_elements(body_to_check, leave_note=True)
if reason == 'phone number detected in {}':
body_to_check = regex.sub("<(?:a|img)[^>]+>", "", body_to_check)
matched_title, matched_username, matched_body = False, False, False
result_title, result_username, result_body = None, None, None
if self.func: # Functional check takes precedence over regex check
if self.whole_post:
matched_title, matched_username, matched_body, why_text = self.func(post)
result_title = (matched_title, reason_title,
reason_title.capitalize() + " - " + why_text)
result_username = (matched_username, reason_username,
reason_username.capitalize() + " - " + why_text)
result_body = (matched_body, reason_body,
reason_body.capitalize() + " - " + why_text)
else:
if self.title and not post.is_answer:
matched_title, why_text = self.func(post.title, post.post_site)
result_title = (matched_title, reason_title,
reason_title.capitalize() + " - " + why_text)
else:
result_title = (False, "", "")
if self.username:
matched_username, why_text = self.func(post.user_name, post.post_site)
result_username = (matched_username, reason_username,
reason_username.capitalize() + " - " + why_text)
else:
result_username = (False, "", "")
if self.body and not post.body_is_summary:
matched_body, why_text = self.func(body_to_check, post.post_site)
result_body = (matched_body, reason_body,
reason_body.capitalize() + " - " + why_text)
elif self.body_summary and post.body_is_summary:
matched_body, _ = self.func(body_to_check, post.post_site)
result_body = (matched_body, "", "")
else:
result_body = (False, "", "")
elif self.regex:
try:
compiled_regex = self.compiled_regex
except AttributeError:
compiled_regex = regex_compile_no_cache(self.regex, regex.UNICODE, city=city_list, ignore_unused=True)
self.compiled_regex = compiled_regex
if self.title and not post.is_answer:
matches = list(compiled_regex.finditer(post.title))
result_title = (bool(matches), reason_title,
reason_title.capitalize() + " - " + FindSpam.match_infos(matches))
else:
result_title = (False, "", "")
if self.username:
matches = list(compiled_regex.finditer(post.user_name))
result_username = (bool(matches), reason_username,
reason_username.capitalize() + " - " + FindSpam.match_infos(matches))
else:
result_username = (False, "", "")
if (self.body and not post.body_is_summary) \
or (self.body_summary and post.body_is_summary):
matches = list(compiled_regex.finditer(body_to_check))
result_body = (bool(matches), reason_body,
reason_body.capitalize() + " - " + FindSpam.match_infos(matches))
else:
result_body = (False, "", "")
else:
raise TypeError("To match, a rule must have either 'func' or 'regex' valid! : {}".format(reason))
# "result" format: tuple((title_spam, title_reason, why), (username_spam, username_reason, why),
# (body_spam, body_reason, why))
return result_title, result_username, result_body
def __call__(self, *args, **kwargs):
# Preserve the functionality of a function
if self.func:
return self.func(*args, **kwargs)
raise TypeError("This rule has no function set, can't call")
class FindSpam:
rules = []
rule_ids = set()
reasons = set()
# supplied at the bottom of this file
rule_bad_keywords = None
rule_watched_keywords = None
rule_blacklisted_websites = None
rule_blacklisted_usernames = None
    # The minimum number of seconds a Rule must take before extra text is
    # appended to the log output to draw more attention to the line.
ELAPSED_TIME_DRAW_ATTENTION_MIN = 5
    # This is an arbitrarily long list of tuples, each giving the log level to
    # use, text to prepend to the chat message, and a minimum elapsed time.
    # If the log level is empty, no log(<log level>, report text) call is made.
    # If the text to prepend to the chat message is empty, no chat message is
    # sent for that entry.
    # The list is scanned in order; the last entry whose <minimum elapsed time>
    # is met or exceeded by the Rule's elapsed time is the one acted upon.
ELAPSED_TIME_LOG_AND_TELL_LEVELS = [
# (<Log level>, <text prepended to chat message>, <minimum elapsed time>)
('debug', '', 1), # > 1 s: Log a "debug" level message for the Rule; No chat message
('info', 'High ', 10), # > 10 s: Log an "info" for the Rule and output to chat as "High "
('warning', '**Very High** ', 30), # > 30 s: Log a "warning" and output to chat as bold "Very High"
]
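    # Worked example with the defaults above: a Rule taking 12 s selects the
    # ('info', 'High ', 10) entry, so an "info"-level message is logged and a
    # chat message prefixed with "High " is sent; because 12 s also exceeds
    # ELAPSED_TIME_DRAW_ATTENTION_MIN, the logged line gets the arrow marker.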
@staticmethod
def _update_a_blacklist_dual_rule(rule_list, regex_text_generator, entries):
entries = list(entries)
entries_length = len(entries)
if entries_length > 100:
            # Use a chunk length equal to entries_length rounded down to the nearest multiple of 100.
chunk_length = int(str(entries_length)[:-2] + '00')
entries_lists = chunk_list(entries, chunk_length)
else:
# With <= 100 entries, Use an entries_lists with all the entries first, then a regex that can never match
entries_lists = [entries]
if len(entries_lists) == 1:
entries_lists.append([r'q(?<!q)'])
for index in range(2):
new_regex_text = regex_text_generator(entries_lists[index])
if new_regex_text != rule_list[index].regex:
rule_list[index].regex = new_regex_text
try:
del rule_list[index].compiled_regex
except AttributeError:
pass
rule_list[index].sanity_check()
@classmethod
def reload_blacklists(cls):
global bad_keywords_nwb
blacklists.load_blacklists()
cls._update_a_blacklist_dual_rule(cls.rule_bad_keywords,
get_bookended_keyword_regex_text_from_entries,
GlobalVars.bad_keywords)
cls._update_a_blacklist_dual_rule(cls.rule_watched_keywords,
get_bookended_keyword_regex_text_from_entries,
GlobalVars.watched_keywords.keys())
cls._update_a_blacklist_dual_rule(cls.rule_blacklisted_websites,
get_non_bookended_keyword_regex_text_from_entries,
GlobalVars.blacklisted_websites)
cls._update_a_blacklist_dual_rule(cls.rule_blacklisted_usernames,
get_non_bookended_keyword_regex_text_from_entries,
GlobalVars.blacklisted_usernames)
GlobalVars.blacklisted_numbers_full, GlobalVars.blacklisted_numbers, \
GlobalVars.blacklisted_numbers_normalized = \
phone_numbers.process_numlist(GlobalVars.blacklisted_numbers_raw)
GlobalVars.watched_numbers_full, GlobalVars.watched_numbers, \
GlobalVars.watched_numbers_normalized = phone_numbers.process_numlist(GlobalVars.watched_numbers_raw)
log('debug', "Global blacklists loaded")
@staticmethod
def test_post(post):
result = []
why_title, why_username, why_body = [], [], []
post_brief_id = "{}/{}/{}".format(post.post_site, "a" if post.is_answer else "q", post.post_id)
for rule in FindSpam.rules:
start_time = time.time()
title, username, body = rule.match(post)
end_time = time.time()
elapsed_time = end_time - start_time
elapsed_time_draw_attention_min = FindSpam.ELAPSED_TIME_DRAW_ATTENTION_MIN
elapsed_time_levels = FindSpam.ELAPSED_TIME_LOG_AND_TELL_LEVELS
if type(rule.elapsed_time_reporting) is dict:
elapsed_time_draw_attention_min = rule.elapsed_time_reporting.get('draw_attention_min', 600)
elapsed_time_levels = rule.elapsed_time_reporting.get('levels', [])
draw_attention = ' <------------------' if elapsed_time > elapsed_time_draw_attention_min else ''
log_type = ''
tell_text = ''
for log_level, tell_level, minimum_elapsed_time in elapsed_time_levels:
                if elapsed_time >= minimum_elapsed_time:
log_type = log_level
tell_text = tell_level
if log_type or tell_text:
log_message = ('Rule elapsed time: {:.2f} s'.format(elapsed_time)
+ ': {}: {}'.format(rule.reason, rule.rule_id)
+ ' for [{}](https://{})'.format(post_brief_id, post_brief_id))
if log_type:
log(log_type, log_message + draw_attention)
if tell_text:
chatcommunicate.tell_rooms_with('long-rule-times', tell_text + log_message)
if title[0]:
result.append(title[1])
why_title.append(title[2])
if username[0]:
result.append(username[1])
why_username.append(username[2])
if body[0]:
result.append(body[1])
why_body.append(body[2])
result = list(set(result))
result.sort()
why = "\n".join(sorted(why_title + why_username + why_body)).strip()
return result, why
@staticmethod
def match_info(match):
start, end = match.span()
group = match.group().replace("\n", "")
return "Position {}-{}: {}".format(start + 1, end, group)
@staticmethod
def match_infos(matches):
spans = {}
for match in matches:
group = match.group().strip().replace("\n", "")
if group not in spans:
spans[group] = [match.span()]
else:
spans[group].append(match.span())
infos = [(sorted(spans[word]), word) for word in spans]
infos.sort(key=lambda info: info[0]) # Sort by positions of appearances
return ", ".join([
"Position{} {}: {}".format(
"s" if len(span) > 1 else "",
", ".join(
["{}-{}".format(a, b) for a, b in span]
if len(span) < 14 else
["{}-{}".format(a, b) for a, b in span[:12]] + ["+{} more".format(len(span) - 12)]
),
word
)
for span, word in infos])
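    # Illustrative output: for the pattern r"spam" over "spam and spam", this
    # returns "Positions 0-4, 9-13: spam". Spans here are raw match.span()
    # values; match_info above, by contrast, reports a 1-based start position.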
################################################################################
# The creator of all the spam-checking rules.
# Do NOT touch the default values unless you want to break things.
# Note that a single function may serve more than one rule (see create_multiple_rules).
def create_rule(reason, regex=None, func=None, *, all=True, sites=[],
title=True, body=True, body_summary=False, username=False,
max_score=0, max_rep=1, question=True, answer=True, stripcodeblocks=False,
whole_post=False, # For some functions
                disabled=False,  # set disabled=True to keep the rule defined but not registered
rule_id=None, # Unique rule ID [The "reason" may be on multiple rules; this is unique to the rule.]
elapsed_time_reporting=None,
skip_creation_sanity_check=False):
if not isinstance(reason, str):
raise ValueError("reason must be a string")
if GlobalVars.valid_detection_reasons is not None and reason not in GlobalVars.valid_detection_reasons:
# There was a list of reasons provided in the config file as valid and this detection reason isn't in that list.
disabled = True
if rule_id is None and reason not in FindSpam.reasons:
# Only the first Rule with the reason can use the reason as the default rule_id.
rule_id = reason
if rule_id is None or rule_id in FindSpam.rule_ids:
raise ValueError("rule_id must exist and be unique for reason: {}:: ID: {}".format(reason, rule_id))
FindSpam.rule_ids.add(rule_id)
FindSpam.reasons.add(reason)
if GlobalVars.valid_rule_ids is not None and rule_id not in GlobalVars.valid_rule_ids:
# There was a list of valid rule IDs provided in the config file and this detection isn't in that list.
disabled = True
if not (body or body_summary or username): # title-only
answer = False # answers have no titles, this saves some loops
post_filter = PostFilter(all_sites=all, sites=sites, max_score=max_score, max_rep=max_rep,
question=question, answer=answer)
if regex is not None:
# Standalone mode
rule = Rule(regex, reason=reason, filter=post_filter,
title=title, body=body, body_summary=body_summary, username=username,
stripcodeblocks=stripcodeblocks, skip_creation_sanity_check=skip_creation_sanity_check,
rule_id=rule_id, elapsed_time_reporting=elapsed_time_reporting)
if not disabled:
FindSpam.rules.append(rule)
return rule
else:
# Decorator-generator mode
def decorator(func):
if isinstance(func, Rule):
func = func.func # Extract the real function from the created rule to allow multi-creation
try:
func.__call__
except AttributeError:
raise ValueError("This rule does not contain a function, can't recreate") from None
rule = Rule(func, reason=reason, filter=post_filter, whole_post=whole_post,
title=title, body=body, body_summary=body_summary, username=username,
stripcodeblocks=stripcodeblocks, skip_creation_sanity_check=skip_creation_sanity_check,
rule_id=rule_id, elapsed_time_reporting=elapsed_time_reporting)
if not disabled:
FindSpam.rules.append(rule)
return rule
if func is not None: # Function is supplied, no need to decorate
return decorator(func)
else: # real decorator mode
return decorator
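# Usage sketch (illustrative only; the reasons and patterns here are made up):
#
#     # Standalone mode: register a regex-based rule directly.
#     create_rule("example keyword in {}", r"\bexample spam phrase\b")
#
#     # Decorator mode: register a function-based rule; the function receives
#     # the text to check and the site, and returns (matched, why).
#     @create_rule("example check in {}", max_rep=11, stripcodeblocks=True)
#     def example_check(s, site):
#         return ("example" in s), "Found 'example'"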
def create_multiple_rules(*args, rule_quantity=1, rule_id=None, **kwargs):
rules = []
for index in range(rule_quantity):
index_rule_id = rule_id + ' index:' + str(index)
rules.append(create_rule(*args, rule_id=index_rule_id, **kwargs))
return rules
def is_whitelisted_website(url):
# Imported from method link_at_end
return bool(WHITELISTED_WEBSITES_REGEX.search(url)) or metasmoke_cache.is_website_whitelisted(url)
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
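# For example, levenshtein("googl", "google") == 1. Domains within
# LEVEN_DOMAIN_DISTANCE (3) edits of each other are treated as equivalent by the
# misleading-link check below.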
@create_rule("misleading link", title=False, max_rep=11, max_score=1, stripcodeblocks=True)
def misleading_link(s, site):
# Regex that finds the href value and the link text from an HTML <a>, if the link text
# doesn't contain a '<' or space.
link_regex = r"<a href=\"([^\"]++)\"[^>]*+>\s*+([^< ]++)\s*+<\/a>"
compiled = regex.compile(link_regex)
search = compiled.search(s)
if search is None:
# The s string contained no HTML <a> elements with an href and link text matching the link_regex.
return False, ''
href, text = search[1], search[2]
if '.' not in text:
# To have a first level domain, the link text must contain a '.'.
return False, ''
try:
parsed_href = tld.get_tld(href, as_object=True)
if parsed_href.fld in SE_SITES_DOMAINS:
return False, ''
parsed_text = tld.get_tld(text, fix_protocol=True, as_object=True)
# The parsed_text_fld_with_extra_subdomain check verifies the tld package found an actual domain,
# rather than a second part of a tld. The tld package has gotten better at being sure it gets
# a full tld, when it exists, but there may be some corner cases. This is definitely needed
# at least for some tld versions prior to 0.9.8 (e.g. 0.9.0), but possible corner cases hint
# that this check should be retained.
parsed_text_fld_with_extra_subdomain = tld.get_tld('foo.' + parsed_text.fld, fix_protocol=True, as_object=True)
if parsed_text.fld == parsed_text.tld or parsed_text.tld != parsed_text_fld_with_extra_subdomain.tld:
# The link text doesn't have a valid domain (i.e. the FLD must be more than just the TLD).
return False, ''
    except (tld.exceptions.TldDomainNotFound, tld.exceptions.TldBadUrl, ValueError):
return False, ''
if site == 'stackoverflow.com' and parsed_text.fld.split('.')[-1] in SAFE_EXTENSIONS:
return False, ''
if href.endswith('/' + text):
# Don't detect URLs like "https://example.com/foo.txt" for link text "foo.txt".
return False, ''
if levenshtein(parsed_href.domain, parsed_text.domain) <= LEVEN_DOMAIN_DISTANCE: # Preempt
return False, ''
try:
href_domain = unquote_plus(parsed_href.domain.encode("ascii").decode("idna"))
except ValueError:
href_domain = parsed_href.domain
try:
text_domain = unquote_plus(parsed_text.domain.encode("ascii").decode("idna")) # people do post this, sad
except ValueError:
text_domain = parsed_text.domain
if levenshtein(href_domain, text_domain) > LEVEN_DOMAIN_DISTANCE:
return True, 'Domain {} indicated by possible misleading text {}.'.format(
parsed_href.fld, parsed_text.fld
)
else:
return False, ''
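# Illustrative detection (hypothetical post content): for
# '<a href="https://scam-example.com/x">paypal.com</a>', the href domain
# "scam-example" and the link-text domain "paypal" differ by more than
# LEVEN_DOMAIN_DISTANCE edits, so the rule would return
# (True, 'Domain scam-example.com indicated by possible misleading text paypal.com.').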
# noinspection PyUnusedLocal,PyMissingTypeHints,PyTypeChecker
@create_rule("repeating words in {}", max_rep=11, stripcodeblocks=True)
def has_repeating_words(s, site):
# RegEx DoS warning!!!
matcher = regex.compile(r"\b(?P<words>(?P<word>[a-z]+))(?:[][\s.,;!/\()+_-]+(?P<words>(?P=word))){4,}\b",
flags=regex.I | regex.S | regex.V0)
for match in matcher.finditer(s):
words = match.captures("words")
word = match.group("word")
if len(words) >= 5 and len(word) * len(words) >= 0.18 * len(s):
return True, "{}*{}".format(repr(word), len(words))
return False, ""
# noinspection PyUnusedLocal,PyMissingTypeHints
@create_rule("few unique characters in {}", title=False, max_rep=10000, max_score=10000)
def has_few_characters(s, site):
s = regex.sub("</?(?:p|strong|em)>", "", s).rstrip() # remove HTML paragraph tags from posts
uniques = len(set(s) - {"\n", "\t"})
length = len(s)
thresholds = [ # LBound, UBound, MaxUnique
(30, 36, 6), (36, 42, 7), (42, 48, 8), (48, 54, 9), (54, 60, 10),
(60, 70, 11), (70, 80, 12), (80, 90, 13), (90, 100, 14), (100, 2**30, 15),
]
if any([t[0] <= length < t[1] and uniques <= t[2] for t in thresholds]):
if uniques >= 5 and site == "math.stackexchange.com":
        # Special case for Math.SE: mathematical notation often uses few unique
        # characters, which would otherwise trigger false positives here.
return False, ""
return True, "Contains {} unique character{}".format(uniques, "s" if uniques >= 2 else "")
return False, ""
def len_img_block(string):
""" Length of image html blocks from a string. """
all_oc = regex.findall(r'<img\s[^<>]*+>', string)
tot_len = 0
for oc in all_oc:
tot_len += len(oc)
return tot_len
# max_score=2, since spammers sometimes inflate their posts' scores through voting fraud
@create_rule("post is mostly images", title=False, max_rep=201, max_score=2)
def mostly_img(s, site):
if len(s) == 0:
return False, ""
s_len_img = len_img_block(strip_code_elements(s))
if s_len_img / len(s) > IMG_TXT_R_THRES:
return True, "{:.4f} of the post is html image blocks".format(s_len_img / len(s))
return False, ""
# noinspection PyUnusedLocal,PyMissingTypeHints
@create_rule("repeating characters in {}", stripcodeblocks=True, max_rep=10000, max_score=10000)
def has_repeating_characters(s, site):
s = s.strip().replace("\u200B", "").replace("\u200C", "") # Strip leading and trailing spaces
if "\n\n" in s or "<code>" in s or "<pre>" in s:
return False, ""
s = URL_REGEX.sub("", s) # Strip URLs for this check
if not s:
return False, ""
# Don't detect a couple of common ways for people to try to include tables (reduces FP by ~20%).
if regex.search(r"(?:(?:----+|====+)[+|]+){2}", s):
return False, ""
# matches = regex.compile(r"([^\s_.,?!=~*/0-9-])(\1{9,})", regex.UNICODE).findall(s)
matches = regex.compile(r"([^\s\d_.])(\1{9,})", regex.UNICODE).findall(s)
match = "".join(["".join(match) for match in matches])
if len(match) / len(s) >= REPEATED_CHARACTER_RATIO: # Repeating characters make up >= 20 percent
return True, "{}".format(", ".join(
["{}*{}".format(repr(match[0]), len(''.join(match))) for match in matches]))
return False, ""
# noinspection PyUnusedLocal,PyMissingTypeHints
@create_rule("link at end of {}", title=False, all=False,
sites=["superuser.com", "askubuntu.com", "drupal.stackexchange.com", "meta.stackexchange.com",
"security.stackexchange.com", "patents.stackexchange.com", "money.stackexchange.com",
"gaming.stackexchange.com", "arduino.stackexchange.com", "workplace.stackexchange.com"],
rule_id="link at end: main link_at_end, limited sites")
def link_at_end(s, site): # link at end of question, on selected sites
s = regex.sub("</?(?:strong|em|p)>", "", s)
match = regex.compile(
r"(?i)https?://(?:"
"" r"[.A-Za-z0-9-]*/?[.A-Za-z0-9-]*/?|"
"" r"plus\.google\.com/[\w/]*|"
"" r"www\.pinterest\.com/pin/[\d/]*|"
r")(?=</a>\s*$)").search(s)
if match and not is_whitelisted_website(match.group(0)):
return True, u"Link at end: {}".format(match.group(0))
return False, ""
# noinspection PyUnusedLocal,PyMissingTypeHints,PyTypeChecker
@create_rule("non-English link in {}", title=False, question=False, stripcodeblocks=True, sites=[
"pt.stackoverflow.com", "es.stackoverflow.com", "ja.stackoverflow.com", "ru.stackoverflow.com",
"rus.stackexchange.com", "islam.stackexchange.com", "japanese.stackexchange.com", "hinduism.stackexchange.com",
"judaism.stackexchange.com", "buddhism.stackexchange.com", "chinese.stackexchange.com",
"russian.stackexchange.com", "french.stackexchange.com", "portuguese.stackexchange.com",
"spanish.stackexchange.com", "codegolf.stackexchange.com", "korean.stackexchange.com",
"esperanto.stackexchange.com", "ukrainian.stackexchange.com"])
def non_english_link(s, site): # non-english link in short answer
if len(s) < 600:
links = regex.compile(r'nofollow(?: noreferrer)?">([^<]*)(?=</a>)', regex.UNICODE).findall(s)
for link_text in links:
word_chars = regex.sub(r"(?u)\W", "", link_text)
non_latin_chars = regex.sub(r"\w", "", word_chars)
if len(word_chars) >= 1 and ((len(word_chars) <= 20 and len(non_latin_chars) >= 1) or
(len(non_latin_chars) >= 0.05 * len(word_chars))):
return True, u"Non-English link text: *{}*".format(link_text)
return False, ""
# noinspection PyUnusedLocal,PyMissingTypeHints,PyTypeChecker
@create_rule("mostly non-Latin {}", stripcodeblocks=True, sites=[
"stackoverflow.com", "ja.stackoverflow.com", "pt.stackoverflow.com", "es.stackoverflow.com",
"islam.stackexchange.com", "japanese.stackexchange.com", "anime.stackexchange.com",
"hinduism.stackexchange.com", "judaism.stackexchange.com", "buddhism.stackexchange.com",
"chinese.stackexchange.com", "french.stackexchange.com", "spanish.stackexchange.com",
"portuguese.stackexchange.com", "codegolf.stackexchange.com", "korean.stackexchange.com",
"ukrainian.stackexchange.com"], body_summary=True, rule_id="Mostly non-Latin: most sites")
@create_rule("mostly non-Latin {}", all=False, sites=["stackoverflow.com"],
stripcodeblocks=True, body_summary=True, question=False, rule_id="Mostly non-Latin: SO answers only")
def mostly_non_latin(s, site): # majority of post is in non-Latin, non-Cyrillic characters
word_chars = regex.sub(r'(?u)[\W0-9]|http\S*', "", s)
non_latin_chars = regex.sub(r"(?u)\p{script=Latin}|\p{script=Cyrillic}", "", word_chars)
if len(non_latin_chars) > 0.4 * len(word_chars):
return True, "Text contains {} non-Latin characters out of {}".format(len(non_latin_chars), len(word_chars))
return False, ""
phone_number_detected_in_title_second_exclude_regex_text = (
r"(?is)^(?:"
"" r"(?="
"" "" r"(?!.*?(?:quick[\W_]*+books?|binance|support|help(?:line|desk)|\bcall\b|antivirus|customer|whatsapp?))"
"" "" r".*?(?:"