exploitdbWebApp.py
# -*- coding: utf-8 -*-
import os

import facebook
import scrapy
from scrapy.exceptions import CloseSpider


class exploitdbWebApp(scrapy.Spider):
    """Scrape the exploit-db.com web-application listing and post any
    entries that are new since the last run to a Facebook page."""

    name = "exploitdbWebApp"
    allowed_domains = ["exploit-db.com"]
    start_urls = [
        "https://www.exploit-db.com/webapps/"
    ]

    # This block runs once, when the class is defined: it loads the
    # description of the most recently stored exploit (the second line of
    # baseWebApp.txt, since each entry is saved as date / description /
    # link / blank line).
    global ult
    if os.path.isfile('baseWebApp.txt') and os.stat('baseWebApp.txt').st_size == 0:
        os.remove('baseWebApp.txt')
    if os.path.isfile('baseWebApp.txt'):
        with open('baseWebApp.txt', 'r') as arq:
            ult = arq.readlines()[1].rstrip('\n')
    else:
        # Create an empty baseline file; open/close is more portable than
        # os.mknod, which needs special privileges on some systems.
        open('baseWebApp.txt', 'w').close()
        ult = ''

    def parse(self, response):
        global ult
        datas = response.xpath('//tr/td[@class="date"]/text()').extract()
        links = response.xpath('//tr/td[5]/a/@href').extract()
        descs = response.xpath('//tr/td[5]/a[@href]/text()').extract()
        i = 0  # set to 1 while new entries are still being collected
        for data, desc, link in zip(datas, descs, links):
            if ult == '':
                # First run, no baseline yet: store every entry directly.
                with open('baseWebApp.txt', 'a+') as arq:
                    arq.write(data.strip() + '\n')
                    arq.write(desc + '\n')
                    arq.write(link + '\n\n')
                i = 0
            elif desc != ult:
                # Entry is newer than the stored baseline: stage it in aux.txt.
                with open('aux.txt', 'a+') as arq:
                    arq.write(data.strip() + '\n')
                    arq.write(desc + '\n')
                    arq.write(link + '\n\n')
                i = 1
            else:
                # Reached the previously stored entry: nothing below is new.
                i = 0
                break
        if not i:
            novos = (ult == '')  # on the first run everything counts as new
            if os.path.exists('aux.txt'):
                # Promote the staged entries to be the new baseline.
                os.remove('baseWebApp.txt')
                os.rename('aux.txt', 'baseWebApp.txt')
                novos = True
            if not novos:
                # Nothing new since the last run: stop without reposting.
                raise CloseSpider('[+] NO NEW ENTRIES [+]')
            # PUT YOUR FACEBOOK PAGE ACCESS TOKEN HERE
            access_token = "PUT YOUR FACEBOOK PAGE ACCESS TOKEN HERE"
            api = facebook.GraphAPI(access_token)
            with open('baseWebApp.txt', 'r') as arq:
                linhas = arq.readlines()
            # Each stored entry is three lines followed by a blank line, so
            # counting blank lines gives the number of entries.
            total = sum(1 for linha in linhas if linha == '\n')
            a = 0
            b = 3
            lista = []
            for _ in range(total):
                lista.append(linhas[a:b])
                a = b + 1  # skip the blank separator line
                b = a + 3
            lista.reverse()  # post oldest first so the newest ends up on top
            for entrada in lista:
                api.put_wall_post(''.join(entrada))
            raise CloseSpider('[+] BASE UPDATED [+]')
        else:
            # Still collecting new entries: follow the pagination link.
            proxima_pagina = response.xpath(
                '//a[@href and contains(.,"next")]/@href').extract_first()
            if proxima_pagina:
                yield scrapy.Request(url=response.urljoin(proxima_pagina),
                                     callback=self.parse)
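

# A minimal sketch of launching this spider directly, without a full Scrapy
# project, using scrapy.crawler.CrawlerProcess. This runner block is an
# assumption, not part of the original file; the spider can equally be
# started from the command line with `scrapy runspider exploitdbWebApp.py`.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess()
    process.crawl(exploitdbWebApp)
    process.start()  # blocks until the crawl finishes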