-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathspread_dork.py
220 lines (150 loc) · 5.69 KB
/
spread_dork.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
#!/usr/bin/env python3.6
# spread_dork.py
import os
import sys
import time
from modules.tools import *
try:
import requests
except ImportError:
print(f"[{R}!!!{N}] Error import 'requests' model ")
exit()
try:
import concurrent.futures
except ImportError:
print(f"[{R}!!!{N}] Error import 'concurrent' model ")
exit()
try:
from interruptingcow import timeout
except ImportError:
print(f"[{R}!!!{N}] Error import 'interruptingcow' model ")
exit()
try:
from bs4 import BeautifulSoup
except ImportError:
print(f"[{R}!!!{N}] Error import 'bs4' model ")
exit()
def SAVE_URLS(URLs, FILE):
    """Append a URL to the output file, skipping URLs already stored there.

    URLs: the URL string to persist.
    FILE: output file path. If it cannot be read, falls back to
    'target.txt' (original behavior) and treats the known-URL list
    as empty.
    """
    try:
        # Bug fix: the original assigned to a stray name 'tryread_file',
        # leaving 'read_file' an empty string — the dedup check below
        # then never matched anything and duplicates were always written.
        with open(FILE) as src:
            known = [line.rstrip('\n') for line in src]
    except FileNotFoundError:
        FILE = 'target.txt'
        known = []
    with open(os.path.join(os.getcwd(), FILE), 'a') as f:
        '''
        If your need just url like this => http://<site>.com
        URL = f"{URLs.split('/')[0]}//{URLs.split('/')[2]}"
        '''
        URL = URLs
        if URL not in known:
            f.write(f'{URL}\n')
# Base URL of the search engine being scraped.
Url = "https://www.google.com"
# Search endpoint path; the dork query string is appended after ?q= in GET_DATA.
Path_search = "/search?q="
# Optional HTTP proxy for the requests session (assigned in GET_DATA when uncommented).
# PROXY = {"http":"http://200.255.122.170:8080"}
def STYLES(*args, **kwargs):
    """Print the startup banner.

    Positional args: args[0] dork string, args[1] result count,
    args[2] output file path. Relies on the color constants
    (G, N, W, B) and index() star-imported from modules.tools.
    """
    print(
        f"{G}{index()}{N}\n\n"
        f"{W}{'-'*50}{N}\n"
        f"[ {B}+{N} ] Dork : {args[0]:<10}\n"
        f"[ {B}+{N} ] Result: {args[1]}\n"
        f"[ {B}+{N} ] File : {args[2]}\n"
        # Bug fix: message read "Plaes Wite...." in the original.
        f"[{B}==>{N}] Please Wait....\n"
        f"{W}{'-'*50}{N}"
    )
def GET_DATA(args, **kwargs):
    """Fetch one page of Google search results.

    args is a 'target;number' string: the dork query and the result
    offset used for Google's &start= parameter. Returns the raw
    response body (bytes), or the sentinel string 'gogend' on a
    non-200 status — the caller treats that as "banned, stop".
    Calls exit() on timeout or connection failure.
    """
    # NOTE(review): GET is only used within this function; the global
    # declaration looks unnecessary — confirm nothing else reads it.
    global GET
    Target, Number = args.split(';')
    # Fresh session per request.
    SIS = requests.Session()
    # Blank User-Agent — presumably to get the simplified HTML layout the
    # scraper's CSS classes below depend on; TODO confirm.
    SIS.headers['User-Agent'] = ''
    # [ SET ] Proxy In the Process
    # SIS.proxies = PROXY
    # Hard 10-second cap on the whole request via interruptingcow.
    with timeout(10):
        try:
            time.sleep(1.25)  # throttle between requests to look less bot-like
            # &filter=0 disables result de-duplication on pages after the first.
            GET = SIS.get(f"{Url}{Path_search}{Target}&start={Number}{'&filter=0' if int(Number) > 0 else '' }")
            if GET.status_code != 200:
                print(f"[{R}!!!{N}] Sorry You've been banned from Google")
                return 'gogend'
        # NOTE(review): interruptingcow raises RuntimeError by default, not
        # TimeoutError — this handler may never fire; the RuntimeError is
        # caught by GET_URLS instead. Verify against interruptingcow docs.
        except TimeoutError:
            print(f"[{R}!!!{N}] Timeout Error");exit()
        except requests.exceptions.ConnectionError:
            print(f"[{R}!!!{N}] Sorry No internet");exit()
    # Raw bytes of the page; parsed with BeautifulSoup by the caller.
    return GET.content
def PINDEX(*args, **kwargs):
    """Print one search result and persist its URL.

    Called with a single 5-item list: [url, title, id, all_urls, file].
    Mutates the list in place (unwraps Google's /url?q= redirect),
    prints a colored summary, and appends the URL to the output file
    via SAVE_URLS. Calls exit() when it sees a titleless /search%3
    link, which is treated as the end of the result pages.
    """
    # global GET
    for i in args:
        ''' ANY URL IN GOOGLE SEARCH /url?q= '''
        # Google wraps outbound links as /url?q=<real>&sa=...; unwrap in place.
        if '/url?q=' in i[0]:i[0] = i[0].split('/url?q=')[-1].split('&sa=')[0]
        # END THE [ results ] AND EXIT !
        # len(i[3]) - 1 excludes the terminating /search%3 entry from the count.
        if '/search%3' in i[0] and i[1] == '':
            print(f"\n{'-'*50}\n[ {B}OK{N} ] Save All Urls in {i[4]}{N}\n[ {R}END{N} ] Find {len(i[3]) - 1 } results \n{W}")
            exit()
        # Tags render red (R) when the title is missing, otherwise T.
        print(f"[ {T if i[1] != '' else R }ID{N} ] {i[2]}\n[ {T if i[1] != '' else R }TIT{N} ] {N}{i[1]}\n[ {T if i[1] != '' else R}URL{N} ] {N}{i[0]}\n{W}{'='*50}{N}")
        SAVE_URLS(i[0], i[4])
def GET_URLS(*args, **kwargs):
    """Paginate Google results for a dork and stream them to PINDEX.

    args[0]: the dork/query string. args[1]: output file path passed
    through to PINDEX/SAVE_URLS. Loops until GET_DATA reports a ban
    ('gogend'), PINDEX exits at the end of results, or Ctrl-C.
    """
    NUM = ID = 0
    LIST = []
    # Bug fix: TIMEOUT was never initialized in the original, so the
    # RuntimeError handler below crashed with NameError on first use.
    TIMEOUT = 0
    while True:
        # Run the blocking fetch in a worker process.
        with concurrent.futures.ProcessPoolExecutor() as excutor:
            try:
                rDATA = excutor.submit(GET_DATA, f'{args[0]};{NUM}')
                lDATA = rDATA.result()
            # interruptingcow's timeout raises RuntimeError; retry up to 3 times.
            except RuntimeError:
                print(f"[ {R}!{N} ] Sorry Time Out {TIMEOUT + 1}")
                if TIMEOUT >= 3:
                    exit()
                else:
                    TIMEOUT += 1
                continue
            except KeyboardInterrupt:
                break
            # Sentinel from GET_DATA: Google returned non-200 (ban).
            if lDATA == 'gogend':
                break
            # Parse the page; these CSS classes match Google's no-JS layout.
            FORMATS = BeautifulSoup(lDATA, "html5lib")
            for i in FORMATS.findAll("div", {"class":"ZINbbc xpd O9g5cc uUPGi"}):
                # Result URL; missing anchors yield "".
                try:
                    URL = i.find("a").get('href')
                except AttributeError:  # was a bare except
                    URL = ""
                # Drop Google-internal links.
                if "www.google.com" in URL:
                    URL = ""
                if "/search?ie=UTF-8" in URL:
                    URL = ""
                # Result title; missing title divs yield "".
                try:
                    TITLE = i.find("div", {"class":"BNeawe vvjwJb AP7Wnd"}).text
                except AttributeError:  # was a bare except
                    TITLE = ""
                # Bug fix: original used `is ""` (identity) instead of equality.
                if TITLE == "" and URL == "":
                    continue
                ID += 1
                LIST.append(URL)
                PINDEX([URL, TITLE, ID, LIST, args[1]])
            # if len(LIST) >= int(args[1]):break
            # Next page offset = number of results collected so far.
            NUM = len(LIST)
if __name__ == "__main__":
try:
s, *dork = sys.argv
except:
pass
# Help ...
if '-h' in sys.argv or '--help' in sys.argv:
print(f"Example:\n\tpython3 {s} inurl:contactus site:np")
exit()
# File urls save.
FILE = input(f"[ {R}*{N} ] File Output default is target.txt [ Y/N ]: ")
if FILE.upper() == 'Y' or FILE == '':
FILE = 'target.txt'
else:
FILE = input(f"[ {R}*{N} ] Enter Path File Output: ")
if dork != []:
if len(dork) >= 1:
DORK = str()
for i in dork:
DORK += f'{i} '
STYLES(DORK.strip(), 1000, FILE)
GET_URLS(DORK.strip(), FILE)
else:
YES = input(f"[ {R}*{N} ] Enter The Dork : ")
STYLES(YES.strip(), 1000, FILE)
GET_URLS(YES.strip(), FILE)