pyprox_tcp.py
#!/usr/bin/env python3
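"""
A small threaded TCP proxy: it listens on 127.0.0.1:listen_PORT, forwards each
connection to a Cloudflare edge IP, and sends the client's first packet
(normally the TLS Client Hello) in small, delayed fragments so that DPI
middleboxes lose track of the earlier chunks. All later traffic is relayed
unchanged in both directions.
"""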
import socket
import threading
import os
import time
if os.name == 'posix':
    print('POSIX OS detected: raising the open-file limit')
    import resource  # standard-library module, available on POSIX systems only
    # raise the per-process limit on open sockets/files from the usual 1024 to ~128k
    resource.setrlimit(resource.RLIMIT_NOFILE, (127000, 128000))
listen_PORT = 2500                # pyprox listens on 127.0.0.1:listen_PORT
Cloudflare_IP = '162.159.135.42'  # plos.org (can be any Cloudflare edge IP, even a "dirty" one)
Cloudflare_port = 443
L_fragment = 77       # length of each fragment of the Client Hello packet (L_fragment bytes per chunk)
fragment_sleep = 0.2  # sleep between fragments so the GFW cache fills up and it forgets the earlier chunks

# The settings below are carried over from the older code; leave them as they are.
my_socket_timeout = 60   # Google's default is ~21 s; 60 s is recommended unless RAM is low and sockets must close sooner
first_time_sleep = 0.01  # speed control: avoids overwhelming the server when a huge number of users flood in (default 0.1)
accept_time_sleep = 0.01 # avoids crashing the server under a flood of requests -> at most ~100 accepted sockets per second
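# Worked example with the values above: a typical browser Client Hello is around
# 517 bytes, so L_fragment = 77 splits it into ceil(517 / 77) = 7 chunks, and with
# fragment_sleep = 0.2 the whole hello takes roughly 7 * 0.2 ≈ 1.4 seconds to send.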
class ThreadedServer(object):
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def listen(self):
        self.sock.listen(128)  # queue up to 128 not-yet-accepted connections; anything beyond that is refused until the backlog drains
        while True:
            client_sock, client_addr = self.sock.accept()
            client_sock.settimeout(my_socket_timeout)
            # print('someone connected')
            time.sleep(accept_time_sleep)  # avoid crashing the server under a flood of connections
            thread_up = threading.Thread(target=self.my_upstream, args=(client_sock,))
            thread_up.daemon = True  # daemon thread: it belongs to the main program and will not keep the process alive; its resources are reclaimed when it finishes
            thread_up.start()

    def my_upstream(self, client_sock):
        first_flag = True
        backend_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        backend_sock.settimeout(my_socket_timeout)
        backend_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)  # force the kernel to send each TCP packet immediately (idea: @free_the_internet)

        while True:
            try:
                if first_flag:
                    first_flag = False

                    time.sleep(first_time_sleep)  # speed control + wait for the packet to be fully received
                    data = client_sock.recv(16384)
                    # print('len data -> ', str(len(data)))
                    # print('user talk :')

                    if data:
                        backend_sock.connect((Cloudflare_IP, Cloudflare_port))
                        thread_down = threading.Thread(target=self.my_downstream, args=(backend_sock, client_sock))
                        thread_down.daemon = True
                        thread_down.start()
                        # backend_sock.sendall(data)
                        send_data_in_fragment(data, backend_sock)
                    else:
                        raise Exception('cli syn close')

                else:
                    data = client_sock.recv(4096)
                    if data:
                        backend_sock.sendall(data)
                    else:
                        raise Exception('cli pipe close')

            except Exception as e:
                # print('upstream : ' + repr(e))
                time.sleep(2)  # wait two seconds for the other thread to flush
                client_sock.close()
                backend_sock.close()
                return False

    def my_downstream(self, backend_sock, client_sock):
        first_flag = True
        while True:
            try:
                if first_flag:
                    first_flag = False
                    data = backend_sock.recv(16384)
                    if data:
                        client_sock.sendall(data)
                    else:
                        raise Exception('backend pipe close at first')

                else:
                    data = backend_sock.recv(4096)
                    if data:
                        client_sock.sendall(data)
                    else:
                        raise Exception('backend pipe close')

            except Exception as e:
                # print('downstream : ' + repr(e))
                time.sleep(2)  # wait two seconds for the other thread to flush
                backend_sock.close()
                client_sock.close()
                return False

def send_data_in_fragment(data, sock):
    for i in range(0, len(data), L_fragment):
        fragment_data = data[i:i + L_fragment]
        print('send', len(fragment_data), 'bytes')
        # sock.send(fragment_data)
        sock.sendall(fragment_data)
        time.sleep(fragment_sleep)

    print('----------finish------------')
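
# Note: TCP_NODELAY is set on backend_sock in my_upstream, so each sendall() above
# should leave the kernel as its own TCP segment rather than being coalesced by
# Nagle's algorithm (which would otherwise defeat the fragmentation).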

if __name__ == '__main__':
    print("Now listening at: 127.0.0.1:" + str(listen_PORT))
    ThreadedServer('', listen_PORT).listen()
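
# Hypothetical usage example: point a client that would normally reach the Cloudflare
# edge at this proxy instead, e.g. with curl (--connect-to is a standard curl option):
#   curl -v --connect-to plos.org:443:127.0.0.1:2500 https://plos.org/
# The proxy then connects to Cloudflare_IP:Cloudflare_port and sends the Client Hello
# in L_fragment-byte chunks.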