@@ -0,0 +1,98 @@
+# Stores the list of destination ports seen for each dip, per each sip
+# ftp remote edit
from silk import *
+import multiprocessing as mp
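+
+# Overview: glob the SiLK data files for the date range, count destination
+# ports per (sip, dip) pair in parallel, merge the per-file results, and for
+# each sip report the dips contacted on 100 or more distinct ports.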
+
+
+# To filter by ports. Not wanted yet.
+#minPort = 20
+#maxPort = 5000
+
+
+def verify_type(filename):
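+    """Read a single SiLK flow file and return a nested dict:
+    sip -> dip -> dport -> number of flow records. IPv6 sources are skipped."""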
+
+    dportHash = {} # sip -> dip -> dport -> count of flow records
+    filename = [filename]
+
+    for file in filename:
+
+        for rec in silkfile_open(file, READ): # read the flow file record by record
+            sip = str(rec.sip)
+            dip = str(rec.dip)
+            dport = rec.dport
+            if ':' in sip: # skip IPv6 source addresses
+                continue
+            else:
+                if sip in dportHash:
+                    if dip in dportHash[sip]:
+                        if dport in dportHash[sip][dip]:
+                            dportHash[sip][dip][dport] += 1
+                        else:
+                            dportHash[sip][dip][dport] = 1
+                    else:
+                        dportHash[sip][dip] = {dport: 1}
+                else:
+                    dportHash[sip] = {dip: {dport: 1}}
+
+    return dportHash
+
+def join_hash(hash_list):
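+    """Merge the per-file dicts produced by verify_type into one
+    sip -> dip -> dport -> count dict, summing counts for repeated ports."""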
+    complete_hash = {}
+    for partial in hash_list:
+        for sip, dip_hash in partial.items():
+            if sip in complete_hash:
+                for dip, dports in dip_hash.items():
+                    if dip in complete_hash[sip]:
+                        for number, value in dports.items():
+                            if number in complete_hash[sip][dip]:
+                                print "DPORTS", number
+                                complete_hash[sip][dip][number] += value
+                            else:
+                                complete_hash[sip][dip][number] = value
+                    else:
+                        complete_hash[sip][dip] = dports
+            else:
+                complete_hash[sip] = dip_hash
+    return complete_hash
+
+
+def main():
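+    """Glob one month of SiLK flow data, build the port counts across 8
+    worker processes, and print each dip contacted on 100 or more distinct ports."""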
+    startDate = "2018/09/1"
+    endDate = "2018/09/30"
+    otherHash = {}
+    counter = 0
+    process_num = 8
+    pool = mp.Pool(processes=process_num)
+    files = FGlob(classname="all", type="all", start_date=startDate, end_date=endDate, site_config_file="/etc/silk/conf-v9/silk.conf", data_rootdir="/home/scratch/flow/rwflowpack/")
+
+    files = [x for x in files] # materialize the FGlob iterator so it can be split across workers
+    print len(files)
+    fileHash = pool.map(verify_type, files) # one dict per data file
+    flowHash = join_hash(fileHash)
+    print "FLOW", len(flowHash)
+    for sips in flowHash: # iterate over every sip and its dip/port counters
+        for dips, dports in flowHash[sips].items():
+            if len(dports) >= 100: # dips reached on 100 or more distinct ports are the interesting ones, so keep them in a hash
+                print "DIP", dips, len(dports)
+                if sips in otherHash:
+                    otherHash[sips][dips] = dports
+                else:
+                    otherHash[sips] = {dips: dports}
+
+    for dips, dports in otherHash.items():
+        counter += 1 # count the entries in the hash
+
+    print counter
+
+if __name__ == "__main__":
+    main()