-
Notifications
You must be signed in to change notification settings - Fork 0
/
dbaccess.py
executable file
·663 lines (571 loc) · 22.8 KB
/
dbaccess.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
import web
import common
import json
def test_database():
    """Probe the database with a trivial query to verify connectivity.

    :return: 0 if the database responded normally, otherwise the MySQL
        server error code from the failed probe (e.g. 1049 unknown
        database, 1045 access denied).
        See http://dev.mysql.com/doc/refman/5.7/en/error-messages-server.html
    """
    result = 0
    try:
        common.db.query("SELECT 1 FROM Syslog LIMIT 1;")
    except Exception as e:
        # e.args[0] is the numeric error code on both Python 2 and 3;
        # the previous e[0] indexing only worked on Python 2.
        result = e.args[0]
    return result
def create_database():
    """Create the samapper database, build its tables, and load default data."""
    # Connect without naming a database, since the target may not exist yet.
    params = dict(common.dbconfig.params)
    del params['db']
    connection = web.database(**params)
    connection.query("CREATE DATABASE IF NOT EXISTS samapper;")
    exec_sql("./sql/setup_database.sql")
    reset_port_names()
def parse_sql_file(path):
    """Read a .sql script and split it into individual statements.

    Full-line comments (starting with "--") are discarded, statements are
    split on ";", and empty statements (e.g. trailing newlines) are dropped.

    :param path: filesystem path of the SQL script to parse
    :return: list of SQL statement strings (without trailing semicolons)
    """
    with open(path, 'r') as f:
        lines = f.readlines()
    # remove comment lines
    lines = [i for i in lines if not i.startswith("--")]
    # join into one long string, then split it into individual commands
    script = " ".join(lines)
    # Return a concrete list: filter() is lazy on Python 3, which would
    # hand callers a one-shot iterator instead of a reusable sequence.
    return [cmd for cmd in script.split(";") if cmd.strip()]
def exec_sql(path):
    """Execute every statement found in the SQL script at *path*."""
    for statement in parse_sql_file(path):
        common.db.query(statement)
def reset_port_names():
    """Drop and rebuild the port lookup table (portLUT) from bundled defaults.

    Names are truncated to 10 characters and descriptions to 255 to fit
    the column sizes declared in setup_LUTs.sql.
    """
    # drop and recreate the table
    exec_sql("./sql/setup_LUTs.sql")
    with open("./sql/default_port_data.json", 'rb') as f:
        # json.load reads the stream directly; the previous
        # json.loads("".join(f.readlines())) joined bytes with a str
        # separator, which fails on Python 3.
        port_data = json.load(f)
    ports = port_data["ports"].values()
    for port in ports:
        if len(port['name']) > 10:
            port['name'] = port['name'][:10]
        if len(port['description']) > 255:
            port['description'] = port['description'][:255]
    common.db.multiple_insert('portLUT', values=ports)
def get_nodes(ip8=-1, ip16=-1, ip24=-1, ip32=-1):
    """Fetch the Nodes rows at the subnet depth implied by the given segments.

    The first address segment outside 0..255 marks the depth: no valid
    segments selects the /8 roots, one valid segment selects the /16
    children inside it, and so on. A fully specified address yields [].
    """
    r = common.determine_range(ip8, ip16, ip24, ip32)
    depth = None
    for index, segment in enumerate((ip8, ip16, ip24, ip32)):
        if segment < 0 or segment > 255:
            depth = index
            break
    if depth is None:
        # all four segments valid: a /32 has no children to list
        return []
    if depth == 0:
        return common.db.select("Nodes", where="subnet=8")
    subnet = 8 * (depth + 1)
    clause = "subnet={0} && ipstart BETWEEN {1} AND {2}".format(subnet, r[0], r[1])
    return common.db.select("Nodes", where=clause)
def get_links_in(ip8, ip16=-1, ip24=-1, ip32=-1, port_filter=None, timerange=None):
    """
    This function returns a list of the connections coming in to a given node from the rest of the graph.

    * The connections are aggregated into groups based on the first diverging ancestor.
        that means that connections to destination 1.2.3.4
        that come from source 1.9.*.* will be grouped together as a single connection.
    * for /8, /16, and /24, `SourceIP` -> `DestIP` make a unique connection.
    * for /32, `SourceIP` -> `DestIP` : `Port` make a unique connection.
    * If filter is provided, only connections over the given port are considered.
    * If timerange is provided, only connections that occur within the given time range are considered.

    :param ip8: The first segment of the IPv4 address: __.xx.xx.xx
    :param ip16: The second segment of the IPv4 address: xx.__.xx.xx
    :param ip24: The third segment of the IPv4 address: xx.xx.__.xx
    :param ip32: The fourth segment of the IPv4 address: xx.xx.xx.__
    :param port_filter: Only consider connections using this destination port. Default is no filtering.
    :param timerange: Tuple of (start, end) timestamps. Only connections happening
        during this time period are considered.
    :return: A list of db results formatted as web.storage objects (used like dictionaries)
    """
    r = common.determine_range(ip8, ip16, ip24, ip32)
    ports = 0 <= ip32 <= 255  # include ports in the results?
    # `where` is either "" or starts with " && ", so it can be appended directly
    where = build_where_clause(timerange, port_filter)
    if ports:
        # /32 host: each destination port is a distinct connection
        select = "src_start, src_end, dst_start, dst_end, port, sum(links) AS 'links'"
        group_by = "GROUP BY src_start, src_end, dst_start, dst_end, port"
    else:
        select = "src_start, src_end, dst_start, dst_end, sum(links) AS 'links'"
        group_by = "GROUP BY src_start, src_end, dst_start, dst_end"
    query = """
        SELECT {select}
        FROM LinksIn
        WHERE dst_start = $start && dst_end = $end
         {where}
        {group_by}
        """.format(where=where, select=select, group_by=group_by)
    qvars = {"start": r[0], "end": r[1]}
    rows = list(common.db.query(query, vars=qvars))
    return rows
def get_links_out(ip8, ip16=-1, ip24=-1, ip32=-1, port_filter=None, timerange=None):
    """
    This function returns a list of the connections going out of a given node from the rest of the graph.

    * The connections are aggregated into groups based on the first diverging ancestor.
        that means that connections to destination 1.2.3.4
        that come from source 1.9.*.* will be grouped together as a single connection.
    * for /8, /16, and /24, `SourceIP` -> `DestIP` make a unique connection.
    * for /32, `SourceIP` -> `DestIP` : `Port` make a unique connection.
    * If filter is provided, only connections over the given port are considered.
    * If timerange is provided, only connections that occur within the given time range are considered.

    :param ip8: The first segment of the IPv4 address: __.xx.xx.xx
    :param ip16: The second segment of the IPv4 address: xx.__.xx.xx
    :param ip24: The third segment of the IPv4 address: xx.xx.__.xx
    :param ip32: The fourth segment of the IPv4 address: xx.xx.xx.__
    :param port_filter: Only consider connections using this destination port. Default is no filtering.
    :param timerange: Tuple of (start, end) timestamps. Only connections happening
        during this time period are considered.
    :return: A list of db results formatted as web.storage objects (used like dictionaries)
    """
    r = common.determine_range(ip8, ip16, ip24, ip32)
    ports = 0 <= ip32 <= 255  # include ports in the results?
    # `where` is either "" or starts with " && ", so it can be appended directly
    where = build_where_clause(timerange, port_filter)
    if ports:
        # /32 host: each destination port is a distinct connection
        select = "src_start, src_end, dst_start, dst_end, port, sum(links) AS 'links'"
        group_by = "GROUP BY src_start, src_end, dst_start, dst_end, port"
    else:
        select = "src_start, src_end, dst_start, dst_end, sum(links) AS 'links'"
        group_by = "GROUP BY src_start, src_end, dst_start, dst_end"
    query = """
        SELECT {select}
        FROM LinksOut
        WHERE src_start = $start && src_end = $end
         {where}
        {group_by}
        """.format(where=where, select=select, group_by=group_by)
    qvars = {"start": r[0], "end": r[1]}
    rows = list(common.db.query(query, vars=qvars))
    return rows
def build_where_clause(timestamp_range=None, port=None, rounding=True):
    """Build an escaped SQL condition fragment for time and port filtering.

    Returns either "" or a string beginning with " && ", so callers can
    append it directly to an existing WHERE clause. Values are escaped via
    web.db.reparam rather than interpolated raw.

    :param timestamp_range: optional (start, end) unix timestamps
    :param port: optional port number (NOTE: port 0 is treated the same as
        "no filter" because of the truthiness test below)
    :param rounding: widen the window by ~150s on each side, presumably to
        catch 5-minute-quantized Syslog timestamps -- TODO confirm
    :return: "" or " && <conditions>"
    """
    clauses = []
    t_start = 0
    t_end = 0
    if timestamp_range:
        t_start = timestamp_range[0]
        t_end = timestamp_range[1]
        if rounding:
            # rounding to 5 minutes, for use with the Syslog table
            if t_start > 150:
                t_start -= 150
            if t_end <= 2 ** 31 - 150:
                # only widen if it cannot overflow a 32-bit signed timestamp
                t_end += 149
        clauses.append("timestamp BETWEEN FROM_UNIXTIME($tstart) AND FROM_UNIXTIME($tend)")
    if port:
        clauses.append("port = $port")
    qvars = {'tstart': t_start, 'tend': t_end, 'port': port}
    # reparam substitutes the $-placeholders with safely quoted values
    WHERE = str(web.db.reparam("\n && ".join(clauses), qvars))
    if WHERE:
        WHERE = " && " + WHERE
    return WHERE
def get_details_summary(ip_range, timestamp_range=None, port=None):
    """Summarize traffic for a node: unique inbound sources, unique
    outbound destinations, and unique destination ports seen.

    :param ip_range: (ipstart, ipend) bounds of the node's address space
    :param timestamp_range: optional (start, end) unix timestamps
    :param port: optional destination-port filter
    :return: one db row with keys 'unique_in', 'unique_out', 'unique_ports'
    """
    # WHERE is "" or starts with " && ", so it appends cleanly to each subquery
    WHERE = build_where_clause(timestamp_range=timestamp_range, port=port)
    query2 = """
    SELECT (
        SELECT COUNT(DISTINCT src)
            FROM Links
            WHERE dst BETWEEN $start AND $end
             {0}) AS 'unique_in'
        , (SELECT COUNT(DISTINCT dst)
            FROM Links
            WHERE src BETWEEN $start AND $end
             {0}) AS 'unique_out'
        , (SELECT COUNT(DISTINCT port)
            FROM Links
            WHERE dst BETWEEN $start AND $end
             {0}) AS 'unique_ports';""".format(WHERE)
    qvars = {'start': ip_range[0], 'end': ip_range[1]}
    rows = common.db.query(query2, vars=qvars)
    row = rows[0]
    return row
def get_details_connections(ip_range, inbound, timestamp_range=None, port=None, page=1, page_size=50, order="-links"):
    """Page through the remote endpoints connected to a node.

    :param ip_range: (ipstart, ipend) bounds of the node's address space
    :param inbound: True to list sources of inbound links, False for
        destinations of outbound links
    :param timestamp_range: optional (start, end) unix timestamps
    :param port: optional destination-port filter
    :param page: 1-based page number
    :param page_size: rows per page
    :param order: sort column (one of links/src/dst/port), with an
        optional '-' prefix for descending; invalid values fall back
        to "links"
    :return: list of rows with keys 'ip', 'port', 'links'
    """
    sort_options = ['links', 'src', 'dst', 'port']
    qvars = {
        'start': ip_range[0],
        'end': ip_range[1],
        'first': page_size * (page - 1),
        'size': page_size,
        'WHERE': build_where_clause(timestamp_range, port)
    }
    if inbound:
        # collect the remote (source) address, filter on the destination
        qvars['collected'] = "src"
        qvars['filtered'] = "dst"
    else:
        qvars['filtered'] = "src"
        qvars['collected'] = "dst"
    if order and order[0] == '-':
        sort_dir = "DESC"
    else:
        sort_dir = "ASC"
    # whitelist the sort column to keep the ORDER BY injection-safe
    if order and order[1:] in sort_options:
        sort_by = order[1:]
    else:
        sort_by = sort_options[0]
    qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
    # LIMIT values are passed as bound parameters ($first, $size), matching
    # get_details_ports, instead of being string-formatted into the SQL.
    query = """
        SELECT decodeIP({collected}) AS 'ip', port AS 'port', sum(links) AS 'links'
        FROM Links
        WHERE {filtered} BETWEEN $start AND $end
         {WHERE}
        GROUP BY {collected}, port
        ORDER BY {order}
        LIMIT $first, $size;
        """.format(**qvars)
    return list(common.db.query(query, vars=qvars))
def get_details_ports(ip_range, timestamp_range=None, port=None, page=1, page_size=50, order="-links"):
    """Return one page of destination ports used by traffic into a node,
    with aggregate link counts, sorted as requested.

    :param ip_range: (ipstart, ipend) bounds of the node's address space
    :param timestamp_range: optional (start, end) unix timestamps
    :param port: optional destination-port filter
    :param page: 1-based page number
    :param page_size: rows per page
    :param order: 'links' or 'port', '-' prefix for descending
    :return: list of rows with keys 'port', 'links'
    """
    sort_options = ['links', 'port']
    descending = bool(order) and order[0] == '-'
    sort_key = order[1:] if order and order[1:] in sort_options else sort_options[0]
    qvars = {
        'start': ip_range[0],
        'end': ip_range[1],
        'first': (page - 1) * page_size,
        'size': page_size,
        'WHERE': build_where_clause(timestamp_range, port),
        'order': "{0} {1}".format(sort_key, "DESC" if descending else "ASC"),
    }
    query = """
        SELECT port AS 'port', sum(links) AS 'links'
        FROM Links
        WHERE dst BETWEEN $start AND $end
         {WHERE}
        GROUP BY port
        ORDER BY {order}
        LIMIT $first, $size;
        """.format(**qvars)
    return list(common.db.query(query, vars=qvars))
def get_details_children(ip_range, subnet, page, page_size, order):
    """Page through the child subnets of a node.

    For each child one aggregation level down, reports its address,
    hostname alias, subnet, the number of leaf endpoints beneath it, and
    the inbound fraction of its traffic (links_in / (links_in + links_out)).

    :param ip_range: (ipstart, ipend, quotient); the quotient is the child
        block size used to bucket the LinksIn/LinksOut aggregates
    :param subnet: parent prefix length; children span subnet+1 .. subnet+8
    :param page: 1-based page number
    :param page_size: rows per page
    :param order: sort key from ipstart/hostname/endpoints/ratio, with an
        optional '-' prefix for descending
    :return: list of rows with 'address', 'hostname', 'subnet',
        'endpoints', 'ratio'
    """
    sort_options = ['ipstart', 'hostname', 'endpoints', 'ratio']
    start = ip_range[0]
    end = ip_range[1]
    quotient = ip_range[2]
    child_subnet_start = subnet + 1
    child_subnet_end = subnet + 8
    first_result = (page - 1) * page_size
    qvars = {'ip_start': start,
             'ip_end': end,
             's_start': child_subnet_start,
             's_end': child_subnet_end,
             'first': first_result,
             'size': page_size,
             'quot': quotient,
             'quot_1': quotient - 1}
    if order and order[0] == '-':
        sort_dir = "DESC"
    else:
        sort_dir = "ASC"
    # whitelist the sort column to keep the ORDER BY injection-safe
    if order and order[1:] in sort_options:
        sort_by = order[1:]
    else:
        sort_by = sort_options[0]
    qvars['order'] = "{0} {1}".format(sort_by, sort_dir)
    # The three subqueries bucket link totals and endpoint counts into
    # child-sized address blocks (x DIV $quot * $quot) so they can be
    # joined onto each child node's [ipstart, ipend] range.
    query = """
        SELECT decodeIP(`n`.ipstart) AS 'address'
            , COALESCE(`n`.alias, '') AS 'hostname'
            , `n`.subnet AS 'subnet'
            , `sn`.kids AS 'endpoints'
            , COALESCE(`l_in`.links,0) / (COALESCE(`l_in`.links,0) + COALESCE(`l_out`.links,0)) AS 'ratio'
        FROM Nodes AS `n`
        LEFT JOIN (
            SELECT dst_start DIV $quot * $quot AS 'low'
                , dst_end DIV $quot * $quot + $quot_1 AS 'high'
                , sum(links) AS 'links'
            FROM LinksIn
            GROUP BY low, high
            ) AS `l_in`
        ON `l_in`.low = `n`.ipstart AND `l_in`.high = `n`.ipend
        LEFT JOIN (
            SELECT src_start DIV $quot * $quot AS 'low'
                , src_end DIV $quot * $quot + $quot_1 AS 'high'
                , sum(links) AS 'links'
            FROM LinksOut
            GROUP BY low, high
            ) AS `l_out`
        ON `l_out`.low = `n`.ipstart AND `l_out`.high = `n`.ipend
        LEFT JOIN (
            SELECT ipstart DIV $quot * $quot AS 'low'
                , ipend DIV $quot * $quot + $quot_1 AS 'high'
                , COUNT(ipstart) AS 'kids'
            FROM Nodes
            WHERE ipstart = ipend
            GROUP BY low, high
            ) AS `sn`
        ON `sn`.low = `n`.ipstart AND `sn`.high = `n`.ipend
        WHERE `n`.ipstart BETWEEN $ip_start AND $ip_end
            AND `n`.subnet BETWEEN $s_start AND $s_end
        ORDER BY {order}
        LIMIT $first, $size;
        """.format(order=qvars['order'])
    return list(common.db.query(query, vars=qvars))
def get_tags(address):
    """Return tags set on *address* itself plus tags inherited from ancestors.

    :param address: address string, e.g. "1.2.3" or "1.2.3.4"
    :return: dict with 'tags' (exact-range matches) and 'p_tags' (ancestors)
    """
    ipstart, ipend = common.determine_range_string(address)
    qvars = {'start': ipstart, 'end': ipend}
    rows = common.db.select("Tags", vars=qvars, where='ipstart <= $start AND ipend >= $end')
    tags = []
    parent_tags = []
    for row in rows:
        # an exact range match is the node itself; anything else is an ancestor
        exact = row.ipstart == ipstart and row.ipend == ipend
        (tags if exact else parent_tags).append(row.tag)
    return {"p_tags": parent_tags, "tags": tags}
def get_tag_list():
    """Return every distinct non-empty tag present in the Tags table."""
    rows = common.db.select("Tags", what="DISTINCT tag")
    return [r.tag for r in rows if r.tag]
def set_tags(address, new_tags):
    """Replace the tag set attached to *address* with *new_tags*.

    Tags already in the database but absent from new_tags are deleted;
    tags in new_tags not yet stored are inserted; the rest are untouched.

    :param address: address string, e.g. "1.2.3" or "1.2.3.4"
    :param new_tags: iterable of tag strings the node should end up with
    """
    table = 'Tags'
    what = "ipstart, ipend, tag"
    r = common.determine_range_string(address)
    row = {"ipstart": r[0], "ipend": r[1]}
    where = "ipstart = $ipstart AND ipend = $ipend"
    existing = list(common.db.select(table, vars=row, what=what, where=where))
    old_tags = [x.tag for x in existing]
    # diff the stored tags against the requested set
    removals = [x for x in old_tags if x not in new_tags]
    additions = [x for x in new_tags if x not in old_tags]
    for tag in additions:
        row['tag'] = tag
        common.db.insert("Tags", **row)
    for tag in removals:
        row['tag'] = tag
        where = "ipstart = $ipstart AND ipend = $ipend AND tag = $tag"
        common.db.delete("Tags", where=where, vars=row)
def get_env(address):
    """Look up the environment assigned to *address*.

    Returns the node's own env ("inherit" if unset) together with the
    effective parent env: the env of the nearest enclosing ancestor that
    declares one, defaulting to "production".

    :param address: address string, e.g. "1.2.3" or "1.2.3.4"
    :return: dict with keys 'env' and 'p_env'
    """
    ipstart, ipend = common.determine_range_string(address)
    WHERE = 'ipstart <= $start AND ipend >= $end'
    qvars = {'start': ipstart, 'end': ipend}
    data = common.db.select("Nodes", vars=qvars, where=WHERE, what="ipstart, ipend, env")
    parent_env = "production"
    env = "inherit"
    nearest_distance = -1
    for row in data:
        if row.ipend == ipend and row.ipstart == ipstart:
            # the node itself
            if row.env:
                env = row.env
        else:
            # ancestor: keep the env of the nearest one that declares a
            # real (non-"inherit") env.
            # BUG FIX: nearest_distance was never updated before, so the
            # last matching row won instead of the closest ancestor.
            dist = row.ipend - ipend + ipstart - row.ipstart
            if row.env and row.env != "inherit":
                if nearest_distance == -1 or dist < nearest_distance:
                    nearest_distance = dist
                    parent_env = row.env
    return {"env": env, "p_env": parent_env}
def get_env_list():
    """Return the set of known environment names, always including the
    built-in "production", "inherit" and "dev"."""
    rows = common.db.select("Nodes", what="DISTINCT env")
    envs = {row.env for row in rows if row.env}
    envs.update(("production", "inherit", "dev"))
    return envs
def set_env(address, env):
    """Store *env* as the environment of the node at *address*."""
    ipstart, ipend = common.determine_range_string(address)
    common.db.update('Nodes', {"ipstart": ipstart, "ipend": ipend}, env=env)
def get_node_info(address):
    """Collect summary statistics for a single node.

    :param address: address string, e.g. "12.34" or "12.34.56.78"
    :return: one row with the node's address, hostname, unique in/out peer
        and connection counts, link totals, distinct inbound ports, count
        of leaf endpoints beneath it, and the observed capture span in
        seconds -- or {} when the node does not exist.
    """
    ipstart, ipend = common.determine_range_string(address)
    # Each LEFT JOIN computes one aggregate per statistic family; the
    # constant $start alias 's1' is the join key tying them to the node.
    query = """
        SELECT CONCAT(decodeIP(n.ipstart), CONCAT('/', n.subnet)) AS 'address'
            , COALESCE(n.hostname, '') AS 'hostname'
            , COALESCE(l_out.unique_out_ip, 0) AS 'unique_out_ip'
            , COALESCE(l_out.unique_out_conn, 0) AS 'unique_out_conn'
            , COALESCE(l_out.total_out, 0) AS 'total_out'
            , COALESCE(l_in.unique_in_ip, 0) AS 'unique_in_ip'
            , COALESCE(l_in.unique_in_conn, 0) AS 'unique_in_conn'
            , COALESCE(l_in.total_in, 0) AS 'total_in'
            , COALESCE(l_in.ports_used, 0) AS 'ports_used'
            , children.endpoints AS 'endpoints'
            , t.seconds
        FROM (
            SELECT ipstart, subnet, alias AS 'hostname'
            FROM Nodes
            WHERE ipstart = $start AND ipend = $end
        ) AS n
        LEFT JOIN (
            SELECT $start AS 's1'
                , COUNT(DISTINCT dst) AS 'unique_out_ip'
                , COUNT(DISTINCT dst, port) AS 'unique_out_conn'
                , SUM(links) AS 'total_out'
            FROM Links
            WHERE src BETWEEN $start AND $end
            GROUP BY 's1'
        ) AS l_out
            ON n.ipstart = l_out.s1
        LEFT JOIN (
            SELECT $start AS 's1'
                , COUNT(DISTINCT src) AS 'unique_in_ip'
                , COUNT(DISTINCT src, port) AS 'unique_in_conn'
                , SUM(links) AS 'total_in'
                , COUNT(DISTINCT port) AS 'ports_used'
            FROM Links
            WHERE dst BETWEEN $start AND $end
            GROUP BY 's1'
        ) AS l_in
            ON n.ipstart = l_in.s1
        LEFT JOIN (
            SELECT $start AS 's1'
                , COUNT(ipstart) AS 'endpoints'
            FROM Nodes
            WHERE ipstart = ipend AND ipstart BETWEEN $start AND $end
        ) AS children
            ON n.ipstart = children.s1
        LEFT JOIN (
            SELECT $start AS 's1'
                , (MAX(TIME_TO_SEC(timestamp)) - MIN(TIME_TO_SEC(timestamp))) AS 'seconds'
            FROM Links
            GROUP BY 's1'
        ) AS t
            ON n.ipstart = t.s1
        LIMIT 1;
        """
    qvars = {"start": ipstart, "end": ipend}
    results = common.db.query(query, vars=qvars)
    # the inner WHERE matches at most one node, so len is 0 or 1
    if len(results) == 1:
        return results[0]
    else:
        return {}
def set_node_info(address, data):
    """Update columns of the Nodes row for *address* from the *data* mapping.

    :param address: address string, e.g. "1.2.3" or "1.2.3.4"
    :param data: dict of column -> value pairs to write
    """
    # debug tracing left in place intentionally
    print("-" * 50)
    print("Setting node info!")
    print("type data: " + str(type(data)))
    print(data)
    print("-" * 50)
    ipstart, ipend = common.determine_range_string(address)
    common.db.update('Nodes', {"ipstart": ipstart, "ipend": ipend}, **data)
def get_port_info(port):
    """Look up the default and aliased name/description for one or more ports.

    :param port: a single port number, or a list of port numbers
    :return: list of rows joining portLUT with any portAliasLUT overrides
        (alias columns are NULL when no alias exists); [] for an empty list
    """
    if isinstance(port, list):
        if not port:
            # the old concatenation loop emitted invalid SQL ("IN )")
            # for an empty list; short-circuit instead
            return []
        arg = "({0})".format(",".join(str(p) for p in port))
    else:
        arg = "({0})".format(port)
    # NOTE(review): port values are interpolated into the SQL; assumed to
    # be integers validated upstream -- confirm before exposing this to
    # untrusted input.
    query = """
        SELECT portLUT.port, portLUT.active, portLUT.name, portLUT.description,
            portAliasLUT.name AS alias_name,
            portAliasLUT.description AS alias_description
        FROM portLUT
        LEFT JOIN portAliasLUT
            ON portLUT.port=portAliasLUT.port
        WHERE portLUT.port IN {0}
        """.format(arg)
    info = list(common.db.query(query))
    return info
def set_port_info(data):
    """Create or update the user-editable metadata for a port.

    :param data: dict containing 'port' plus any of:
        'alias_name' (truncated to 10 chars),
        'alias_description' (truncated to 255 chars),
        'active' ('1' or 1 enables the port, anything else disables it)
    """
    MAX_NAME_LENGTH = 10
    MAX_DESCRIPTION_LENGTH = 255
    if 'port' not in data:
        # print() call form matches the rest of the file and works on
        # both Python 2 and 3; the old bare print statement did not.
        print("Error setting port info: no port specified")
        return
    port = data['port']
    alias_name = ''
    alias_description = ''
    active = 0
    if 'alias_name' in data:
        alias_name = data['alias_name'][:MAX_NAME_LENGTH]
    if 'alias_description' in data:
        alias_description = data['alias_description'][:MAX_DESCRIPTION_LENGTH]
    if 'active' in data:
        active = 1 if data['active'] == '1' or data['active'] == 1 else 0
    # update portAliasLUT database of names to include the new information
    exists = common.db.select('portAliasLUT', what="1", where={"port": port})
    if len(exists) == 1:
        # only overwrite the fields the caller actually supplied
        kwargs = {}
        if 'alias_name' in data:
            kwargs['name'] = alias_name
        if 'alias_description' in data:
            kwargs['description'] = alias_description
        if kwargs:
            common.db.update('portAliasLUT', {"port": port}, **kwargs)
    else:
        common.db.insert('portAliasLUT', port=port, name=alias_name, description=alias_description)
    # update portLUT database of default values to include the missing information
    exists = common.db.select('portLUT', what="1", where={"port": port})
    if len(exists) == 1:
        if 'active' in data:
            common.db.update('portLUT', {"port": port}, active=active)
    else:
        common.db.insert('portLUT', port=port, active=active, tcp=1, udp=1, name="", description="")
def get_table_info(clauses, page, page_size, order_by, order_dir):
    """Build the paged host-table view: address, alias, effective env,
    in/out connection totals, own tags, and inherited parent tags.

    :param clauses: filter objects exposing .enabled plus .where() /
        .having() SQL fragments that are AND-ed into the inner query
    :param page: 0-based page number (NOTE: the get_details_* helpers are
        1-based)
    :param page_size: rows per page; one extra row is fetched, presumably
        so the caller can detect whether a next page exists -- TODO confirm
    :param order_by: index into [nodes.ipstart, nodes.alias, conn_out, conn_in]
    :param order_dir: 'asc' or 'desc'; any other value disables ordering
    :return: list of result rows
    """
    WHERE = " && ".join(clause.where() for clause in clauses if clause.where() and clause.enabled)
    if WHERE:
        WHERE = "WHERE " + WHERE
    HAVING = " && ".join(clause.having() for clause in clauses if clause.having() and clause.enabled)
    if HAVING:
        HAVING = "HAVING " + HAVING
    # sort column is picked from a fixed whitelist, keeping ORDER BY safe
    cols = ['nodes.ipstart', 'nodes.alias', 'conn_out', 'conn_in']
    ORDERBY = ""
    if 0 <= order_by < len(cols) and order_dir in ['asc', 'desc']:
        ORDERBY = "ORDER BY {0} {1}".format(cols[order_by], order_dir)
    # note: group concat max length is default at 1024.
    # if info is lost, try:
    #   SET group_concat_max_len = 2048
    query = """
        SELECT CONCAT(decodeIP(old.ipstart), CONCAT('/', old.subnet)) AS 'address'
            , old.alias
            , old.env
            , old.conn_out
            , old.conn_in
            , t.tags
            , GROUP_CONCAT(pt.tag SEPARATOR ', ') AS 'parent_tags'
        FROM (
            SELECT nodes.ipstart
                , nodes.ipend
                , nodes.subnet
                , COALESCE((
                    SELECT env
                    FROM Nodes nz
                    WHERE nodes.ipstart >= nz.ipstart AND nodes.ipend <= nz.ipend AND env IS NOT NULL AND env != "inherit"
                    ORDER BY (nodes.ipstart - nz.ipstart + nz.ipend - nodes.ipend) ASC
                    LIMIT 1
                ), 'production') AS "env"
                , COALESCE(nodes.alias, '') AS 'alias'
                , COALESCE((SELECT SUM(links)
                    FROM LinksOut AS l_out
                    WHERE l_out.src_start = nodes.ipstart
                     AND l_out.src_end = nodes.ipend
                 ),0) AS 'conn_out'
                , COALESCE((SELECT SUM(links)
                    FROM LinksIn AS l_in
                    WHERE l_in.dst_start = nodes.ipstart
                     AND l_in.dst_end = nodes.ipend
                 ),0) AS 'conn_in'
            FROM Nodes AS nodes
            {WHERE}
            {HAVING}
            {ORDER}
            LIMIT {START},{RANGE}
        ) AS `old`
        LEFT JOIN (
            SELECT GROUP_CONCAT(tag SEPARATOR ', ') AS 'tags', ipstart, ipend
            FROM Tags
            GROUP BY ipstart, ipend
        ) AS t
        ON t.ipstart = old.ipstart AND t.ipend = old.ipend
        LEFT JOIN Tags AS pt
        ON (pt.ipstart <= old.ipstart AND pt.ipend > old.ipend) OR (pt.ipstart < old.ipstart AND pt.ipend >= old.ipend)
        GROUP BY old.ipstart, old.subnet, old.alias, old.env, old.conn_out, old.conn_in, t.tags;
        """.format(
        WHERE=WHERE,
        HAVING=HAVING,
        ORDER=ORDERBY,
        START=page * page_size,
        RANGE=page_size + 1)
    info = list(common.db.query(query))
    return info