Neo4j - write speed too slow for real-time log file import?

I wanted to know whether Neo4j can be used to analyze network traffic log files. To that end I am "tailing" 3 different log files from Bro IDS in real time with the "sh" library and importing the log records into Neo4j, which seems to be very slow with py2neo. A CSV import is not an option here, because the data arrives in real time.
An example: with tcpreplay I am replaying a one-hour packet capture that contains almost 4,000,000 connections, and I am even replaying it at only half speed. So after 2 hours I have roughly 4,000,000 log entries. Now, 3.5 hours after the analysis started, I have only imported 289,691 graphs, each consisting of 5 nodes and 4 relationships. All in all, that is about 15% of the data in almost twice the time. (That works out to roughly 23 imported log lines per second, while the replay produces several hundred lines per second.)
I am using py2neo, and the code looks like the following (this builds one of the graphs):
from sh import tail
from py2neo import Graph, Node, Relationship

def create_conn_graph(connlog):
    [...]

    ## Start Session
    graph = Graph(bolt=True, password="neo4j")
    tx = graph.begin()

    ###########
    ## Nodes ##
    ###########

    ## Connection Node
    conn = Node("Connection", uid=connlog['uid'],
                ts=connlog['ts'],
                date=evt_date,
                time=evt_time,
                [...])

    conn_properties = dict(conn)
    for key in conn_properties.keys():
        if conn[key] == "-" or conn[key] == "(empty)":
            conn[key] = "0"
    conn.update()
    tx.merge(conn, "Connection", "uid")

    ## IP Nodes
    orig = Node("IP", ip=connlog['orig_h'])
    tx.merge(orig)

    resp = Node("IP", ip=connlog['resp_h'])
    tx.merge(resp)

    ## History Node
    if connlog['history']:
        hist_flow = history_flow(connlog['history'])
        history_node = Node("History", history=connlog['history'], flow=hist_flow)
        tx.merge(history_node, "History", "history")

        ## (Connection)-[HAS_HISTORY]->(History)
        conn_hist = Relationship(conn, "HAS_HISTORY", history_node)
        tx.merge(conn_hist)

    ## Conn_State
    conn_state = Node("Conn_State", state=connlog['conn_state'], meaning=CONN_STATE[connlog['conn_state']])
    tx.merge(conn_state, "Conn_State", "conn_state")

    tx.commit()
    tx = graph.begin()

    ###################
    ## Relationships ##
    ###################

    ## (IP)-[STARTS_CONNECTION]->(Connection)
    orig_conn = Relationship(orig, "STARTS_CONNECTION", conn, port=connlog['orig_p'])
    tx.merge(orig_conn)

    ## (Connection)-[CONNECTS_TO]->(IP)
    conn_resp = Relationship(conn, "CONNECTS_TO", resp, port=connlog['resp_p'])
    tx.merge(conn_resp)

    ## (Connection)-[HAS_CONN_STATE]->(Conn_State)
    conn_connstate = Relationship(conn, "HAS_CONN_STATE", conn_state)
    tx.merge(conn_connstate)

    tx.commit()

    ## (Connection)-[PRODUCED]->(DNS|HTTP)
    if connlog['service'] == "dns":
        graph.run("MATCH (c:Connection {uid:{uid}}), (d:DNS {uid:{uid}}) \
                   MERGE (c)-[:PRODUCED]->(d)",
                  {"uid": connlog['uid']})

    if connlog['service'] == "http":
        graph.run("MATCH (c:Connection {uid:{uid}}), (d:HTTP {uid:{uid}}) \
                   MERGE (c)-[:PRODUCED]->(d)",
                  {"uid": connlog['uid']})

    return True

## End of create_conn_graph ########################################


if __name__ == "__main__":
    logentry = {}
    logfield = CONNLOG
    logline = []

    for line in tail("-F", LOG_DIR, _iter=True, _bg=True):
        entry = line.strip().split("\t")
        if line.startswith('#'):
            continue
        for i in range(len(logfield)):
            logentry[logfield[i]] = entry[i]
        create_conn_graph(logentry)
I have the following constraints and indexes:
graph.run("CREATE CONSTRAINT ON (c:Connection) ASSERT c.uid IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (i:IP) ASSERT i.ip IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (c:Conn_State) ASSERT c.conn_state IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (h:History) ASSERT h.history IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (host:Host) ASSERT host.host is UNIQUE")
graph.run("CREATE CONSTRAINT ON (q:QueryType) ASSERT q.type is UNIQUE")
graph.run("CREATE CONSTRAINT ON (qc:QueryClass) ASSERT qc.class is UNIQUE")
graph.run("CREATE CONSTRAINT ON (rc:ResponseCode) ASSERT rc.code is UNIQUE")
graph.run("CREATE CONSTRAINT ON (ic:InfoCode) ASSERT ic.code is UNIQUE")
graph.run("CREATE CONSTRAINT ON (ua:UserAgent) ASSERT ua.useragent is UNIQUE")
graph.run("CREATE CONSTRAINT ON (m:Method) ASSERT m.method is UNIQUE")
graph.run("CREATE CONSTRAINT ON (r:Referrer) ASSERT r.referrer is UNIQUE")
graph.run("CREATE INDEX ON :DNS(uid)")
graph.run("CREATE INDEX ON :Uri(uri)")
graph.run("CREATE INDEX ON :HTTP(uid)")
Maybe someone can give me a hint as to what I am doing wrong, or where I made mistakes in the code? The number of commits is there because of transient errors I got while trying to write to Neo4j; with the higher number of transactions those errors no longer occurred.
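For illustration only: a batched variant of the write path, which collects parsed log lines and writes them with a single UNWIND statement per batch, might look roughly like the sketch below. This is not what I currently run; BATCH_SIZE, flush_batch and the reduced property set are placeholders.

## Sketch of a batched write path (placeholder names, reduced property set).
## Parsed conn.log dicts are collected and sent as one UNWIND statement per batch.
BATCH_SIZE = 1000   # arbitrary, would need tuning

def flush_batch(graph, entries):
    if not entries:
        return
    graph.run("UNWIND {rows} AS row "
              "MERGE (c:Connection {uid: row.uid}) SET c.ts = row.ts "
              "MERGE (o:IP {ip: row.orig_h}) "
              "MERGE (r:IP {ip: row.resp_h}) "
              "MERGE (o)-[:STARTS_CONNECTION {port: row.orig_p}]->(c) "
              "MERGE (c)-[:CONNECTS_TO {port: row.resp_p}]->(r)",
              {"rows": entries})

## In the tail() loop, instead of calling create_conn_graph() per line:
##     batch.append(dict(logentry))
##     if len(batch) >= BATCH_SIZE:
##         flush_batch(graph, batch)
##         batch = []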
Thanks in advance for any help.
Did you create any schema constraints/indexes? – logisima
I have added the constraints and indexes to the question. –