# vim: syntax=python
# Please check https://github.com/cvandeplas/ELK-forensics for more information.
# Created by Christophe Vandeplas <[email protected]>
# Import a Plaso l2tcsv output file into your Elasticsearch database.
#
# Do note that Plaso can export directly to Elasticsearch.
# Have a look at the plaso.conf file in this repository.
#
# To generate the l2tcsv file:
# - first generate the dump file with 'log2timeline.py' from Plaso
# - then use 'psort.py' to output to csv
# - transfer the csv to logstash
# Example:
# - log2timeline.py win7-64-nfury-10.3.58.6.dump win7-64-nfury-c-drive/win7-64-nfury-c-drive.E01
# - psort.py -o l2tcsv win7-64-nfury-10.3.58.6.dump > win7-64-nfury-10.3.58.6.csv
# - cat win7-64-nfury-10.3.58.6.csv | nc 127.0.0.1 18005
input {
  tcp {
    type => "l2tcsv"
    port => 18005
  }
}
filter {
  if [type] == "l2tcsv" {
    csv {
      separator => ","
      quote_char => "ª" # workaround: use an unlikely quote character, since " breaks parsing when a field contains an embedded "
      columns => ["date","time","timezone","macb","source","sourcetype","eventtype","user","host","short","desc","version","filename","inode","notes","format","extra"]
    }
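    # Illustrative (fabricated) l2tcsv record, to show how the columns map:
    #   06/25/2013,10:29:12,UTC,MACB,FILE,NTFS,Content Modification Time,-,host1,...
    # -> date=06/25/2013, time=10:29:12, timezone=UTC, macb=MACB, source=FILE, ...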
    if [date] == "date" {
      drop {} # drop the header line that repeats the column names
    }
    # combine date, time and timezone into a single field
    # (merge and join need to be in separate mutates)
    mutate { merge => ["date", "time"] }
    mutate { merge => ["date", "timezone"] }
    mutate { join => ["date", " "] }
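    # e.g. date=06/25/2013, time=10:29:12, timezone=UTC -> date="06/25/2013 10:29:12 UTC",
    # which the date filter below parses into @timestamp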
    date {
      match => ["date", "MM/dd/YYYY HH:mm:ss z"]
    }
    # extract macb info
    if ("M" in [macb]) { mutate { add_tag => ["modified"] } }
    if ("A" in [macb]) { mutate { add_tag => ["accessed"] } }
    if ("C" in [macb]) { mutate { add_tag => ["changed"] } }
    if ("B" in [macb]) { mutate { add_tag => ["birth"] } }
    # Extract filenames
    if [source] == "FILE" {
      grok {
        break_on_match => false
        match => ["desc", "(:(?<extracted.path>/.*?))?$",
                  "extracted.path", "(?<extracted.filename>[^/]+?)?$",
                  "extracted.filename", "((\.(?<extracted.ext>[^./]+))?)?$"
                 ]
      }
    }
    if [source] == "META" {
      grok {
        break_on_match => false
        match => ["filename", "(:(?<extracted.path>/.*?))?$",
                  "extracted.path", "(?<extracted.filename>[^/]+?)?$",
                  "extracted.filename", "((\.(?<extracted.ext>[^./]+))?)?$"
                 ]
      }
    }
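    # Illustrative extraction (hypothetical value): "TSK:/Users/nfury/ntuser.dat"
    # -> extracted.path=/Users/nfury/ntuser.dat, extracted.filename=ntuser.dat, extracted.ext=dat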
    # Extract urls
    if [source] == "WEBHIST" {
      # capture the URL up to the first space or the end of the string
      grok { match => ["desc", "Location: (?<extracted.url>[^ ]+)"] }
    }
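    # e.g. desc "... Location: http://example.com/index.html" (hypothetical)
    # -> extracted.url=http://example.com/index.html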
    mutate {
      convert => ["inode", "integer",
                  "version", "integer"]
      lowercase => ["extracted.ext"]
      remove_field => ["message", "short", "date", "time", "timezone"]
    }
  }
}
output {
  if [type] == "l2tcsv" {
    elasticsearch {
      index => "logstash-l2tcsv"
      hosts => ["localhost"]
    }
  }
}
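# To verify that events were indexed (assuming Elasticsearch listens on localhost:9200):
#   curl 'localhost:9200/logstash-l2tcsv/_count?pretty'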