diff --git a/tests/datasets/datasets-lua-02/dataset-dns.lua b/tests/datasets/datasets-lua-02/dataset-dns.lua
index 044cabdb5..0785461db 100644
--- a/tests/datasets/datasets-lua-02/dataset-dns.lua
+++ b/tests/datasets/datasets-lua-02/dataset-dns.lua
@@ -2,7 +2,6 @@ local dataset = require("suricata.dataset")
 
 function init (args)
     local needs = {}
-    needs["dns.request"] = tostring(true)
     return needs
 end
 
diff --git a/tests/datasets/datasets-lua-02/dataset-lua.rules b/tests/datasets/datasets-lua-02/dataset-lua.rules
index 55e60ac0f..889fb2fc6 100644
--- a/tests/datasets/datasets-lua-02/dataset-lua.rules
+++ b/tests/datasets/datasets-lua-02/dataset-lua.rules
@@ -1 +1 @@
-alert dns any any -> any any (flow:to_server; lua:dataset-dns.lua; sid:1;)
+alert dns:request_complete any any -> any any (flow:to_server; lua:dataset-dns.lua; sid:1;)
diff --git a/tests/dns-lua-rules/test-request.lua b/tests/dns-lua-rules/test-request.lua
index 281bff1e6..7c801591f 100644
--- a/tests/dns-lua-rules/test-request.lua
+++ b/tests/dns-lua-rules/test-request.lua
@@ -1,6 +1,5 @@
 function init (args)
     local needs = {}
-    needs["dns.request"] = tostring(true)
     return needs
 end
 
diff --git a/tests/dns-lua-rules/test-response.lua b/tests/dns-lua-rules/test-response.lua
index 6c1c84641..6c0a8a00b 100644
--- a/tests/dns-lua-rules/test-response.lua
+++ b/tests/dns-lua-rules/test-response.lua
@@ -1,6 +1,5 @@
 function init (args)
     local needs = {}
-    needs["dns.response"] = tostring(true)
     return needs
 end
 
diff --git a/tests/dns-lua-rules/test-rrname.lua b/tests/dns-lua-rules/test-rrname.lua
index f5b1059d7..367bd23a6 100644
--- a/tests/dns-lua-rules/test-rrname.lua
+++ b/tests/dns-lua-rules/test-rrname.lua
@@ -1,11 +1,10 @@
 function init (args)
     local needs = {}
-    needs["dns.rrname"] = tostring(true)
     return needs
 end
 
 function match(args)
-    rrname = tostring(args["dns.rrname"])
+    rrname = DnsGetDnsRrname()
     if rrname == "www.suricata-ids.org" then
         return 1
     end
diff --git a/tests/dns-lua-rules/test.rules b/tests/dns-lua-rules/test.rules
index 7ff3f02d9..c5ca3b6bc 100644
--- a/tests/dns-lua-rules/test.rules
+++ b/tests/dns-lua-rules/test.rules
@@ -1,7 +1,6 @@
-alert dns any any -> any any (msg:"TEST DNS LUA dns.rrname"; \
+alert dns:request_complete any any -> any any (msg:"TEST DNS LUA dns.rrname"; \
     lua:test-rrname.lua; sid:1; rev:1;)
-alert dns any any -> any any (msg:"TEST DNS LUA dns.request"; \
+alert dns:request_complete any any -> any any (msg:"TEST DNS LUA dns.request"; \
     lua:test-request.lua; sid:2; rev:1;)
-alert dns any any -> any any (msg:"TEST DNS LUA dns.response"; \
-    lua:test-response.lua; sid:3; rev:1;)
- 
\ No newline at end of file
+alert dns:response_complete any any -> any any (msg:"TEST DNS LUA dns.response"; \
+    lua:test-response.lua; sid:3; rev:1;)
diff --git a/tests/lua-memleak/test.lua b/tests/lua-memleak/test.lua
index 141014c1b..91f7d38c6 100644
--- a/tests/lua-memleak/test.lua
+++ b/tests/lua-memleak/test.lua
@@ -1,6 +1,5 @@
 function init (args)
     local needs = {}
-    needs["http.request_headers"] = tostring(true)
     return needs
 end
 
diff --git a/tests/lua-memleak/test.rules b/tests/lua-memleak/test.rules
index 844ac631e..2087b768e 100644
--- a/tests/lua-memleak/test.rules
+++ b/tests/lua-memleak/test.rules
@@ -1 +1 @@
-alert http any any -> any any (msg: "Test1"; flow: to_server; lua:test.lua; sid:6677001; rev:1;)
+alert http1:request_complete any any -> any any (msg: "Test1"; flow: to_server; lua:test.lua; sid:6677001; rev:1;)
diff --git a/tests/lua-output-dns-pre8/README.md b/tests/lua-output-dns-pre8/README.md
new file mode 100644
index 000000000..6bbac174b
--- /dev/null
+++ b/tests/lua-output-dns-pre8/README.md
@@ -0,0 +1,3 @@
+Tests the output of DNS being logged by Lua.
+
+PCAPs created by Jason Ish.
diff --git a/tests/lua-output-dns-pre8/suricata.yaml b/tests/lua-output-dns-pre8/suricata.yaml
new file mode 100644
index 000000000..c5276c0e1
--- /dev/null
+++ b/tests/lua-output-dns-pre8/suricata.yaml
@@ -0,0 +1,13 @@
+%YAML 1.1
+---
+
+include: ../../etc/suricata-3.1.2.yaml
+
+rule-files:
+
+outputs:
+  - lua:
+      enabled: yes
+      scripts-dir: .
+      scripts:
+        - test.lua
diff --git a/tests/lua-output-dns-pre8/test.lua b/tests/lua-output-dns-pre8/test.lua
new file mode 100644
index 000000000..d0515cc1b
--- /dev/null
+++ b/tests/lua-output-dns-pre8/test.lua
@@ -0,0 +1,95 @@
+filename = "lua-dns.log"
+
+function init (args)
+    local needs = {}
+    needs["protocol"] = "dns"
+    return needs
+end
+
+function setup (args)
+    SCLogNotice("lua: setup()")
+    file = assert(io.open(SCLogPath() .. "/" .. filename, "w"))
+end
+
+function log(args)
+    ts = SCPacketTimeString()
+    ip_ver, src_ip, dst_ip, proto, sp, dp = SCFlowTuple()
+    tx_id = DnsGetTxid()
+
+    queries = DnsGetQueries()
+    if queries ~= nil then
+        for n, t in pairs(queries) do
+            msg = string.format(
+                "%s [**] Query TX %04x [**] %s [**] %s [**] %s:%d -> %s:%d",
+                ts,
+                tx_id,
+                t["rrname"],
+                t["type"],
+                src_ip,
+                sp,
+                dst_ip,
+                dp)
+            write(msg)
+        end
+    end
+
+    rcode = DnsGetRcode()
+    if rcode ~= nil then
+        msg = string.format(
+            "%s [**] Response TX %04x [**] %s [**] %s:%d -> %s:%d",
+            ts,
+            tx_id,
+            rcode,
+            src_ip,
+            sp,
+            dst_ip,
+            dp)
+        write(msg)
+    end
+
+    answers = DnsGetAnswers()
+    if answers ~= nil then
+        for n, t in pairs(answers) do
+            msg = string.format(
+                "%s [**] Response TX %04x [**] %s [**] %s [**] TTL %d [**] %s [**] %s:%d -> %s:%d",
+                ts,
+                tx_id,
+                t["rrname"],
+                t["type"],
+                t["ttl"],
+                t["addr"],
+                src_ip,
+                sp,
+                dst_ip,
+                dp);
+            write(msg)
+        end
+    end
+
+    authorities = DnsGetAuthorities()
+    if authorities ~= nil then
+        for n, t in pairs(authorities) do
+            msg = string.format(
+                "%s [**] Response TX %04x [**] %s [**] %s [**] TTL %d [**] %s:%d -> %s:%d",
+                ts,
+                tx_id,
+                t["rrname"],
+                t["type"],
+                t["ttl"],
+                src_ip,
+                sp,
+                dst_ip,
+                dp);
+            write(msg)
+        end
+    end
+
+end
+
+function deinit(args)
+    file:close(file)
+end
+
+function write(msg)
+    file:write(msg .. "\n")
+end
diff --git a/tests/lua-output-dns-pre8/test.yaml b/tests/lua-output-dns-pre8/test.yaml
new file mode 100644
index 000000000..417c53014
--- /dev/null
+++ b/tests/lua-output-dns-pre8/test.yaml
@@ -0,0 +1,19 @@
+requires:
+  features:
+    - HAVE_LUA
+  lt-version: 8
+
+pcap: ../lua-output-dns/test.pcap
+
+checks:
+  - shell:
+      args: grep -q "Query TX 0d4f \[\*\*\] block.dropbox.com \[\*\*\] A \[\*\*\] 10.16.1.11:49697 -> 10.16.1.1:53" lua-dns.log
+  - shell:
+      args: cat lua-dns.log | grep Response | grep client-cf.dropbox.com | wc -l
+      expect: 2
+  - shell:
+      args: cat lua-dns.log | grep "Response TX 62b2" | grep NXDOMAIN | wc -l
+      expect: 1
+  - shell:
+      args: grep SOA lua-dns.log | wc -l
+      expect: 1
diff --git a/tests/lua-output-dns/test.lua b/tests/lua-output-dns/test.lua
index d0515cc1b..69425c722 100644
--- a/tests/lua-output-dns/test.lua
+++ b/tests/lua-output-dns/test.lua
@@ -1,3 +1,5 @@
+local packet = require "suricata.packet"
+
 filename = "lua-dns.log"
 
 function init (args)
@@ -12,7 +14,8 @@ function setup (args)
 end
 
 function log(args)
-    ts = SCPacketTimeString()
+    p = packet.get()
+    ts = p:timestring()
     ip_ver, src_ip, dst_ip, proto, sp, dp = SCFlowTuple()
     tx_id = DnsGetTxid()
 
diff --git a/tests/lua-output-dns/test.yaml b/tests/lua-output-dns/test.yaml
index 9db269e6e..5d86e3239 100644
--- a/tests/lua-output-dns/test.yaml
+++ b/tests/lua-output-dns/test.yaml
@@ -1,4 +1,5 @@
 requires:
+  min-version: 8
   features:
     - HAVE_LUA
 
diff --git a/tests/lua-output-http-pre8/README.md b/tests/lua-output-http-pre8/README.md
new file mode 100644
index 000000000..6393e9a11
--- /dev/null
+++ b/tests/lua-output-http-pre8/README.md
@@ -0,0 +1 @@
+Test Lua output of HTTP metadata.
diff --git a/tests/lua-output-http-pre8/default.yaml b/tests/lua-output-http-pre8/default.yaml
new file mode 100644
index 000000000..af883b357
--- /dev/null
+++ b/tests/lua-output-http-pre8/default.yaml
@@ -0,0 +1,1630 @@
+%YAML 1.1
+---
+
+# Suricata configuration file.
In addition to the comments describing all +# options in this file, full documentation can be found at: +# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml + +## +## Step 1: inform Suricata about your network +## + +vars: + # more specifc is better for alert accuracy and performance + address-groups: + HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]" + #HOME_NET: "[192.168.0.0/16]" + #HOME_NET: "[10.0.0.0/8]" + #HOME_NET: "[172.16.0.0/12]" + #HOME_NET: "any" + + EXTERNAL_NET: "!$HOME_NET" + #EXTERNAL_NET: "any" + + HTTP_SERVERS: "$HOME_NET" + SMTP_SERVERS: "$HOME_NET" + SQL_SERVERS: "$HOME_NET" + DNS_SERVERS: "$HOME_NET" + TELNET_SERVERS: "$HOME_NET" + AIM_SERVERS: "$EXTERNAL_NET" + DNP3_SERVER: "$HOME_NET" + DNP3_CLIENT: "$HOME_NET" + MODBUS_CLIENT: "$HOME_NET" + MODBUS_SERVER: "$HOME_NET" + ENIP_CLIENT: "$HOME_NET" + ENIP_SERVER: "$HOME_NET" + + port-groups: + HTTP_PORTS: "80" + SHELLCODE_PORTS: "!80" + ORACLE_PORTS: 1521 + SSH_PORTS: 22 + DNP3_PORTS: 20000 + MODBUS_PORTS: 502 + + +## +## Step 2: select the rules to enable or disable +## + +default-rule-path: /home/jason/projects/oi../../etc/suricata/rules +rule-files: + - botcc.rules + - ciarmy.rules + - compromised.rules + - drop.rules + - dshield.rules +# - emerging-activex.rules + - emerging-attack_response.rules + - emerging-chat.rules + - emerging-current_events.rules + - emerging-dns.rules + - emerging-dos.rules + - emerging-exploit.rules + - emerging-ftp.rules +# - emerging-games.rules +# - emerging-icmp_info.rules +# - emerging-icmp.rules + - emerging-imap.rules +# - emerging-inappropriate.rules + - emerging-malware.rules + - emerging-misc.rules + - emerging-mobile_malware.rules + - emerging-netbios.rules + - emerging-p2p.rules + - emerging-policy.rules + - emerging-pop3.rules + - emerging-rpc.rules + - emerging-scada.rules + - emerging-scan.rules +# - emerging-shellcode.rules + - emerging-smtp.rules + - emerging-snmp.rules + - emerging-sql.rules + - emerging-telnet.rules + - emerging-tftp.rules + - emerging-trojan.rules + - emerging-user_agents.rules + - emerging-voip.rules + - emerging-web_client.rules + - emerging-web_server.rules +# - emerging-web_specific_apps.rules + - emerging-worm.rules + - tor.rules +# - decoder-events.rules # available in suricata sources under rules dir +# - stream-events.rules # available in suricata sources under rules dir + - http-events.rules # available in suricata sources under rules dir + - smtp-events.rules # available in suricata sources under rules dir + - dns-events.rules # available in suricata sources under rules dir + - tls-events.rules # available in suricata sources under rules dir +# - modbus-events.rules # available in suricata sources under rules dir +# - app-layer-events.rules # available in suricata sources under rules dir + +classification-file: /home/jason/projects/oi../../etc/suricata/classification.config +reference-config-file: /home/jason/projects/oi../../etc/suricata/reference.config +# threshold-file: /home/jason/projects/oi../../etc/suricata/threshold.config + + +## +## Step 3: select outputs to enable +## + +# The default logging directory. Any log or output file will be +# placed here if its not specified with a full path name. This can be +# overridden with the -l command line parameter. +default-log-dir: /home/jason/projects/oisf/log/suricata/ + +# global stats configuration +stats: + enabled: yes + # The interval field (in seconds) controls at what interval + # the loggers are invoked. 
+ interval: 8 + +# Configure the type of alert (and other) logging you would like. +outputs: + # a line based alerts log similar to Snort's fast.log + - fast: + enabled: yes + filename: fast.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Extensible Event Format (nicknamed EVE) event log in JSON format + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #redis: + # server: 127.0.0.1 + # port: 6379 + # mode: list ## possible values: list (default), channel + # key: suricata ## key or channel to use (default to suricata) + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting as to be reserved to high traffic suricata. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entry to keep in buffer + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # packet: yes # enable dumping of packet (without stream segments) + http: yes # enable dumping of http fields + tls: yes # enable dumping of tls fields + ssh: yes # enable dumping of ssh fields + smtp: yes # enable dumping of smtp fields + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. 
+ header: X-Forwarded-For + - http: + extended: yes # enable this for extended logging information + # custom allows additional http fields to be included in eve-log + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + - dns + - tls: + extended: yes # enable this for extended logging information + - files: + force-magic: no # force logging magic on all logged files + force-md5: no # force logging of md5 checksums + #- drop: + # alerts: no # log alerts that caused drops + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + - ssh + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # alert output for use with Barnyard2 + - unified2-alert: + enabled: no + filename: unified2.alert + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + #limit: 32mb + + # Sensor ID field of unified2 alerts. + #sensor-id: 0 + + # Include payload of packets related to alerts. Defaults to true, set to + # false if payload is not required. + #payload: yes + + # HTTP X-Forwarded-For support by adding the unified2 extra header or + # overwriting the source or destination IP address (depending on flow + # direction) with the one reported in the X-Forwarded-For HTTP header. + # This is helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". Note + # that in the "overwrite" mode, if the reported IP address in the HTTP + # X-Forwarded-For header is of a different version of the packet + # received, it will fall-back to "extra-data" mode. + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + # a line based log of HTTP requests (no alerts) + - http-log: + enabled: no + filename: http.log + append: yes + #extended: yes # enable this for extended logging information + #custom: yes # enabled the custom logging format (defined by customformat) + #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # a line based log of TLS handshake parameters (no alerts) + - tls-log: + enabled: no # Log TLS connections. + filename: tls.log # File to store TLS logs. 
+ append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + #extended: yes # Log extended information like fingerprint + + # output module to store certificates chain to disk + - tls-store: + enabled: no + #certs-log-dir: certs # directory to store the certificates files + + # a line based log of DNS requests and/or replies (no alerts) + - dns-log: + enabled: no + filename: dns.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Packet log... log packets in pcap format. 3 modes of operation: "normal" + # "multi" and "sguil". + # + # In normal mode a pcap file "filename" is created in the default-log-dir, + # or are as specified by "dir". + # In multi mode, a file is created per thread. This will perform much + # better, but will create multiple files where 'normal' would create one. + # In multi mode the filename takes a few special variables: + # - %n -- thread number + # - %i -- thread id + # - %t -- timestamp (secs or secs.usecs based on 'ts-format' + # E.g. filename: pcap.%n.%t + # + # Note that it's possible to use directories, but the directories are not + # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the + # per thread directory. + # + # Also note that the limit and max-files settings are enforced per thread. + # So the size limit when using 8 threads with 1000mb files and 2000 files + # is: 8*1000*2000 ~ 16TiB. + # + # In Sguil mode "dir" indicates the base directory. In this base dir the + # pcaps are created in th directory structure Sguil expects: + # + # $sguil-base-dir/YYYY-MM-DD/$filename. + # + # By default all packets are logged except: + # - TCP streams beyond stream.reassembly.depth + # - encrypted streams after the key exchange + # + - pcap-log: + enabled: no + filename: log.pcap + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + limit: 1000mb + + # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" + max-files: 2000 + + mode: normal # normal, multi or sguil. + #sguil-base-dir: /nsm_data/ + #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec + use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets + honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. + + # a full alerts log containing much information for signature writers + # or for investigating suspected false positives. + - alert-debug: + enabled: no + filename: alert-debug.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # alert output to prelude (http://www.prelude-technologies.com/) only + # available if Suricata has been compiled with --enable-prelude + - alert-prelude: + enabled: no + profile: suricata + log-packet-content: no + log-packet-header: yes + + # Stats.log contains data from various counters of the suricata engine. + - stats: + enabled: yes + filename: stats.log + totals: yes # stats for all threads merged together + threads: no # per thread stats + #null-values: yes # print counters that have value 0 + + # a line based alerts log similar to fast.log into syslog + - syslog: + enabled: no + # reported identity to syslog. If ommited the program name (usually + # suricata) will be used. 
+ #identity: "suricata" + facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + + # a line based information for dropped packets in IPS mode + - drop: + enabled: no + filename: drop.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # output module to store extracted files to disk + # + # The files are stored to the log-dir in a format "file." where is + # an incrementing number starting at 1. For each file "file." a meta + # file "file..meta" is created. + # + # File extraction depends on a lot of things to be fully done: + # - stream reassembly depth. For optimal results, set this to 0 (unlimited) + # - http request / response body sizes. Again set to 0 for optimal results. + # - rules that contain the "filestore" keyword. + - file-store: + enabled: no # set to yes to enable + log-dir: files # directory to store the files + force-magic: no # force logging magic on all stored files + force-md5: no # force logging of md5 checksums + force-filestore: no # force storing of all files + #waldo: file.waldo # waldo file to store the file_id across runs + + # output module to log files tracked in a easily parsable json format + - file-log: + enabled: no + filename: files-json.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + force-magic: no # force logging magic on all logged files + force-md5: no # force logging of md5 checksums + + # Log TCP data after stream normalization + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per TCP session and stores the raw TCP data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by stream.depth + - tcp-data: + enabled: no + type: file + filename: tcp-data.log + + # Log HTTP body data after normalization, dechunking and unzipping. + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per HTTP session and stores the normalized data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by the body limit settings + - http-body-data: + enabled: no + type: file + filename: http-data.log + + # Lua Output Support - execute lua script to generate alert and event + # output. + # Documented at: + # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output + - lua: + enabled: no + #scripts-dir../../etc/suricata/lua-output/ + scripts: + # - script1.lua + +# Logging configuration. This is not about logging IDS alerts/events, but +# output about what Suricata is doing, like startup messages, errors, etc. +logging: + # The default log level, can be overridden in an output section. + # Note that debug level logging will only be emitted if Suricata was + # compiled with the --enable-debug configure option. + # + # This value is overriden by the SC_LOG_LEVEL env var. + default-log-level: notice + + # The default output format. Optional parameter, should default to + # something reasonable if not provided. Can be overriden in an + # output section. You can leave this out to get the default. + # + # This value is overriden by the SC_LOG_FORMAT env var. + #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " + + # A regex to filter output. Can be overridden in an output section. + # Defaults to empty (no filter). + # + # This value is overriden by the SC_LOG_OP_FILTER env var. + default-output-filter: + + # Define your logging outputs. 
If none are defined, or they are all + # disabled you will get the default - console output. + outputs: + - console: + enabled: yes + # type: json + - file: + enabled: yes + level: info + filename: /home/jason/projects/oisf/log/suricata/suricata.log + # type: json + - syslog: + enabled: no + facility: local5 + format: "[%i] <%d> -- " + # type: json + + +## +## Step 4: configure common capture settings +## +## See "Advanced Capture Options" below for more options, including NETMAP +## and PF_RING. +## + +# Linux high speed capture support +af-packet: + - interface: eth0 + # Number of receive threads. "auto" uses the number of cores + #threads: auto + # Default clusterid. AF_PACKET will load balance packets based on flow. + cluster-id: 99 + # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. + # This is only supported for Linux kernel > 3.1 + # possible value are: + # * cluster_round_robin: round robin load balancing + # * cluster_flow: all packets of a given flow are send to the same socket + # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket + # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same + # socket. Requires at least Linux 3.14. + # * cluster_random: packets are sent randomly to sockets but with an equipartition. + # Requires at least Linux 3.14. + # * cluster_rollover: kernel rotates between sockets filling each socket before moving + # to the next. Requires at least Linux 3.10. + # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system + # with capture card using RSS (require cpu affinity tuning and system irq tuning) + cluster-type: cluster_flow + # In some fragmentation case, the hash can not be computed. If "defrag" is set + # to yes, the kernel will do the needed defragmentation before sending the packets. + defrag: yes + # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is + # full then kernel will send the packet on the next socket with room available. This option + # can minimize packet drop and increase the treated bandwidth on single intensive flow. + #rollover: yes + # To use the ring feature of AF_PACKET, set 'use-mmap' to yes + #use-mmap: yes + # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock + # your system + #mmap-locked: yes + # Use tpacket_v3, capture mode, only active if user-mmap is true + tpacket-v3: yes + # Ring size will be computed with respect to max_pending_packets and number + # of threads. You can set manually the ring size in number of packets by setting + # the following value. If you are using flow cluster-type and have really network + # intensive single-flow you could want to set the ring-size independently of the number + # of threads: + #ring-size: 2048 + # Block size is used by tpacket_v3 only. It should set to a value high enough to contain + # a decent number of packets. Size is in bytes so please consider your MTU. It should be + # a power of 2 and it must be multiple of page size (usually 4096). + #block-size: 32768 + # tpacket_v3 block timeout: an open block is passed to userspace if it is not + # filled after block-timeout milliseconds. + #block-timeout: 10 + # On busy system, this could help to set it to yes to recover from a packet drop + # phase. This will result in some packets (at max a ring flush) being non treated. 
+ #use-emergency-flush: yes + # recv buffer size, increase value could improve performance + # buffer-size: 32768 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - kernel: use indication sent by kernel for each packet (default) + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: kernel + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + # You can use the following variables to activate AF_PACKET tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. + #copy-mode: ips + #copy-iface: eth1 + + # Put default values here. These will be used for an interface that is not + # in the list above. + - interface: default + #threads: auto + #use-mmap: no + #rollover: yes + tpacket-v3: yes + +# Cross platform libpcap capture support +pcap: + - interface: eth0 + # On Linux, pcap will try to use mmaped capture and will use buffer-size + # as total of memory used by the ring. So set this to something bigger + # than 1% of your bandwidth. + #buffer-size: 16777216 + #bpf-filter: "tcp and port 25" + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # With some accelerator cards using a modified libpcap (like myricom), you + # may want to have the same number of capture threads as the number of capture + # rings. In this case, set up the threads variable to N to start N threads + # listening on the same interface. + #threads: 16 + # set to no to disable promiscuous mode: + #promisc: no + # set snaplen, if not set it defaults to MTU if MTU can be known + # via ioctl call and to full capture if not. + #snaplen: 1518 + # Put default values here + - interface: default + #checksum-checks: auto + +# Settings for reading pcap files +pcap-file: + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have checksum tested + checksum-checks: auto + +# See "Advanced Capture Options" below for more options, including NETMAP +# and PF_RING. + + +## +## Step 5: App Layer Protocol Configuration +## + +# Configure the app-layer parsers. The protocols section details each +# protocol. +# +# The option "enabled" takes 3 values - "yes", "no", "detection-only". 
+# "yes" enables both detection and the parser, "no" disables both, and +# "detection-only" enables protocol detection only (parser disabled). +app-layer: + protocols: + tls: + enabled: yes + detection-ports: + dp: 443 + + #no-reassemble: yes + dcerpc: + enabled: yes + ftp: + enabled: yes + ssh: + enabled: yes + smtp: + enabled: yes + # Configure SMTP-MIME Decoder + mime: + # Decode MIME messages from SMTP transactions + # (may be resource intensive) + # This field supercedes all others because it turns the entire + # process on or off + decode-mime: yes + + # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) + decode-base64: yes + decode-quoted-printable: yes + + # Maximum bytes per header data value stored in the data structure + # (default is 2000) + header-value-depth: 2000 + + # Extract URLs and save in state data structure + extract-urls: yes + # Set to yes to compute the md5 of the mail body. You will then + # be able to journalize it. + body-md5: no + # Configure inspected-tracker for file_data keyword + inspected-tracker: + content-limit: 100000 + content-inspect-min-size: 32768 + content-inspect-window: 4096 + imap: + enabled: detection-only + msn: + enabled: detection-only + smb: + enabled: yes + detection-ports: + dp: 139 + # Note: Modbus probe parser is minimalist due to the poor significant field + # Only Modbus message length (greater than Modbus header length) + # And Protocol ID (equal to 0) are checked in probing parser + # It is important to enable detection port and define Modbus port + # to avoid false positive + modbus: + # How many unreplied Modbus requests are considered a flood. + # If the limit is reached, app-layer-event:modbus.flooded; will match. + #request-flood: 500 + + enabled: no + detection-ports: + dp: 502 + # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it + # is recommended to keep the TCP connection opened with a remote device + # and not to open and close it for each MODBUS/TCP transaction. In that + # case, it is important to set the depth of the stream reassembling as + # unlimited (stream.reassembly.depth: 0) + # smb2 detection is disabled internally inside the engine. + #smb2: + # enabled: yes + dns: + # memcaps. Globally and per flow/state. + #global-memcap: 16mb + #state-memcap: 512kb + + # How many unreplied DNS requests are considered a flood. + # If the limit is reached, app-layer-event:dns.flooded; will match. + #request-flood: 500 + + tcp: + enabled: yes + detection-ports: + dp: 53 + udp: + enabled: yes + detection-ports: + dp: 53 + http: + enabled: yes + # memcap: 64mb + + # default-config: Used when no server-config matches + # personality: List of personalities used by default + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. + # double-decode-path: Double decode path section of the URI + # double-decode-query: Double decode query section of the URI + # + # server-config: List of server configurations to use if address matches + # address: List of ip addresses or networks for this block + # personalitiy: List of personalities used by this block + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. 
+ # double-decode-path: Double decode path section of the URI + # double-decode-query: Double decode query section of the URI + # + # uri-include-all: Include all parts of the URI. By default the + # 'scheme', username/password, hostname and port + # are excluded. Setting this option to true adds + # all of them to the normalized uri as inspected + # by http_uri, urilen, pcre with /U and the other + # keywords that inspect the normalized uri. + # Note that this does not affect http_raw_uri. + # Also, note that including all was the default in + # 1.4 and 2.0beta1. + # + # meta-field-limit: Hard size limit for request and response size + # limits. Applies to request line and headers, + # response line and headers. Does not apply to + # request or response bodies. Default is 18k. + # If this limit is reached an event is raised. + # + # Currently Available Personalities: + # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, + # IIS_7_0, IIS_7_5, Apache_2 + libhtp: + default-config: + personality: IDS + + # Can be specified in kb, mb, gb. Just a number indicates + # it's in bytes. + request-body-limit: 100kb + response-body-limit: 100kb + + # inspection limits + request-body-minimal-inspect-size: 32kb + request-body-inspect-window: 4kb + response-body-minimal-inspect-size: 40kb + response-body-inspect-window: 16kb + + # auto will use http-body-inline mode in IPS mode, yes or no set it statically + http-body-inline: auto + + # Take a random value for inspection sizes around the specified value. + # This lower the risk of some evasion technics but could lead + # detection change between runs. It is set to 'yes' by default. + #randomize-inspection-sizes: yes + # If randomize-inspection-sizes is active, the value of various + # inspection size will be choosen in the [1 - range%, 1 + range%] + # range + # Default value of randomize-inspection-range is 10. + #randomize-inspection-range: 10 + + # decoding + double-decode-path: no + double-decode-query: no + + server-config: + + #- apache: + # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] + # personality: Apache_2 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + + #- iis7: + # address: + # - 192.168.0.0/24 + # - 192.168.10.0/24 + # personality: IIS_7_0 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + +# Limit for the maximum number of asn1 frames to decode (default 256) +asn1-max-frames: 256 + + +############################################################################## +## +## Advanced settings below +## +############################################################################## + +## +## Run Options +## + +# Run suricata as user and group. +#run-as: +# user: suri +# group: suri + +# Some logging module will use that name in event as identifier. The default +# value is the hostname +#sensor-name: suricata + +# Default pid file. +# Will use this file if no --pidfile in command options. +#pid-file: /home/jason/projects/oisf/run/suricata.pid + +# Daemon working directory +# Suricata will change directory to this one if provided +# Default: "/" +#daemon-directory: "/" + +# Suricata core dump configuration. Limits the size of the core dump file to +# approximately max-dump. 
The actual core dump size will be a multiple of the +# page size. Core dumps that would be larger than max-dump are truncated. On +# Linux, the actual core dump size may be a few pages larger than max-dump. +# Setting max-dump to 0 disables core dumping. +# Setting max-dump to 'unlimited' will give the full core dump file. +# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size +# to be 'unlimited'. + +coredump: + max-dump: unlimited + +# If suricata box is a router for the sniffed networks, set it to 'router'. If +# it is a pure sniffing setup, set it to 'sniffer-only'. +# If set to auto, the variable is internally switch to 'router' in IPS mode +# and 'sniffer-only' in IDS mode. +# This feature is currently only used by the reject* keywords. +host-mode: auto + +# Number of packets preallocated per thread. The default is 1024. A higher number +# will make sure each CPU will be more easily kept busy, but may negatively +# impact caching. +# +# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules +# apply. In that case try something like 60000 or more. This is because the CUDA +# pattern matcher buffers and scans as many packets as possible in parallel. +#max-pending-packets: 1024 + +# Runmode the engine should use. Please check --list-runmodes to get the available +# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned +# load balancing). +#runmode: autofp + +# Specifies the kind of flow load balancer used by the flow pinned autofp mode. +# +# Supported schedulers are: +# +# round-robin - Flows assigned to threads in a round robin fashion. +# active-packets - Flows assigned to threads that have the lowest number of +# unprocessed packets (default). +# hash - Flow alloted usihng the address hash. More of a random +# technique. Was the default in Suricata 1.2.1 and older. +# +#autofp-scheduler: active-packets + +# Preallocated size for packet. Default is 1514 which is the classical +# size for pcap on ethernet. You should adjust this value to the highest +# packet size (MTU + hardware header) on your system. +#default-packet-size: 1514 + +# Unix command socket can be used to pass commands to suricata. +# An external tool can then connect to get information from suricata +# or trigger some modifications of the engine. Set enabled to yes +# to activate the feature. You can use the filename variable to set +# the file name of the socket. +unix-command: + enabled: no + #filename: custom.socket + +# Magic file. The extension .mgc is added to the value here. +#magic-file: /usr/share/file/magic +#magic-file: + +legacy: + uricontent: enabled + +## +## Detection settings +## + +# Set the order of alerts bassed on actions +# The default order is pass, drop, reject, alert +# action-order: +# - pass +# - drop +# - reject +# - alert + +# IP Reputation +#reputation-categories-file: /home/jason/projects/oi../../etc/suricata/iprep/categories.txt +#default-reputation-path: /home/jason/projects/oi../../etc/suricata/iprep +#reputation-files: +# - reputation.list + +# When run with the option --engine-analysis, the engine will read each of +# the parameters below, and print reports for each of the enabled sections +# and exit. The reports are printed to a file in the default log dir +# given by the parameter "default-log-dir", with engine reporting +# subsection below printing reports in its own report file. +engine-analysis: + # enables printing reports for fast-pattern for every rule. 
+ rules-fast-pattern: yes + # enables printing reports for each rule + rules: yes + +#recursion and match limits for PCRE where supported +pcre: + match-limit: 3500 + match-limit-recursion: 1500 + +## +## Advanced Traffic Tracking and Reconstruction Settings +## + +# Host specific policies for defragmentation and TCP stream +# reassembly. The host OS lookup is done using a radix tree, just +# like a routing table so the most specific entry matches. +host-os-policy: + # Make the default policy windows. + windows: [0.0.0.0/0] + bsd: [] + bsd-right: [] + old-linux: [] + linux: [] + old-solaris: [] + solaris: [] + hpux10: [] + hpux11: [] + irix: [] + macos: [] + vista: [] + windows2k3: [] + +# Defrag settings: + +defrag: + memcap: 32mb + hash-size: 65536 + trackers: 65535 # number of defragmented flows to follow + max-frags: 65535 # number of fragments to keep (higher than trackers) + prealloc: yes + timeout: 60 + +# Enable defrag per host settings +# host-config: +# +# - dmz: +# timeout: 30 +# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] +# +# - lan: +# timeout: 45 +# address: +# - 192.168.0.0/24 +# - 192.168.10.0/24 +# - 172.16.14.0/24 + +# Flow settings: +# By default, the reserved memory (memcap) for flows is 32MB. This is the limit +# for flow allocation inside the engine. You can change this value to allow +# more memory usage for flows. +# The hash-size determine the size of the hash used to identify flows inside +# the engine, and by default the value is 65536. +# At the startup, the engine can preallocate a number of flows, to get a better +# performance. The number of flows preallocated is 10000 by default. +# emergency-recovery is the percentage of flows that the engine need to +# prune before unsetting the emergency state. The emergency state is activated +# when the memcap limit is reached, allowing to create new flows, but +# prunning them with the emergency timeouts (they are defined below). +# If the memcap is reached, the engine will try to prune flows +# with the default timeouts. If it doens't find a flow to prune, it will set +# the emergency bit and it will try again with more agressive timeouts. +# If that doesn't work, then it will try to kill the last time seen flows +# not in use. +# The memcap can be specified in kb, mb, gb. Just a number indicates it's +# in bytes. + +flow: + memcap: 128mb + hash-size: 65536 + prealloc: 10000 + emergency-recovery: 30 + #managers: 1 # default to one flow manager + #recyclers: 1 # default to one flow recycler thread + +# This option controls the use of vlan ids in the flow (and defrag) +# hashing. Normally this should be enabled, but in some (broken) +# setups where both sides of a flow are not tagged with the same vlan +# tag, we can ignore the vlan id's in the flow hashing. +vlan: + use-for-tracking: true + +# Specific timeouts for flows. Here you can specify the timeouts that the +# active flows will wait to transit from the current state to another, on each +# protocol. The value of "new" determine the seconds to wait after a hanshake or +# stream startup before the engine free the data of that flow it doesn't +# change the state to established (usually if we don't receive more packets +# of that flow). The value of "established" is the amount of +# seconds that the engine will wait to free the flow if it spend that amount +# without receiving new packets or closing the connection. "closed" is the +# amount of time to wait after a flow is closed (usually zero). 
+# +# There's an emergency mode that will become active under attack circumstances, +# making the engine to check flow status faster. This configuration variables +# use the prefix "emergency-" and work similar as the normal ones. +# Some timeouts doesn't apply to all the protocols, like "closed", for udp and +# icmp. + +flow-timeouts: + + default: + new: 30 + established: 300 + closed: 0 + emergency-new: 10 + emergency-established: 100 + emergency-closed: 0 + tcp: + new: 60 + established: 600 + closed: 60 + emergency-new: 5 + emergency-established: 100 + emergency-closed: 10 + udp: + new: 30 + established: 300 + emergency-new: 10 + emergency-established: 100 + icmp: + new: 30 + established: 300 + emergency-new: 10 + emergency-established: 100 + +# Stream engine settings. Here the TCP stream tracking and reassembly +# engine is configured. +# +# stream: +# memcap: 32mb # Can be specified in kb, mb, gb. Just a +# # number indicates it's in bytes. +# checksum-validation: yes # To validate the checksum of received +# # packet. If csum validation is specified as +# # "yes", then packet with invalid csum will not +# # be processed by the engine stream/app layer. +# # Warning: locally generated trafic can be +# # generated without checksum due to hardware offload +# # of checksum. You can control the handling of checksum +# # on a per-interface basis via the 'checksum-checks' +# # option +# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread +# midstream: false # don't allow midstream session pickups +# async-oneside: false # don't enable async stream handling +# inline: no # stream inline mode +# max-synack-queued: 5 # Max different SYN/ACKs to queue +# +# reassembly: +# memcap: 64mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# depth: 1mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# # The max acceptable size is 4024 bytes. +# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# # The max acceptable size is 4024 bytes. +# randomize-chunk-size: yes # Take a random value for chunk size around the specified value. +# # This lower the risk of some evasion technics but could lead +# # detection change between runs. It is set to 'yes' by default. +# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is +# # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size +# # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same +# # calculation for toclient-chunk-size. +# # Default value of randomize-chunk-range is 10. +# +# raw: yes # 'Raw' reassembly enabled or disabled. +# # raw is for content inspection by detection +# # engine. +# +# chunk-prealloc: 250 # Number of preallocated stream chunks. These +# # are used during stream inspection (raw). +# segments: # Settings for reassembly segment pool. +# - size: 4 # Size of the (data)segment for a pool +# prealloc: 256 # Number of segments to prealloc and keep +# # in the pool. +# zero-copy-size: 128 # This option sets in bytes the value at +# # which segment data is passed to the app +# # layer API directly. Data sizes equal to +# # and higher than the value set are passed +# # on directly. 
+# +stream: + memcap: 64mb + checksum-validation: yes # reject wrong csums + inline: auto # auto will use inline mode in IPS mode, yes or no set it statically + reassembly: + memcap: 256mb + depth: 1mb # reassemble 1mb into a stream + toserver-chunk-size: 2560 + toclient-chunk-size: 2560 + randomize-chunk-size: yes + #randomize-chunk-range: 10 + #raw: yes + #chunk-prealloc: 250 + #segments: + # - size: 4 + # prealloc: 256 + # - size: 16 + # prealloc: 512 + # - size: 112 + # prealloc: 512 + # - size: 248 + # prealloc: 512 + # - size: 512 + # prealloc: 512 + # - size: 768 + # prealloc: 1024 + # - size: 1448 + # prealloc: 1024 + # - size: 65535 + # prealloc: 128 + #zero-copy-size: 128 + +# Host table: +# +# Host table is used by tagging and per host thresholding subsystems. +# +host: + hash-size: 4096 + prealloc: 1000 + memcap: 32mb + +# IP Pair table: +# +# Used by xbits 'ippair' tracking. +# +#ippair: +# hash-size: 4096 +# prealloc: 1000 +# memcap: 32mb + + +## +## Performance tuning and profiling +## + +# The detection engine builds internal groups of signatures. The engine +# allow us to specify the profile to use for them, to manage memory on an +# efficient way keeping a good performance. For the profile keyword you +# can use the words "low", "medium", "high" or "custom". If you use custom +# make sure to define the values at "- custom-values" as your convenience. +# Usually you would prefer medium/high/low. +# +# "sgh mpm-context", indicates how the staging should allot mpm contexts for +# the signature groups. "single" indicates the use of a single context for +# all the signature group heads. "full" indicates a mpm-context for each +# group head. "auto" lets the engine decide the distribution of contexts +# based on the information the engine gathers on the patterns from each +# group head. +# +# The option inspection-recursion-limit is used to limit the recursive calls +# in the content inspection code. For certain payload-sig combinations, we +# might end up taking too much time in the content inspection code. +# If the argument specified is 0, the engine uses an internally defined +# default limit. On not specifying a value, we use no limits on the recursion. +detect: + profile: medium + custom-values: + toclient-groups: 3 + toserver-groups: 25 + sgh-mpm-context: auto + inspection-recursion-limit: 3000 + # If set to yes, the loading of signatures will be made after the capture + # is started. This will limit the downtime in IPS mode. + #delayed-detect: yes + + # the grouping values above control how many groups are created per + # direction. Port whitelisting forces that port to get it's own group. + # Very common ports will benefit, as well as ports with many expensive + # rules. + grouping: + #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 + #udp-whitelist: 53, 135, 5060 + + profiling: + # Log the rules that made it past the prefilter stage, per packet + # default is off. The threshold setting determines how many rules + # must have made it past pre-filter for that rule to trigger the + # logging. + #inspect-logging-threshold: 200 + grouping: + dump-to-disk: false + include-rules: false # very verbose + include-mpm-stats: false + +# Select the multi pattern algorithm you want to run for scan/search the +# in the engine. 
+# +# The supported algorithms are: +# "ac" - Aho-Corasick, default implementation +# "ac-bs" - Aho-Corasick, reduced memory implementation +# "ac-cuda" - Aho-Corasick, CUDA implementation +# "ac-tile" - Aho-Corasick, optimized for Tilera architecture +# "hs" - Hyperscan, available when built with Hyperscan support +# +# The default mpm-algo value of "auto" will use "hs" if Hyperscan is available, +# "ac-tile" on Tilera platforms, and "ac" otherwise. +# +# The mpm you choose also decides the distribution of mpm contexts for +# signature groups, specified by the conf - "detect.sgh-mpm-context". +# Selecting "ac" as the mpm would require "detect.sgh-mpm-context" +# to be set to "single", because of ac's memory requirements, unless the +# ruleset is small enough to fit in one's memory, in which case one can +# use "full" with "ac". Rest of the mpms can be run in "full" mode. +# +# There is also a CUDA pattern matcher (only available if Suricata was +# compiled with --enable-cuda: b2g_cuda. Make sure to update your +# max-pending-packets setting above as well if you use b2g_cuda. + +mpm-algo: auto + +# Select the matching algorithm you want to use for single-pattern searches. +# +# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only +# available if Suricata has been built with Hyperscan support). +# +# The default of "auto" will use "hs" if available, otherwise "bm". + +spm-algo: auto + +# Suricata is multi-threaded. Here the threading can be influenced. +threading: + # On some cpu's/architectures it is beneficial to tie individual threads + # to specific CPU's/CPU cores. In this case all threads are tied to CPU0, + # and each extra CPU/core has one "detect" thread. + # + # On Intel Core2 and Nehalem CPU's enabling this will degrade performance. + # + set-cpu-affinity: no + # Tune cpu affinity of suricata threads. Each family of threads can be bound + # on specific CPUs. + cpu-affinity: + - management-cpu-set: + cpu: [ 0 ] # include only these cpus in affinity settings + - receive-cpu-set: + cpu: [ 0 ] # include only these cpus in affinity settings + - decode-cpu-set: + cpu: [ 0, 1 ] + mode: "balanced" + - stream-cpu-set: + cpu: [ "0-1" ] + - detect-cpu-set: + cpu: [ "all" ] + mode: "exclusive" # run detect threads in these cpus + # Use explicitely 3 threads and don't compute number by using + # detect-thread-ratio variable: + # threads: 3 + prio: + low: [ 0 ] + medium: [ "1-2" ] + high: [ 3 ] + default: "medium" + - verdict-cpu-set: + cpu: [ 0 ] + prio: + default: "high" + - reject-cpu-set: + cpu: [ 0 ] + prio: + default: "low" + - output-cpu-set: + cpu: [ "all" ] + prio: + default: "medium" + # + # By default Suricata creates one "detect" thread per available CPU/CPU core. + # This setting allows controlling this behaviour. A ratio setting of 2 will + # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this + # will result in 4 detect threads. If values below 1 are used, less threads + # are created. So on a dual core CPU a setting of 0.5 results in 1 detect + # thread being created. Regardless of the setting at a minimum 1 detect + # thread will always be created. + # + detect-thread-ratio: 1.5 + +# Profiling settings. Only effective if Suricata has been built with the +# the --enable-profiling configure flag. +# +profiling: + # Run profiling for every xth packet. The default is 1, which means we + # profile every packet. If set to 1000, one packet is profiled for every + # 1000 received. 
+ #sample-rate: 1000 + + # rule profiling + rules: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: rule_perf.log + append: yes + + # Sort options: ticks, avgticks, checks, matches, maxticks + sort: avgticks + + # Limit the number of items printed at exit (ignored for json). + limit: 100 + + # output to json + json: true + + # per keyword profiling + keywords: + enabled: yes + filename: keyword_perf.log + append: yes + + # per rulegroup profiling + rulegroups: + enabled: yes + filename: rule_group_perf.log + append: yes + + # packet profiling + packets: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: packet_stats.log + append: yes + + # per packet csv output + csv: + + # Output can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: no + filename: packet_stats.csv + + # profiling of locking. Only available when Suricata was built with + # --enable-profiling-locks. + locks: + enabled: no + filename: lock_stats.log + append: yes + + pcap-log: + enabled: no + filename: pcaplog_stats.log + append: yes + +## +## Netfilter integration +## + +# When running in NFQ inline mode, it is possible to use a simulated +# non-terminal NFQUEUE verdict. +# This permit to do send all needed packet to suricata via this a rule: +# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE +# And below, you can have your standard filtering ruleset. To activate +# this mode, you need to set mode to 'repeat' +# If you want packet to be sent to another queue after an ACCEPT decision +# set mode to 'route' and set next-queue value. +# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance +# by processing several packets before sending a verdict (worker runmode only). +# On linux >= 3.6, you can set the fail-open option to yes to have the kernel +# accept the packet if suricata is not able to keep pace. +nfq: +# mode: accept +# repeat-mark: 1 +# repeat-mask: 1 +# route-queue: 2 +# batchcount: 20 +# fail-open: yes + +#nflog support +nflog: + # netlink multicast group + # (the same as the iptables --nflog-group param) + # Group 0 is used by the kernel, so you can't use it + - group: 2 + # netlink buffer size + buffer-size: 18432 + # put default value here + - group: default + # set number of packet to queue inside kernel + qthreshold: 1 + # set the delay before flushing packet in the queue inside kernel + qtimeout: 100 + # netlink max buffer size + max-size: 20000 + +## +## Advanced Capture Options +## + +# Netmap support +# +# Netmap operates with NIC directly in driver, so you need FreeBSD wich have +# built-in netmap support or compile and install netmap module and appropriate +# NIC driver on your Linux system. +# To reach maximum throughput disable all receive-, segmentation-, +# checksum- offloadings on NIC. +# Disabling Tx checksum offloading is *required* for connecting OS endpoint +# with NIC endpoint. +# You can find more information at https://github.com/luigirizzo/netmap +# +netmap: + # To specify OS endpoint add plus sign at the end (e.g. "eth0+") + - interface: eth2 + # Number of receive threads. "auto" uses number of RSS queues on interface. + threads: auto + # You can use the following variables to activate netmap tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. 
If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. + # To specify the OS as the copy-iface (so the OS can route packets, or forward + # to a service running on the same machine) add a plus sign at the end + # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 + # for return packets. Hardware checksumming must be *off* on the interface if + # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD + # or 'ethtool -K eth0 tx off rx off' for Linux). + #copy-mode: tap + #copy-iface: eth3 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + #- interface: eth3 + #threads: auto + #copy-mode: tap + #copy-iface: eth2 + # Put default values here + - interface: default + +# PF_RING configuration. for use with native PF_RING support +# for more info see http://www.ntop.org/products/pf_ring/ +pfring: + - interface: eth0 + # Number of receive threads (>1 will enable experimental flow pinned + # runmode) + threads: 1 + + # Default clusterid. PF_RING will load balance packets based on flow. + # All threads/processes that will participate need to have the same + # clusterid. + cluster-id: 99 + + # Default PF_RING cluster type. PF_RING can load balance per flow. + # Possible values are cluster_flow or cluster_round_robin. + cluster-type: cluster_flow + # bpf filter for this interface + #bpf-filter: tcp + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - rxonly: only compute checksum for packets received by network card. + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # Second interface + #- interface: eth1 + # threads: 3 + # cluster-id: 93 + # cluster-type: cluster_flow + # Put default values here + - interface: default + #threads: 2 + +# For FreeBSD ipfw(8) divert(4) support. +# Please make sure you have ipfw_load="YES" and ipdivert_load="YES" +# i../../etc/loader.conf or kldload'ing the appropriate kernel modules. +# Additionally, you need to have an ipfw rule for the engine to see +# the packets from ipfw. For Example: +# +# ipfw add 100 divert 8000 ip from any to any +# +# The 8000 above should be the same number you passed on the command +# line, i.e. -d 8000 +# +ipfw: + + # Reinject packets at the specified ipfw rule number. This config + # option is the ipfw rule number AT WHICH rule processing continues + # in the ipfw processing system after the engine has finished + # inspecting the packet for acceptance. 
If no rule number is specified, + # accepted packets are reinjected at the divert rule which they entered + # and IPFW rule processing continues. No check is done to verify + # this will rule makes sense so care must be taken to avoid loops in ipfw. + # + ## The following example tells the engine to reinject packets + # back into the ipfw firewall AT rule number 5500: + # + # ipfw-reinjection-rule-number: 5500 + + +napatech: + # The Host Buffer Allowance for all streams + # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) + hba: -1 + + # use_all_streams set to "yes" will query the Napatech service for all configured + # streams and listen on all of them. When set to "no" the streams config array + # will be used. + use-all-streams: yes + + # The streams to listen on + streams: [1, 2, 3] + +# Tilera mpipe configuration. for use on Tilera TILE-Gx. +mpipe: + + # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". + load-balance: dynamic + + # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 + iqueue-packets: 2048 + + # List of interfaces we will listen on. + inputs: + - interface: xgbe2 + - interface: xgbe3 + - interface: xgbe4 + + + # Relative weight of memory for packets of each mPipe buffer size. + stack: + size128: 0 + size256: 9 + size512: 0 + size1024: 0 + size1664: 7 + size4096: 0 + size10386: 0 + size16384: 0 + +## +## Hardware accelaration +## + +# Cuda configuration. +cuda: + # The "mpm" profile. On not specifying any of these parameters, the engine's + # internal default values are used, which are same as the ones specified in + # in the default conf file. + mpm: + # The minimum length required to buffer data to the gpu. + # Anything below this is MPM'ed on the CPU. + # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. + # A value of 0 indicates there's no limit. + data-buffer-size-min-limit: 0 + # The maximum length for data that we would buffer to the gpu. + # Anything over this is MPM'ed on the CPU. + # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. + data-buffer-size-max-limit: 1500 + # The ring buffer size used by the CudaBuffer API to buffer data. + cudabuffer-buffer-size: 500mb + # The max chunk size that can be sent to the gpu in a single go. + gpu-transfer-size: 50mb + # The timeout limit for batching of packets in microseconds. + batching-timeout: 2000 + # The device to use for the mpm. Currently we don't support load balancing + # on multiple gpus. In case you have multiple devices on your system, you + # can specify the device to use, using this conf. By default we hold 0, to + # specify the first device cuda sees. To find out device-id associated with + # the card(s) on the system run "suricata --list-cuda-cards". + device-id: 0 + # No of Cuda streams used for asynchronous processing. All values > 0 are valid. + # For this option you need a device with Compute Capability > 1.0. + cuda-streams: 2 + +## +## Include other configs +## + +# Includes. Files included here will be handled as if they were +# inlined in this configuration file. 
+#include: include1.yaml +#include: include2.yaml diff --git a/tests/lua-output-http-pre8/expected/http_lua.log b/tests/lua-output-http-pre8/expected/http_lua.log new file mode 100644 index 000000000..523f8b5f3 --- /dev/null +++ b/tests/lua-output-http-pre8/expected/http_lua.log @@ -0,0 +1 @@ +10/06/2015-15:16:43.137833 codemonkey.net [**] /base64-hello-world.txt [**] curl/7.40.0 [**] 10.16.1.11:59733 -> 104.131.202.103:80 diff --git a/tests/lua-output-http-pre8/http.lua b/tests/lua-output-http-pre8/http.lua new file mode 100644 index 000000000..9d60f9139 --- /dev/null +++ b/tests/lua-output-http-pre8/http.lua @@ -0,0 +1,50 @@ +-- simple fast-log to file lua module +name = "http_lua.log" + +function init (args) + local needs = {} + needs["protocol"] = "http" + return needs +end + +function setup (args) + filename = SCLogPath() .. "/" .. name + file = assert(io.open(filename, "a")) + SCLogInfo("HTTP Log Filename " .. filename) + http = 0 +end + +function log(args) + http_uri = HttpGetRequestUriRaw() + if http_uri == nil then + http_uri = "" + end + http_uri = string.gsub(http_uri, "%c", ".") + + http_host = HttpGetRequestHost() + if http_host == nil then + http_host = "" + end + http_host = string.gsub(http_host, "%c", ".") + + http_ua = HttpGetRequestHeader("User-Agent") + if http_ua == nil then + http_ua = "" + end + http_ua = string.gsub(http_ua, "%c", ".") + + ts = SCPacketTimeString() + ipver, srcip, dstip, proto, sp, dp = SCFlowTuple() + + file:write (ts .. " " .. http_host .. " [**] " .. http_uri .. " [**] " .. + http_ua .. " [**] " .. srcip .. ":" .. math.floor(sp) .. " -> " .. + dstip .. ":" .. math.floor(dp) .. "\n") + file:flush() + + http = http + 1 +end + +function deinit (args) + SCLogInfo ("HTTP transactions logged: " .. http); + file:close(file) +end diff --git a/tests/lua-output-http-pre8/suricata.yaml b/tests/lua-output-http-pre8/suricata.yaml new file mode 100644 index 000000000..f47029fc9 --- /dev/null +++ b/tests/lua-output-http-pre8/suricata.yaml @@ -0,0 +1,11 @@ +%YAML 1.1 +--- + +include: default.yaml + +outputs: + - lua: + enabled: yes + scripts-dir: . + scripts: + - http.lua diff --git a/tests/lua-output-http-pre8/test.yaml b/tests/lua-output-http-pre8/test.yaml new file mode 100644 index 000000000..5a284063f --- /dev/null +++ b/tests/lua-output-http-pre8/test.yaml @@ -0,0 +1,11 @@ +requires: + features: + - HAVE_LUA + lt-version: 8 + +pcap: ../lua-output-http/input.pcap + +checks: + - file-compare: + filename: http_lua.log + expected: expected/http_lua.log diff --git a/tests/lua-output-http/http.lua b/tests/lua-output-http/http.lua index 9d60f9139..c64b771f9 100644 --- a/tests/lua-output-http/http.lua +++ b/tests/lua-output-http/http.lua @@ -1,4 +1,6 @@ -- simple fast-log to file lua module +local packet = require "suricata.packet" + name = "http_lua.log" function init (args) @@ -33,7 +35,8 @@ function log(args) end http_ua = string.gsub(http_ua, "%c", ".") - ts = SCPacketTimeString() + p = packet.get() + ts = p:timestring() ipver, srcip, dstip, proto, sp, dp = SCFlowTuple() file:write (ts .. " " .. http_host .. " [**] " .. http_uri .. " [**] " .. 
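The hunk above moves tests/lua-output-http/http.lua from the global SCPacketTimeString() helper to the suricata.packet library used by Suricata 8 Lua scripts (packet.get() / p:timestring()), while the copied *-pre8 tests keep the old call behind an lt-version: 8 requirement. A minimal sketch of a log() callback in the new style, assuming only the calls that already appear in this diff (the surrounding skeleton is illustrative, not the exact test script):

-- sketch: Suricata 8 style Lua output script for HTTP logging
-- only packet.get()/p:timestring(), SCLogPath(), SCFlowTuple(),
-- HttpGetRequestHost() and math.floor() are taken from the diff;
-- everything else is illustrative scaffolding.
local packet = require "suricata.packet"

name = "http_lua.log"

function init (args)
    local needs = {}
    needs["protocol"] = "http"
    return needs
end

function setup (args)
    -- open the log file in the configured log directory
    file = assert(io.open(SCLogPath() .. "/" .. name, "a"))
end

function log (args)
    local p = packet.get()
    local ts = p:timestring()          -- replaces SCPacketTimeString()
    local ipver, srcip, dstip, proto, sp, dp = SCFlowTuple()
    local host = HttpGetRequestHost() or ""
    file:write(ts .. " " .. host .. " [**] " ..
               srcip .. ":" .. math.floor(sp) .. " -> " ..
               dstip .. ":" .. math.floor(dp) .. "\n")
    file:flush()
end

function deinit (args)
    file:close()
end

The pre-8 variants of these scripts stay byte-for-byte on the old API, which is why the test.yaml below pins the updated test to min-version: 8.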
diff --git a/tests/lua-output-http/test.yaml b/tests/lua-output-http/test.yaml index c4e436605..3b94b8382 100644 --- a/tests/lua-output-http/test.yaml +++ b/tests/lua-output-http/test.yaml @@ -1,6 +1,7 @@ requires: features: - HAVE_LUA + min-version: 8 checks: - file-compare: diff --git a/tests/lua-output-smtp-pre8/README.txt b/tests/lua-output-smtp-pre8/README.txt new file mode 100644 index 000000000..ae90e334d --- /dev/null +++ b/tests/lua-output-smtp-pre8/README.txt @@ -0,0 +1,2 @@ +PCAP source: +https://wiki.wireshark.org/SampleCaptures?action=AttachFile&do=get&target=smtp.pcap diff --git a/tests/lua-output-smtp-pre8/default.yaml b/tests/lua-output-smtp-pre8/default.yaml new file mode 100644 index 000000000..af883b357 --- /dev/null +++ b/tests/lua-output-smtp-pre8/default.yaml @@ -0,0 +1,1630 @@ +%YAML 1.1 +--- + +# Suricata configuration file. In addition to the comments describing all +# options in this file, full documentation can be found at: +# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml + +## +## Step 1: inform Suricata about your network +## + +vars: + # more specifc is better for alert accuracy and performance + address-groups: + HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]" + #HOME_NET: "[192.168.0.0/16]" + #HOME_NET: "[10.0.0.0/8]" + #HOME_NET: "[172.16.0.0/12]" + #HOME_NET: "any" + + EXTERNAL_NET: "!$HOME_NET" + #EXTERNAL_NET: "any" + + HTTP_SERVERS: "$HOME_NET" + SMTP_SERVERS: "$HOME_NET" + SQL_SERVERS: "$HOME_NET" + DNS_SERVERS: "$HOME_NET" + TELNET_SERVERS: "$HOME_NET" + AIM_SERVERS: "$EXTERNAL_NET" + DNP3_SERVER: "$HOME_NET" + DNP3_CLIENT: "$HOME_NET" + MODBUS_CLIENT: "$HOME_NET" + MODBUS_SERVER: "$HOME_NET" + ENIP_CLIENT: "$HOME_NET" + ENIP_SERVER: "$HOME_NET" + + port-groups: + HTTP_PORTS: "80" + SHELLCODE_PORTS: "!80" + ORACLE_PORTS: 1521 + SSH_PORTS: 22 + DNP3_PORTS: 20000 + MODBUS_PORTS: 502 + + +## +## Step 2: select the rules to enable or disable +## + +default-rule-path: /home/jason/projects/oi../../etc/suricata/rules +rule-files: + - botcc.rules + - ciarmy.rules + - compromised.rules + - drop.rules + - dshield.rules +# - emerging-activex.rules + - emerging-attack_response.rules + - emerging-chat.rules + - emerging-current_events.rules + - emerging-dns.rules + - emerging-dos.rules + - emerging-exploit.rules + - emerging-ftp.rules +# - emerging-games.rules +# - emerging-icmp_info.rules +# - emerging-icmp.rules + - emerging-imap.rules +# - emerging-inappropriate.rules + - emerging-malware.rules + - emerging-misc.rules + - emerging-mobile_malware.rules + - emerging-netbios.rules + - emerging-p2p.rules + - emerging-policy.rules + - emerging-pop3.rules + - emerging-rpc.rules + - emerging-scada.rules + - emerging-scan.rules +# - emerging-shellcode.rules + - emerging-smtp.rules + - emerging-snmp.rules + - emerging-sql.rules + - emerging-telnet.rules + - emerging-tftp.rules + - emerging-trojan.rules + - emerging-user_agents.rules + - emerging-voip.rules + - emerging-web_client.rules + - emerging-web_server.rules +# - emerging-web_specific_apps.rules + - emerging-worm.rules + - tor.rules +# - decoder-events.rules # available in suricata sources under rules dir +# - stream-events.rules # available in suricata sources under rules dir + - http-events.rules # available in suricata sources under rules dir + - smtp-events.rules # available in suricata sources under rules dir + - dns-events.rules # available in suricata sources under rules dir + - tls-events.rules # available in suricata sources under rules dir +# - modbus-events.rules # 
available in suricata sources under rules dir +# - app-layer-events.rules # available in suricata sources under rules dir + +classification-file: /home/jason/projects/oi../../etc/suricata/classification.config +reference-config-file: /home/jason/projects/oi../../etc/suricata/reference.config +# threshold-file: /home/jason/projects/oi../../etc/suricata/threshold.config + + +## +## Step 3: select outputs to enable +## + +# The default logging directory. Any log or output file will be +# placed here if its not specified with a full path name. This can be +# overridden with the -l command line parameter. +default-log-dir: /home/jason/projects/oisf/log/suricata/ + +# global stats configuration +stats: + enabled: yes + # The interval field (in seconds) controls at what interval + # the loggers are invoked. + interval: 8 + +# Configure the type of alert (and other) logging you would like. +outputs: + # a line based alerts log similar to Snort's fast.log + - fast: + enabled: yes + filename: fast.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Extensible Event Format (nicknamed EVE) event log in JSON format + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #redis: + # server: 127.0.0.1 + # port: 6379 + # mode: list ## possible values: list (default), channel + # key: suricata ## key or channel to use (default to suricata) + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting as to be reserved to high traffic suricata. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entry to keep in buffer + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # packet: yes # enable dumping of packet (without stream segments) + http: yes # enable dumping of http fields + tls: yes # enable dumping of tls fields + ssh: yes # enable dumping of ssh fields + smtp: yes # enable dumping of smtp fields + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. 
+ header: X-Forwarded-For + - http: + extended: yes # enable this for extended logging information + # custom allows additional http fields to be included in eve-log + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + - dns + - tls: + extended: yes # enable this for extended logging information + - files: + force-magic: no # force logging magic on all logged files + force-md5: no # force logging of md5 checksums + #- drop: + # alerts: no # log alerts that caused drops + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + - ssh + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # alert output for use with Barnyard2 + - unified2-alert: + enabled: no + filename: unified2.alert + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + #limit: 32mb + + # Sensor ID field of unified2 alerts. + #sensor-id: 0 + + # Include payload of packets related to alerts. Defaults to true, set to + # false if payload is not required. + #payload: yes + + # HTTP X-Forwarded-For support by adding the unified2 extra header or + # overwriting the source or destination IP address (depending on flow + # direction) with the one reported in the X-Forwarded-For HTTP header. + # This is helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". Note + # that in the "overwrite" mode, if the reported IP address in the HTTP + # X-Forwarded-For header is of a different version of the packet + # received, it will fall-back to "extra-data" mode. + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + # a line based log of HTTP requests (no alerts) + - http-log: + enabled: no + filename: http.log + append: yes + #extended: yes # enable this for extended logging information + #custom: yes # enabled the custom logging format (defined by customformat) + #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # a line based log of TLS handshake parameters (no alerts) + - tls-log: + enabled: no # Log TLS connections. + filename: tls.log # File to store TLS logs. 
+ append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + #extended: yes # Log extended information like fingerprint + + # output module to store certificates chain to disk + - tls-store: + enabled: no + #certs-log-dir: certs # directory to store the certificates files + + # a line based log of DNS requests and/or replies (no alerts) + - dns-log: + enabled: no + filename: dns.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Packet log... log packets in pcap format. 3 modes of operation: "normal" + # "multi" and "sguil". + # + # In normal mode a pcap file "filename" is created in the default-log-dir, + # or are as specified by "dir". + # In multi mode, a file is created per thread. This will perform much + # better, but will create multiple files where 'normal' would create one. + # In multi mode the filename takes a few special variables: + # - %n -- thread number + # - %i -- thread id + # - %t -- timestamp (secs or secs.usecs based on 'ts-format' + # E.g. filename: pcap.%n.%t + # + # Note that it's possible to use directories, but the directories are not + # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the + # per thread directory. + # + # Also note that the limit and max-files settings are enforced per thread. + # So the size limit when using 8 threads with 1000mb files and 2000 files + # is: 8*1000*2000 ~ 16TiB. + # + # In Sguil mode "dir" indicates the base directory. In this base dir the + # pcaps are created in th directory structure Sguil expects: + # + # $sguil-base-dir/YYYY-MM-DD/$filename. + # + # By default all packets are logged except: + # - TCP streams beyond stream.reassembly.depth + # - encrypted streams after the key exchange + # + - pcap-log: + enabled: no + filename: log.pcap + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + limit: 1000mb + + # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" + max-files: 2000 + + mode: normal # normal, multi or sguil. + #sguil-base-dir: /nsm_data/ + #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec + use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets + honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. + + # a full alerts log containing much information for signature writers + # or for investigating suspected false positives. + - alert-debug: + enabled: no + filename: alert-debug.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # alert output to prelude (http://www.prelude-technologies.com/) only + # available if Suricata has been compiled with --enable-prelude + - alert-prelude: + enabled: no + profile: suricata + log-packet-content: no + log-packet-header: yes + + # Stats.log contains data from various counters of the suricata engine. + - stats: + enabled: yes + filename: stats.log + totals: yes # stats for all threads merged together + threads: no # per thread stats + #null-values: yes # print counters that have value 0 + + # a line based alerts log similar to fast.log into syslog + - syslog: + enabled: no + # reported identity to syslog. If ommited the program name (usually + # suricata) will be used. 
+ #identity: "suricata" + facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + + # a line based information for dropped packets in IPS mode + - drop: + enabled: no + filename: drop.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # output module to store extracted files to disk + # + # The files are stored to the log-dir in a format "file." where is + # an incrementing number starting at 1. For each file "file." a meta + # file "file..meta" is created. + # + # File extraction depends on a lot of things to be fully done: + # - stream reassembly depth. For optimal results, set this to 0 (unlimited) + # - http request / response body sizes. Again set to 0 for optimal results. + # - rules that contain the "filestore" keyword. + - file-store: + enabled: no # set to yes to enable + log-dir: files # directory to store the files + force-magic: no # force logging magic on all stored files + force-md5: no # force logging of md5 checksums + force-filestore: no # force storing of all files + #waldo: file.waldo # waldo file to store the file_id across runs + + # output module to log files tracked in a easily parsable json format + - file-log: + enabled: no + filename: files-json.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + force-magic: no # force logging magic on all logged files + force-md5: no # force logging of md5 checksums + + # Log TCP data after stream normalization + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per TCP session and stores the raw TCP data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by stream.depth + - tcp-data: + enabled: no + type: file + filename: tcp-data.log + + # Log HTTP body data after normalization, dechunking and unzipping. + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per HTTP session and stores the normalized data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by the body limit settings + - http-body-data: + enabled: no + type: file + filename: http-data.log + + # Lua Output Support - execute lua script to generate alert and event + # output. + # Documented at: + # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output + - lua: + enabled: no + #scripts-dir../../etc/suricata/lua-output/ + scripts: + # - script1.lua + +# Logging configuration. This is not about logging IDS alerts/events, but +# output about what Suricata is doing, like startup messages, errors, etc. +logging: + # The default log level, can be overridden in an output section. + # Note that debug level logging will only be emitted if Suricata was + # compiled with the --enable-debug configure option. + # + # This value is overriden by the SC_LOG_LEVEL env var. + default-log-level: notice + + # The default output format. Optional parameter, should default to + # something reasonable if not provided. Can be overriden in an + # output section. You can leave this out to get the default. + # + # This value is overriden by the SC_LOG_FORMAT env var. + #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " + + # A regex to filter output. Can be overridden in an output section. + # Defaults to empty (no filter). + # + # This value is overriden by the SC_LOG_OP_FILTER env var. + default-output-filter: + + # Define your logging outputs. 
If none are defined, or they are all + # disabled you will get the default - console output. + outputs: + - console: + enabled: yes + # type: json + - file: + enabled: yes + level: info + filename: /home/jason/projects/oisf/log/suricata/suricata.log + # type: json + - syslog: + enabled: no + facility: local5 + format: "[%i] <%d> -- " + # type: json + + +## +## Step 4: configure common capture settings +## +## See "Advanced Capture Options" below for more options, including NETMAP +## and PF_RING. +## + +# Linux high speed capture support +af-packet: + - interface: eth0 + # Number of receive threads. "auto" uses the number of cores + #threads: auto + # Default clusterid. AF_PACKET will load balance packets based on flow. + cluster-id: 99 + # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. + # This is only supported for Linux kernel > 3.1 + # possible value are: + # * cluster_round_robin: round robin load balancing + # * cluster_flow: all packets of a given flow are send to the same socket + # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket + # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same + # socket. Requires at least Linux 3.14. + # * cluster_random: packets are sent randomly to sockets but with an equipartition. + # Requires at least Linux 3.14. + # * cluster_rollover: kernel rotates between sockets filling each socket before moving + # to the next. Requires at least Linux 3.10. + # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system + # with capture card using RSS (require cpu affinity tuning and system irq tuning) + cluster-type: cluster_flow + # In some fragmentation case, the hash can not be computed. If "defrag" is set + # to yes, the kernel will do the needed defragmentation before sending the packets. + defrag: yes + # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is + # full then kernel will send the packet on the next socket with room available. This option + # can minimize packet drop and increase the treated bandwidth on single intensive flow. + #rollover: yes + # To use the ring feature of AF_PACKET, set 'use-mmap' to yes + #use-mmap: yes + # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock + # your system + #mmap-locked: yes + # Use tpacket_v3, capture mode, only active if user-mmap is true + tpacket-v3: yes + # Ring size will be computed with respect to max_pending_packets and number + # of threads. You can set manually the ring size in number of packets by setting + # the following value. If you are using flow cluster-type and have really network + # intensive single-flow you could want to set the ring-size independently of the number + # of threads: + #ring-size: 2048 + # Block size is used by tpacket_v3 only. It should set to a value high enough to contain + # a decent number of packets. Size is in bytes so please consider your MTU. It should be + # a power of 2 and it must be multiple of page size (usually 4096). + #block-size: 32768 + # tpacket_v3 block timeout: an open block is passed to userspace if it is not + # filled after block-timeout milliseconds. + #block-timeout: 10 + # On busy system, this could help to set it to yes to recover from a packet drop + # phase. This will result in some packets (at max a ring flush) being non treated. 
+ #use-emergency-flush: yes + # recv buffer size, increase value could improve performance + # buffer-size: 32768 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - kernel: use indication sent by kernel for each packet (default) + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: kernel + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + # You can use the following variables to activate AF_PACKET tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. + #copy-mode: ips + #copy-iface: eth1 + + # Put default values here. These will be used for an interface that is not + # in the list above. + - interface: default + #threads: auto + #use-mmap: no + #rollover: yes + tpacket-v3: yes + +# Cross platform libpcap capture support +pcap: + - interface: eth0 + # On Linux, pcap will try to use mmaped capture and will use buffer-size + # as total of memory used by the ring. So set this to something bigger + # than 1% of your bandwidth. + #buffer-size: 16777216 + #bpf-filter: "tcp and port 25" + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # With some accelerator cards using a modified libpcap (like myricom), you + # may want to have the same number of capture threads as the number of capture + # rings. In this case, set up the threads variable to N to start N threads + # listening on the same interface. + #threads: 16 + # set to no to disable promiscuous mode: + #promisc: no + # set snaplen, if not set it defaults to MTU if MTU can be known + # via ioctl call and to full capture if not. + #snaplen: 1518 + # Put default values here + - interface: default + #checksum-checks: auto + +# Settings for reading pcap files +pcap-file: + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have checksum tested + checksum-checks: auto + +# See "Advanced Capture Options" below for more options, including NETMAP +# and PF_RING. + + +## +## Step 5: App Layer Protocol Configuration +## + +# Configure the app-layer parsers. The protocols section details each +# protocol. +# +# The option "enabled" takes 3 values - "yes", "no", "detection-only". 
+# "yes" enables both detection and the parser, "no" disables both, and +# "detection-only" enables protocol detection only (parser disabled). +app-layer: + protocols: + tls: + enabled: yes + detection-ports: + dp: 443 + + #no-reassemble: yes + dcerpc: + enabled: yes + ftp: + enabled: yes + ssh: + enabled: yes + smtp: + enabled: yes + # Configure SMTP-MIME Decoder + mime: + # Decode MIME messages from SMTP transactions + # (may be resource intensive) + # This field supercedes all others because it turns the entire + # process on or off + decode-mime: yes + + # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) + decode-base64: yes + decode-quoted-printable: yes + + # Maximum bytes per header data value stored in the data structure + # (default is 2000) + header-value-depth: 2000 + + # Extract URLs and save in state data structure + extract-urls: yes + # Set to yes to compute the md5 of the mail body. You will then + # be able to journalize it. + body-md5: no + # Configure inspected-tracker for file_data keyword + inspected-tracker: + content-limit: 100000 + content-inspect-min-size: 32768 + content-inspect-window: 4096 + imap: + enabled: detection-only + msn: + enabled: detection-only + smb: + enabled: yes + detection-ports: + dp: 139 + # Note: Modbus probe parser is minimalist due to the poor significant field + # Only Modbus message length (greater than Modbus header length) + # And Protocol ID (equal to 0) are checked in probing parser + # It is important to enable detection port and define Modbus port + # to avoid false positive + modbus: + # How many unreplied Modbus requests are considered a flood. + # If the limit is reached, app-layer-event:modbus.flooded; will match. + #request-flood: 500 + + enabled: no + detection-ports: + dp: 502 + # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it + # is recommended to keep the TCP connection opened with a remote device + # and not to open and close it for each MODBUS/TCP transaction. In that + # case, it is important to set the depth of the stream reassembling as + # unlimited (stream.reassembly.depth: 0) + # smb2 detection is disabled internally inside the engine. + #smb2: + # enabled: yes + dns: + # memcaps. Globally and per flow/state. + #global-memcap: 16mb + #state-memcap: 512kb + + # How many unreplied DNS requests are considered a flood. + # If the limit is reached, app-layer-event:dns.flooded; will match. + #request-flood: 500 + + tcp: + enabled: yes + detection-ports: + dp: 53 + udp: + enabled: yes + detection-ports: + dp: 53 + http: + enabled: yes + # memcap: 64mb + + # default-config: Used when no server-config matches + # personality: List of personalities used by default + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. + # double-decode-path: Double decode path section of the URI + # double-decode-query: Double decode query section of the URI + # + # server-config: List of server configurations to use if address matches + # address: List of ip addresses or networks for this block + # personalitiy: List of personalities used by this block + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. 
+ # double-decode-path: Double decode path section of the URI + # double-decode-query: Double decode query section of the URI + # + # uri-include-all: Include all parts of the URI. By default the + # 'scheme', username/password, hostname and port + # are excluded. Setting this option to true adds + # all of them to the normalized uri as inspected + # by http_uri, urilen, pcre with /U and the other + # keywords that inspect the normalized uri. + # Note that this does not affect http_raw_uri. + # Also, note that including all was the default in + # 1.4 and 2.0beta1. + # + # meta-field-limit: Hard size limit for request and response size + # limits. Applies to request line and headers, + # response line and headers. Does not apply to + # request or response bodies. Default is 18k. + # If this limit is reached an event is raised. + # + # Currently Available Personalities: + # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, + # IIS_7_0, IIS_7_5, Apache_2 + libhtp: + default-config: + personality: IDS + + # Can be specified in kb, mb, gb. Just a number indicates + # it's in bytes. + request-body-limit: 100kb + response-body-limit: 100kb + + # inspection limits + request-body-minimal-inspect-size: 32kb + request-body-inspect-window: 4kb + response-body-minimal-inspect-size: 40kb + response-body-inspect-window: 16kb + + # auto will use http-body-inline mode in IPS mode, yes or no set it statically + http-body-inline: auto + + # Take a random value for inspection sizes around the specified value. + # This lower the risk of some evasion technics but could lead + # detection change between runs. It is set to 'yes' by default. + #randomize-inspection-sizes: yes + # If randomize-inspection-sizes is active, the value of various + # inspection size will be choosen in the [1 - range%, 1 + range%] + # range + # Default value of randomize-inspection-range is 10. + #randomize-inspection-range: 10 + + # decoding + double-decode-path: no + double-decode-query: no + + server-config: + + #- apache: + # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] + # personality: Apache_2 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + + #- iis7: + # address: + # - 192.168.0.0/24 + # - 192.168.10.0/24 + # personality: IIS_7_0 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + +# Limit for the maximum number of asn1 frames to decode (default 256) +asn1-max-frames: 256 + + +############################################################################## +## +## Advanced settings below +## +############################################################################## + +## +## Run Options +## + +# Run suricata as user and group. +#run-as: +# user: suri +# group: suri + +# Some logging module will use that name in event as identifier. The default +# value is the hostname +#sensor-name: suricata + +# Default pid file. +# Will use this file if no --pidfile in command options. +#pid-file: /home/jason/projects/oisf/run/suricata.pid + +# Daemon working directory +# Suricata will change directory to this one if provided +# Default: "/" +#daemon-directory: "/" + +# Suricata core dump configuration. Limits the size of the core dump file to +# approximately max-dump. 
The actual core dump size will be a multiple of the +# page size. Core dumps that would be larger than max-dump are truncated. On +# Linux, the actual core dump size may be a few pages larger than max-dump. +# Setting max-dump to 0 disables core dumping. +# Setting max-dump to 'unlimited' will give the full core dump file. +# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size +# to be 'unlimited'. + +coredump: + max-dump: unlimited + +# If suricata box is a router for the sniffed networks, set it to 'router'. If +# it is a pure sniffing setup, set it to 'sniffer-only'. +# If set to auto, the variable is internally switch to 'router' in IPS mode +# and 'sniffer-only' in IDS mode. +# This feature is currently only used by the reject* keywords. +host-mode: auto + +# Number of packets preallocated per thread. The default is 1024. A higher number +# will make sure each CPU will be more easily kept busy, but may negatively +# impact caching. +# +# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules +# apply. In that case try something like 60000 or more. This is because the CUDA +# pattern matcher buffers and scans as many packets as possible in parallel. +#max-pending-packets: 1024 + +# Runmode the engine should use. Please check --list-runmodes to get the available +# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned +# load balancing). +#runmode: autofp + +# Specifies the kind of flow load balancer used by the flow pinned autofp mode. +# +# Supported schedulers are: +# +# round-robin - Flows assigned to threads in a round robin fashion. +# active-packets - Flows assigned to threads that have the lowest number of +# unprocessed packets (default). +# hash - Flow alloted usihng the address hash. More of a random +# technique. Was the default in Suricata 1.2.1 and older. +# +#autofp-scheduler: active-packets + +# Preallocated size for packet. Default is 1514 which is the classical +# size for pcap on ethernet. You should adjust this value to the highest +# packet size (MTU + hardware header) on your system. +#default-packet-size: 1514 + +# Unix command socket can be used to pass commands to suricata. +# An external tool can then connect to get information from suricata +# or trigger some modifications of the engine. Set enabled to yes +# to activate the feature. You can use the filename variable to set +# the file name of the socket. +unix-command: + enabled: no + #filename: custom.socket + +# Magic file. The extension .mgc is added to the value here. +#magic-file: /usr/share/file/magic +#magic-file: + +legacy: + uricontent: enabled + +## +## Detection settings +## + +# Set the order of alerts bassed on actions +# The default order is pass, drop, reject, alert +# action-order: +# - pass +# - drop +# - reject +# - alert + +# IP Reputation +#reputation-categories-file: /home/jason/projects/oi../../etc/suricata/iprep/categories.txt +#default-reputation-path: /home/jason/projects/oi../../etc/suricata/iprep +#reputation-files: +# - reputation.list + +# When run with the option --engine-analysis, the engine will read each of +# the parameters below, and print reports for each of the enabled sections +# and exit. The reports are printed to a file in the default log dir +# given by the parameter "default-log-dir", with engine reporting +# subsection below printing reports in its own report file. +engine-analysis: + # enables printing reports for fast-pattern for every rule. 
+ rules-fast-pattern: yes + # enables printing reports for each rule + rules: yes + +#recursion and match limits for PCRE where supported +pcre: + match-limit: 3500 + match-limit-recursion: 1500 + +## +## Advanced Traffic Tracking and Reconstruction Settings +## + +# Host specific policies for defragmentation and TCP stream +# reassembly. The host OS lookup is done using a radix tree, just +# like a routing table so the most specific entry matches. +host-os-policy: + # Make the default policy windows. + windows: [0.0.0.0/0] + bsd: [] + bsd-right: [] + old-linux: [] + linux: [] + old-solaris: [] + solaris: [] + hpux10: [] + hpux11: [] + irix: [] + macos: [] + vista: [] + windows2k3: [] + +# Defrag settings: + +defrag: + memcap: 32mb + hash-size: 65536 + trackers: 65535 # number of defragmented flows to follow + max-frags: 65535 # number of fragments to keep (higher than trackers) + prealloc: yes + timeout: 60 + +# Enable defrag per host settings +# host-config: +# +# - dmz: +# timeout: 30 +# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] +# +# - lan: +# timeout: 45 +# address: +# - 192.168.0.0/24 +# - 192.168.10.0/24 +# - 172.16.14.0/24 + +# Flow settings: +# By default, the reserved memory (memcap) for flows is 32MB. This is the limit +# for flow allocation inside the engine. You can change this value to allow +# more memory usage for flows. +# The hash-size determine the size of the hash used to identify flows inside +# the engine, and by default the value is 65536. +# At the startup, the engine can preallocate a number of flows, to get a better +# performance. The number of flows preallocated is 10000 by default. +# emergency-recovery is the percentage of flows that the engine need to +# prune before unsetting the emergency state. The emergency state is activated +# when the memcap limit is reached, allowing to create new flows, but +# prunning them with the emergency timeouts (they are defined below). +# If the memcap is reached, the engine will try to prune flows +# with the default timeouts. If it doens't find a flow to prune, it will set +# the emergency bit and it will try again with more agressive timeouts. +# If that doesn't work, then it will try to kill the last time seen flows +# not in use. +# The memcap can be specified in kb, mb, gb. Just a number indicates it's +# in bytes. + +flow: + memcap: 128mb + hash-size: 65536 + prealloc: 10000 + emergency-recovery: 30 + #managers: 1 # default to one flow manager + #recyclers: 1 # default to one flow recycler thread + +# This option controls the use of vlan ids in the flow (and defrag) +# hashing. Normally this should be enabled, but in some (broken) +# setups where both sides of a flow are not tagged with the same vlan +# tag, we can ignore the vlan id's in the flow hashing. +vlan: + use-for-tracking: true + +# Specific timeouts for flows. Here you can specify the timeouts that the +# active flows will wait to transit from the current state to another, on each +# protocol. The value of "new" determine the seconds to wait after a hanshake or +# stream startup before the engine free the data of that flow it doesn't +# change the state to established (usually if we don't receive more packets +# of that flow). The value of "established" is the amount of +# seconds that the engine will wait to free the flow if it spend that amount +# without receiving new packets or closing the connection. "closed" is the +# amount of time to wait after a flow is closed (usually zero). 
+# +# There's an emergency mode that will become active under attack circumstances, +# making the engine to check flow status faster. This configuration variables +# use the prefix "emergency-" and work similar as the normal ones. +# Some timeouts doesn't apply to all the protocols, like "closed", for udp and +# icmp. + +flow-timeouts: + + default: + new: 30 + established: 300 + closed: 0 + emergency-new: 10 + emergency-established: 100 + emergency-closed: 0 + tcp: + new: 60 + established: 600 + closed: 60 + emergency-new: 5 + emergency-established: 100 + emergency-closed: 10 + udp: + new: 30 + established: 300 + emergency-new: 10 + emergency-established: 100 + icmp: + new: 30 + established: 300 + emergency-new: 10 + emergency-established: 100 + +# Stream engine settings. Here the TCP stream tracking and reassembly +# engine is configured. +# +# stream: +# memcap: 32mb # Can be specified in kb, mb, gb. Just a +# # number indicates it's in bytes. +# checksum-validation: yes # To validate the checksum of received +# # packet. If csum validation is specified as +# # "yes", then packet with invalid csum will not +# # be processed by the engine stream/app layer. +# # Warning: locally generated trafic can be +# # generated without checksum due to hardware offload +# # of checksum. You can control the handling of checksum +# # on a per-interface basis via the 'checksum-checks' +# # option +# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread +# midstream: false # don't allow midstream session pickups +# async-oneside: false # don't enable async stream handling +# inline: no # stream inline mode +# max-synack-queued: 5 # Max different SYN/ACKs to queue +# +# reassembly: +# memcap: 64mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# depth: 1mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# # The max acceptable size is 4024 bytes. +# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# # The max acceptable size is 4024 bytes. +# randomize-chunk-size: yes # Take a random value for chunk size around the specified value. +# # This lower the risk of some evasion technics but could lead +# # detection change between runs. It is set to 'yes' by default. +# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is +# # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size +# # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same +# # calculation for toclient-chunk-size. +# # Default value of randomize-chunk-range is 10. +# +# raw: yes # 'Raw' reassembly enabled or disabled. +# # raw is for content inspection by detection +# # engine. +# +# chunk-prealloc: 250 # Number of preallocated stream chunks. These +# # are used during stream inspection (raw). +# segments: # Settings for reassembly segment pool. +# - size: 4 # Size of the (data)segment for a pool +# prealloc: 256 # Number of segments to prealloc and keep +# # in the pool. +# zero-copy-size: 128 # This option sets in bytes the value at +# # which segment data is passed to the app +# # layer API directly. Data sizes equal to +# # and higher than the value set are passed +# # on directly. 
+# +stream: + memcap: 64mb + checksum-validation: yes # reject wrong csums + inline: auto # auto will use inline mode in IPS mode, yes or no set it statically + reassembly: + memcap: 256mb + depth: 1mb # reassemble 1mb into a stream + toserver-chunk-size: 2560 + toclient-chunk-size: 2560 + randomize-chunk-size: yes + #randomize-chunk-range: 10 + #raw: yes + #chunk-prealloc: 250 + #segments: + # - size: 4 + # prealloc: 256 + # - size: 16 + # prealloc: 512 + # - size: 112 + # prealloc: 512 + # - size: 248 + # prealloc: 512 + # - size: 512 + # prealloc: 512 + # - size: 768 + # prealloc: 1024 + # - size: 1448 + # prealloc: 1024 + # - size: 65535 + # prealloc: 128 + #zero-copy-size: 128 + +# Host table: +# +# Host table is used by tagging and per host thresholding subsystems. +# +host: + hash-size: 4096 + prealloc: 1000 + memcap: 32mb + +# IP Pair table: +# +# Used by xbits 'ippair' tracking. +# +#ippair: +# hash-size: 4096 +# prealloc: 1000 +# memcap: 32mb + + +## +## Performance tuning and profiling +## + +# The detection engine builds internal groups of signatures. The engine +# allow us to specify the profile to use for them, to manage memory on an +# efficient way keeping a good performance. For the profile keyword you +# can use the words "low", "medium", "high" or "custom". If you use custom +# make sure to define the values at "- custom-values" as your convenience. +# Usually you would prefer medium/high/low. +# +# "sgh mpm-context", indicates how the staging should allot mpm contexts for +# the signature groups. "single" indicates the use of a single context for +# all the signature group heads. "full" indicates a mpm-context for each +# group head. "auto" lets the engine decide the distribution of contexts +# based on the information the engine gathers on the patterns from each +# group head. +# +# The option inspection-recursion-limit is used to limit the recursive calls +# in the content inspection code. For certain payload-sig combinations, we +# might end up taking too much time in the content inspection code. +# If the argument specified is 0, the engine uses an internally defined +# default limit. On not specifying a value, we use no limits on the recursion. +detect: + profile: medium + custom-values: + toclient-groups: 3 + toserver-groups: 25 + sgh-mpm-context: auto + inspection-recursion-limit: 3000 + # If set to yes, the loading of signatures will be made after the capture + # is started. This will limit the downtime in IPS mode. + #delayed-detect: yes + + # the grouping values above control how many groups are created per + # direction. Port whitelisting forces that port to get it's own group. + # Very common ports will benefit, as well as ports with many expensive + # rules. + grouping: + #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 + #udp-whitelist: 53, 135, 5060 + + profiling: + # Log the rules that made it past the prefilter stage, per packet + # default is off. The threshold setting determines how many rules + # must have made it past pre-filter for that rule to trigger the + # logging. + #inspect-logging-threshold: 200 + grouping: + dump-to-disk: false + include-rules: false # very verbose + include-mpm-stats: false + +# Select the multi pattern algorithm you want to run for scan/search the +# in the engine. 
+# +# The supported algorithms are: +# "ac" - Aho-Corasick, default implementation +# "ac-bs" - Aho-Corasick, reduced memory implementation +# "ac-cuda" - Aho-Corasick, CUDA implementation +# "ac-tile" - Aho-Corasick, optimized for Tilera architecture +# "hs" - Hyperscan, available when built with Hyperscan support +# +# The default mpm-algo value of "auto" will use "hs" if Hyperscan is available, +# "ac-tile" on Tilera platforms, and "ac" otherwise. +# +# The mpm you choose also decides the distribution of mpm contexts for +# signature groups, specified by the conf - "detect.sgh-mpm-context". +# Selecting "ac" as the mpm would require "detect.sgh-mpm-context" +# to be set to "single", because of ac's memory requirements, unless the +# ruleset is small enough to fit in one's memory, in which case one can +# use "full" with "ac". Rest of the mpms can be run in "full" mode. +# +# There is also a CUDA pattern matcher (only available if Suricata was +# compiled with --enable-cuda: b2g_cuda. Make sure to update your +# max-pending-packets setting above as well if you use b2g_cuda. + +mpm-algo: auto + +# Select the matching algorithm you want to use for single-pattern searches. +# +# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only +# available if Suricata has been built with Hyperscan support). +# +# The default of "auto" will use "hs" if available, otherwise "bm". + +spm-algo: auto + +# Suricata is multi-threaded. Here the threading can be influenced. +threading: + # On some cpu's/architectures it is beneficial to tie individual threads + # to specific CPU's/CPU cores. In this case all threads are tied to CPU0, + # and each extra CPU/core has one "detect" thread. + # + # On Intel Core2 and Nehalem CPU's enabling this will degrade performance. + # + set-cpu-affinity: no + # Tune cpu affinity of suricata threads. Each family of threads can be bound + # on specific CPUs. + cpu-affinity: + - management-cpu-set: + cpu: [ 0 ] # include only these cpus in affinity settings + - receive-cpu-set: + cpu: [ 0 ] # include only these cpus in affinity settings + - decode-cpu-set: + cpu: [ 0, 1 ] + mode: "balanced" + - stream-cpu-set: + cpu: [ "0-1" ] + - detect-cpu-set: + cpu: [ "all" ] + mode: "exclusive" # run detect threads in these cpus + # Use explicitely 3 threads and don't compute number by using + # detect-thread-ratio variable: + # threads: 3 + prio: + low: [ 0 ] + medium: [ "1-2" ] + high: [ 3 ] + default: "medium" + - verdict-cpu-set: + cpu: [ 0 ] + prio: + default: "high" + - reject-cpu-set: + cpu: [ 0 ] + prio: + default: "low" + - output-cpu-set: + cpu: [ "all" ] + prio: + default: "medium" + # + # By default Suricata creates one "detect" thread per available CPU/CPU core. + # This setting allows controlling this behaviour. A ratio setting of 2 will + # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this + # will result in 4 detect threads. If values below 1 are used, less threads + # are created. So on a dual core CPU a setting of 0.5 results in 1 detect + # thread being created. Regardless of the setting at a minimum 1 detect + # thread will always be created. + # + detect-thread-ratio: 1.5 + +# Profiling settings. Only effective if Suricata has been built with the +# the --enable-profiling configure flag. +# +profiling: + # Run profiling for every xth packet. The default is 1, which means we + # profile every packet. If set to 1000, one packet is profiled for every + # 1000 received. 
+ #sample-rate: 1000 + + # rule profiling + rules: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: rule_perf.log + append: yes + + # Sort options: ticks, avgticks, checks, matches, maxticks + sort: avgticks + + # Limit the number of items printed at exit (ignored for json). + limit: 100 + + # output to json + json: true + + # per keyword profiling + keywords: + enabled: yes + filename: keyword_perf.log + append: yes + + # per rulegroup profiling + rulegroups: + enabled: yes + filename: rule_group_perf.log + append: yes + + # packet profiling + packets: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: packet_stats.log + append: yes + + # per packet csv output + csv: + + # Output can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: no + filename: packet_stats.csv + + # profiling of locking. Only available when Suricata was built with + # --enable-profiling-locks. + locks: + enabled: no + filename: lock_stats.log + append: yes + + pcap-log: + enabled: no + filename: pcaplog_stats.log + append: yes + +## +## Netfilter integration +## + +# When running in NFQ inline mode, it is possible to use a simulated +# non-terminal NFQUEUE verdict. +# This permit to do send all needed packet to suricata via this a rule: +# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE +# And below, you can have your standard filtering ruleset. To activate +# this mode, you need to set mode to 'repeat' +# If you want packet to be sent to another queue after an ACCEPT decision +# set mode to 'route' and set next-queue value. +# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance +# by processing several packets before sending a verdict (worker runmode only). +# On linux >= 3.6, you can set the fail-open option to yes to have the kernel +# accept the packet if suricata is not able to keep pace. +nfq: +# mode: accept +# repeat-mark: 1 +# repeat-mask: 1 +# route-queue: 2 +# batchcount: 20 +# fail-open: yes + +#nflog support +nflog: + # netlink multicast group + # (the same as the iptables --nflog-group param) + # Group 0 is used by the kernel, so you can't use it + - group: 2 + # netlink buffer size + buffer-size: 18432 + # put default value here + - group: default + # set number of packet to queue inside kernel + qthreshold: 1 + # set the delay before flushing packet in the queue inside kernel + qtimeout: 100 + # netlink max buffer size + max-size: 20000 + +## +## Advanced Capture Options +## + +# Netmap support +# +# Netmap operates with NIC directly in driver, so you need FreeBSD wich have +# built-in netmap support or compile and install netmap module and appropriate +# NIC driver on your Linux system. +# To reach maximum throughput disable all receive-, segmentation-, +# checksum- offloadings on NIC. +# Disabling Tx checksum offloading is *required* for connecting OS endpoint +# with NIC endpoint. +# You can find more information at https://github.com/luigirizzo/netmap +# +netmap: + # To specify OS endpoint add plus sign at the end (e.g. "eth0+") + - interface: eth2 + # Number of receive threads. "auto" uses number of RSS queues on interface. + threads: auto + # You can use the following variables to activate netmap tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. 
If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. + # To specify the OS as the copy-iface (so the OS can route packets, or forward + # to a service running on the same machine) add a plus sign at the end + # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 + # for return packets. Hardware checksumming must be *off* on the interface if + # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD + # or 'ethtool -K eth0 tx off rx off' for Linux). + #copy-mode: tap + #copy-iface: eth3 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + #- interface: eth3 + #threads: auto + #copy-mode: tap + #copy-iface: eth2 + # Put default values here + - interface: default + +# PF_RING configuration. for use with native PF_RING support +# for more info see http://www.ntop.org/products/pf_ring/ +pfring: + - interface: eth0 + # Number of receive threads (>1 will enable experimental flow pinned + # runmode) + threads: 1 + + # Default clusterid. PF_RING will load balance packets based on flow. + # All threads/processes that will participate need to have the same + # clusterid. + cluster-id: 99 + + # Default PF_RING cluster type. PF_RING can load balance per flow. + # Possible values are cluster_flow or cluster_round_robin. + cluster-type: cluster_flow + # bpf filter for this interface + #bpf-filter: tcp + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - rxonly: only compute checksum for packets received by network card. + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # Second interface + #- interface: eth1 + # threads: 3 + # cluster-id: 93 + # cluster-type: cluster_flow + # Put default values here + - interface: default + #threads: 2 + +# For FreeBSD ipfw(8) divert(4) support. +# Please make sure you have ipfw_load="YES" and ipdivert_load="YES" +# i../../etc/loader.conf or kldload'ing the appropriate kernel modules. +# Additionally, you need to have an ipfw rule for the engine to see +# the packets from ipfw. For Example: +# +# ipfw add 100 divert 8000 ip from any to any +# +# The 8000 above should be the same number you passed on the command +# line, i.e. -d 8000 +# +ipfw: + + # Reinject packets at the specified ipfw rule number. This config + # option is the ipfw rule number AT WHICH rule processing continues + # in the ipfw processing system after the engine has finished + # inspecting the packet for acceptance. 
If no rule number is specified, + # accepted packets are reinjected at the divert rule which they entered + # and IPFW rule processing continues. No check is done to verify + # this will rule makes sense so care must be taken to avoid loops in ipfw. + # + ## The following example tells the engine to reinject packets + # back into the ipfw firewall AT rule number 5500: + # + # ipfw-reinjection-rule-number: 5500 + + +napatech: + # The Host Buffer Allowance for all streams + # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) + hba: -1 + + # use_all_streams set to "yes" will query the Napatech service for all configured + # streams and listen on all of them. When set to "no" the streams config array + # will be used. + use-all-streams: yes + + # The streams to listen on + streams: [1, 2, 3] + +# Tilera mpipe configuration. for use on Tilera TILE-Gx. +mpipe: + + # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". + load-balance: dynamic + + # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 + iqueue-packets: 2048 + + # List of interfaces we will listen on. + inputs: + - interface: xgbe2 + - interface: xgbe3 + - interface: xgbe4 + + + # Relative weight of memory for packets of each mPipe buffer size. + stack: + size128: 0 + size256: 9 + size512: 0 + size1024: 0 + size1664: 7 + size4096: 0 + size10386: 0 + size16384: 0 + +## +## Hardware accelaration +## + +# Cuda configuration. +cuda: + # The "mpm" profile. On not specifying any of these parameters, the engine's + # internal default values are used, which are same as the ones specified in + # in the default conf file. + mpm: + # The minimum length required to buffer data to the gpu. + # Anything below this is MPM'ed on the CPU. + # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. + # A value of 0 indicates there's no limit. + data-buffer-size-min-limit: 0 + # The maximum length for data that we would buffer to the gpu. + # Anything over this is MPM'ed on the CPU. + # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. + data-buffer-size-max-limit: 1500 + # The ring buffer size used by the CudaBuffer API to buffer data. + cudabuffer-buffer-size: 500mb + # The max chunk size that can be sent to the gpu in a single go. + gpu-transfer-size: 50mb + # The timeout limit for batching of packets in microseconds. + batching-timeout: 2000 + # The device to use for the mpm. Currently we don't support load balancing + # on multiple gpus. In case you have multiple devices on your system, you + # can specify the device to use, using this conf. By default we hold 0, to + # specify the first device cuda sees. To find out device-id associated with + # the card(s) on the system run "suricata --list-cuda-cards". + device-id: 0 + # No of Cuda streams used for asynchronous processing. All values > 0 are valid. + # For this option you need a device with Compute Capability > 1.0. + cuda-streams: 2 + +## +## Include other configs +## + +# Includes. Files included here will be handled as if they were +# inlined in this configuration file. 
+#include: include1.yaml +#include: include2.yaml diff --git a/tests/lua-output-smtp-pre8/expected/smtp_lua.log b/tests/lua-output-smtp-pre8/expected/smtp_lua.log new file mode 100644 index 000000000..18ead8437 --- /dev/null +++ b/tests/lua-output-smtp-pre8/expected/smtp_lua.log @@ -0,0 +1 @@ +10/05/2009-06:06:12.248291 FROM TO {} diff --git a/tests/lua-output-smtp-pre8/smtp.lua b/tests/lua-output-smtp-pre8/smtp.lua new file mode 100644 index 000000000..0e8d12c51 --- /dev/null +++ b/tests/lua-output-smtp-pre8/smtp.lua @@ -0,0 +1,34 @@ +-- simple fast-log to file lua module +name = "smtp_lua.log" + +function init (args) + local needs = {} + needs["protocol"] = "smtp" + return needs +end + +function setup (args) + filename = SCLogPath() .. "/" .. name + file = assert(io.open(filename, "a")) + SCLogInfo("Log Filename " .. filename) + count = 0 +end + +function log(args) + ts = SCPacketTimeString() + from = SMTPGetMailFrom() + to = SMTPGetRcptList() + to_string = "" + for key,val in pairs(to) do + to_string = to_string .. val + end + file:write(ts .. " FROM " .. from .. " TO {" .. to_string .. "}\n") + file:flush() + + count = count + 1 +end + +function deinit (args) + SCLogInfo ("transactions logged: " .. count); + file:close(file) +end diff --git a/tests/lua-output-smtp-pre8/suricata.yaml b/tests/lua-output-smtp-pre8/suricata.yaml new file mode 100644 index 000000000..ed3a1dc90 --- /dev/null +++ b/tests/lua-output-smtp-pre8/suricata.yaml @@ -0,0 +1,11 @@ +%YAML 1.1 +--- + +include: default.yaml + +outputs: + - lua: + enabled: yes + scripts-dir: . + scripts: + - smtp.lua diff --git a/tests/lua-output-smtp-pre8/test.yaml b/tests/lua-output-smtp-pre8/test.yaml new file mode 100644 index 000000000..56db7a061 --- /dev/null +++ b/tests/lua-output-smtp-pre8/test.yaml @@ -0,0 +1,10 @@ +pcap: ../bug-3616-smtp/input.pcap + +requires: + features: + - HAVE_LUA + lt-version: 8 + +checks: + - shell: + args: grep -q 'FROM TO {}' smtp_lua.log diff --git a/tests/lua-output-smtp/smtp.lua b/tests/lua-output-smtp/smtp.lua index 0e8d12c51..38a453ef0 100644 --- a/tests/lua-output-smtp/smtp.lua +++ b/tests/lua-output-smtp/smtp.lua @@ -1,4 +1,6 @@ -- simple fast-log to file lua module +local packet = require "suricata.packet" + name = "smtp_lua.log" function init (args) @@ -15,7 +17,8 @@ function setup (args) end function log(args) - ts = SCPacketTimeString() + p = packet.get() + ts = p:timestring() from = SMTPGetMailFrom() to = SMTPGetRcptList() to_string = "" diff --git a/tests/lua-output-smtp/test.yaml b/tests/lua-output-smtp/test.yaml index c39b6c1f0..1485a9af2 100644 --- a/tests/lua-output-smtp/test.yaml +++ b/tests/lua-output-smtp/test.yaml @@ -3,6 +3,7 @@ pcap: ../bug-3616-smtp/input.pcap requires: features: - HAVE_LUA + min-version: 8 checks: - shell: diff --git a/tests/lua-scflowvarget/test.lua b/tests/lua-scflowvarget/test.lua index 38e49acdb..aaa1e0ec2 100644 --- a/tests/lua-scflowvarget/test.lua +++ b/tests/lua-scflowvarget/test.lua @@ -1,6 +1,5 @@ function init (args) local needs = {} - needs["http.request_headers"] = tostring(true) needs["flowvar"] = {"TestVar"} return needs end diff --git a/tests/lua-scpackettuple-pre8/README.md b/tests/lua-scpackettuple-pre8/README.md new file mode 100644 index 000000000..f85440313 --- /dev/null +++ b/tests/lua-scpackettuple-pre8/README.md @@ -0,0 +1 @@ +Tests Lua's SCPacketTuple output. 
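Note on the hunks above and below: the `*-pre8` test directories keep the legacy global Lua helpers (`SCPacketTimeString()`, `SCPacketTuple()`) and are gated with `lt-version: 8`, while the updated tests require the `suricata.packet` library and `min-version: 8`. A minimal sketch of that migration pattern follows; it is not part of the patch and uses only calls already present in these test scripts.

```lua
-- Sketch only: contrasts the two API generations exercised by these tests.
-- Legacy (pre-8, kept in the *-pre8 copies):
--   ts = SCPacketTimeString()
--   ipver, srcip, dstip, proto, sp, dp = SCPacketTuple()
-- Suricata 8+ scripts require the packet library instead:
local packet = require "suricata.packet"

function log(args)
    local p = packet.get()                               -- current packet object
    local ts = p:timestring()                             -- replaces SCPacketTimeString()
    local ipver, srcip, dstip, proto, sp, dp = p:tuple()  -- replaces SCPacketTuple()
    SCLogInfo(string.format("%s %s:%d -> %s:%d", ts, srcip, sp, dstip, dp))
end
```

Keeping both directories means the old and new Lua APIs stay covered, with the `lt-version`/`min-version` requirements selecting which copy runs.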
diff --git a/tests/lua-scpackettuple-pre8/expected/scpacket-tuple.log b/tests/lua-scpackettuple-pre8/expected/scpacket-tuple.log new file mode 100644 index 000000000..8ded52111 --- /dev/null +++ b/tests/lua-scpackettuple-pre8/expected/scpacket-tuple.log @@ -0,0 +1,40 @@ +{10/06/2015-15:16:43.136335 [**] +SCPacketTuple is +IP Version: 4 +Src: 10.16.1.11:59733 -> Dst: 104.131.202.103:80 [**] Protocol: 6} +{10/06/2015-15:16:43.136772 [**] +SCPacketTuple is +IP Version: 4 +Src: 104.131.202.103:80 -> Dst: 10.16.1.11:59733 [**] Protocol: 6} +{10/06/2015-15:16:43.136823 [**] +SCPacketTuple is +IP Version: 4 +Src: 10.16.1.11:59733 -> Dst: 104.131.202.103:80 [**] Protocol: 6} +{10/06/2015-15:16:43.136911 [**] +SCPacketTuple is +IP Version: 4 +Src: 10.16.1.11:59733 -> Dst: 104.131.202.103:80 [**] Protocol: 6} +{10/06/2015-15:16:43.137046 [**] +SCPacketTuple is +IP Version: 4 +Src: 104.131.202.103:80 -> Dst: 10.16.1.11:59733 [**] Protocol: 6} +{10/06/2015-15:16:43.137760 [**] +SCPacketTuple is +IP Version: 4 +Src: 104.131.202.103:80 -> Dst: 10.16.1.11:59733 [**] Protocol: 6} +{10/06/2015-15:16:43.137833 [**] +SCPacketTuple is +IP Version: 4 +Src: 10.16.1.11:59733 -> Dst: 104.131.202.103:80 [**] Protocol: 6} +{10/06/2015-15:16:43.138142 [**] +SCPacketTuple is +IP Version: 4 +Src: 10.16.1.11:59733 -> Dst: 104.131.202.103:80 [**] Protocol: 6} +{10/06/2015-15:16:43.138441 [**] +SCPacketTuple is +IP Version: 4 +Src: 104.131.202.103:80 -> Dst: 10.16.1.11:59733 [**] Protocol: 6} +{10/06/2015-15:16:43.138468 [**] +SCPacketTuple is +IP Version: 4 +Src: 10.16.1.11:59733 -> Dst: 104.131.202.103:80 [**] Protocol: 6} diff --git a/tests/lua-scpackettuple-pre8/scpackettuple.lua b/tests/lua-scpackettuple-pre8/scpackettuple.lua new file mode 100644 index 000000000..9107ce437 --- /dev/null +++ b/tests/lua-scpackettuple-pre8/scpackettuple.lua @@ -0,0 +1,29 @@ +-- simple SCPacketTuple log test +name = "scpacket-tuple.log" + +function init(args) + local needs = {} + needs["type"] = "packet" + return needs +end + +function setup(args) + filename = SCLogPath() .. "/" .. name + file = assert(io.open(filename, "a")) + SCLogInfo("Lua SCPacketTuple Log Filename " .. filename) + packets = 0 +end + +function log(args) + timestring = SCPacketTimeString() + ipver, srcip, dstip, proto, sp, dp = SCPacketTuple() + + file:write ("{" .. timestring .. " [**]\nSCPacketTuple is\nIP Version: " .. ipver .. "\nSrc: " .. srcip .. ":" .. sp .. " -> Dst: " .. dstip .. ":" .. dp .. " [**] Protocol: " .. proto .. "}\n") + file:flush() + packets = packets + 1 +end + +function deinit(args) + SCLogInfo ("Packets logged: " .. packets); + file:close(file) +end diff --git a/tests/lua-scpackettuple-pre8/suricata.yaml b/tests/lua-scpackettuple-pre8/suricata.yaml new file mode 100644 index 000000000..607c2ef13 --- /dev/null +++ b/tests/lua-scpackettuple-pre8/suricata.yaml @@ -0,0 +1,9 @@ +%YAML 1.1 +--- + +outputs: + - lua: + enabled: yes + scripts-dir: . 
+ scripts: + - scpackettuple.lua diff --git a/tests/lua-scpackettuple-pre8/test.yaml b/tests/lua-scpackettuple-pre8/test.yaml new file mode 100644 index 000000000..cfb6b6531 --- /dev/null +++ b/tests/lua-scpackettuple-pre8/test.yaml @@ -0,0 +1,12 @@ +pcap: ../lua-output-http/input.pcap + +requires: + features: + - HAVE_LUA + lt-version: 8 + +checks: + - file-compare: + # A check that compares two files + filename: scpacket-tuple.log + expected: expected/scpacket-tuple.log diff --git a/tests/lua-scpackettuple/scpackettuple.lua b/tests/lua-scpackettuple/scpackettuple.lua index 9107ce437..6b848e8d7 100644 --- a/tests/lua-scpackettuple/scpackettuple.lua +++ b/tests/lua-scpackettuple/scpackettuple.lua @@ -1,3 +1,5 @@ +local packet = require "suricata.packet" + -- simple SCPacketTuple log test name = "scpacket-tuple.log" @@ -15,8 +17,10 @@ function setup(args) end function log(args) - timestring = SCPacketTimeString() - ipver, srcip, dstip, proto, sp, dp = SCPacketTuple() + p = packet.get() + timestring = p:timestring() + + ipver, srcip, dstip, proto, sp, dp = p:tuple() file:write ("{" .. timestring .. " [**]\nSCPacketTuple is\nIP Version: " .. ipver .. "\nSrc: " .. srcip .. ":" .. sp .. " -> Dst: " .. dstip .. ":" .. dp .. " [**] Protocol: " .. proto .. "}\n") file:flush() diff --git a/tests/lua-scpackettuple/test.yaml b/tests/lua-scpackettuple/test.yaml index c344e1250..7ebddfc3a 100644 --- a/tests/lua-scpackettuple/test.yaml +++ b/tests/lua-scpackettuple/test.yaml @@ -3,6 +3,7 @@ pcap: ../lua-output-http/input.pcap requires: features: - HAVE_LUA + min-version: 8 checks: - file-compare: diff --git a/tests/lua-scrule-ids-pre8/README.md b/tests/lua-scrule-ids-pre8/README.md new file mode 100644 index 000000000..dad9c3023 --- /dev/null +++ b/tests/lua-scrule-ids-pre8/README.md @@ -0,0 +1 @@ +Tests Lua's SCRuleIds output. diff --git a/tests/lua-scrule-ids-pre8/expected/lua-scrule-ids.log b/tests/lua-scrule-ids-pre8/expected/lua-scrule-ids.log new file mode 100644 index 000000000..731cab4f0 --- /dev/null +++ b/tests/lua-scrule-ids-pre8/expected/lua-scrule-ids.log @@ -0,0 +1,6 @@ +[**] 07/13/2016-22:42:07.573103 +SCRuleIds is +[**] +Signature id: 2100498 +revision: 7 +Group id: 1[**] \ No newline at end of file diff --git a/tests/lua-scrule-ids-pre8/lua-scrule-ids.lua b/tests/lua-scrule-ids-pre8/lua-scrule-ids.lua new file mode 100644 index 000000000..312732120 --- /dev/null +++ b/tests/lua-scrule-ids-pre8/lua-scrule-ids.lua @@ -0,0 +1,27 @@ +-- lua_pushinteger output test for SCRuleIds and ... +name = "lua-scrule-ids.log" + +function init(args) + local needs = {} + needs["type"] = "packet" + needs["filter"] = "alerts" + return needs +end + +function setup(args) + filename = SCLogPath() .. "/" .. name + file = assert(io.open(filename, "a")) + SCLogInfo("lua SCRuleIds Log Filename " .. filename) +end + +function log(args) + timestring = SCPacketTimeString() + sid, rev, gid = SCRuleIds() + + file:write ("[**] " .. timestring .. "\nSCRuleIds is\n[**]\nSignature id: " .. sid .. "\nrevision: " .. rev .. "\nGroup id: " .. gid .. "[**]") + file:flush() +end + +function deinit(args) + file:close(file) +end diff --git a/tests/lua-scrule-ids-pre8/suricata.yaml b/tests/lua-scrule-ids-pre8/suricata.yaml new file mode 100644 index 000000000..9083c38af --- /dev/null +++ b/tests/lua-scrule-ids-pre8/suricata.yaml @@ -0,0 +1,45 @@ +%YAML 1.1 +--- + +logging: + default-log-level: notice + default-output-filter: + outputs: + - console: + enabled: yes + + +outputs: + - lua: + enabled: yes + scripts-dir: . 
+ scripts: + - lua-scrule-ids.lua + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + types: + - alert: + payload: yes + payload-buffer-size: 4kb + payload-printable: yes + packet: yes + http: yes + tls: yes + ssh: yes + smtp: yes + xff: + enabled: yes + mode: extra-data + deployment: reverse + header: X-Forwarded-For + + - unified2-alert: + enabled: yes + filename: unified2.alert + xff: + enabled: yes + mode: extra-data + deployment: reverse + header: X-Forwarded-For diff --git a/tests/lua-scrule-ids-pre8/test.rules b/tests/lua-scrule-ids-pre8/test.rules new file mode 100644 index 000000000..9f1307bdb --- /dev/null +++ b/tests/lua-scrule-ids-pre8/test.rules @@ -0,0 +1 @@ +alert ip any any -> any any (msg:"GPL ATTACK_RESPONSE id check returned root"; content:"uid=0|28|root|29|"; classtype:bad-unknown; sid:2100498; rev:7;) diff --git a/tests/lua-scrule-ids-pre8/test.yaml b/tests/lua-scrule-ids-pre8/test.yaml new file mode 100644 index 000000000..69d9aaf87 --- /dev/null +++ b/tests/lua-scrule-ids-pre8/test.yaml @@ -0,0 +1,16 @@ +pcap: ../flowbit-oring/input.pcap + +requires: + features: + - HAVE_LUA + lt-version: 8 + +checks: + - filter: + count: 1 + match: + event_type: alert + - file-compare: + # A check that compares two files + filename: lua-scrule-ids.log + expected: expected/lua-scrule-ids.log diff --git a/tests/lua-scrule-ids/lua-scrule-ids.lua b/tests/lua-scrule-ids/lua-scrule-ids.lua index 312732120..95cd9fa84 100644 --- a/tests/lua-scrule-ids/lua-scrule-ids.lua +++ b/tests/lua-scrule-ids/lua-scrule-ids.lua @@ -1,4 +1,5 @@ -- lua_pushinteger output test for SCRuleIds and ... +local packet = require "suricata.packet" name = "lua-scrule-ids.log" function init(args) @@ -15,7 +16,8 @@ function setup(args) end function log(args) - timestring = SCPacketTimeString() + p = packet.get() + timestring = p:timestring() sid, rev, gid = SCRuleIds() file:write ("[**] " .. timestring .. "\nSCRuleIds is\n[**]\nSignature id: " .. sid .. "\nrevision: " .. rev .. "\nGroup id: " .. gid .. 
"[**]") diff --git a/tests/lua-scrule-ids/test.yaml b/tests/lua-scrule-ids/test.yaml index 75a7c41a5..79a6a6922 100644 --- a/tests/lua-scrule-ids/test.yaml +++ b/tests/lua-scrule-ids/test.yaml @@ -3,6 +3,7 @@ pcap: ../flowbit-oring/input.pcap requires: features: - HAVE_LUA + min-version: 8 checks: - filter: diff --git a/tests/lua/lua-hashlib/test-hashing.lua b/tests/lua/lua-hashlib/test-hashing.lua index df89e6976..0e0e0f369 100644 --- a/tests/lua/lua-hashlib/test-hashing.lua +++ b/tests/lua/lua-hashlib/test-hashing.lua @@ -6,7 +6,6 @@ local expected_md5 = "27170ec0609347c6a158bb5b694822a5" function init (args) local needs = {} - needs["dns.rrname"] = tostring(true) return needs end @@ -127,7 +126,7 @@ function test_md5(name) end function match(args) - rrname = tostring(args["dns.rrname"]) + rrname = DnsGetDnsRrname() if not test_sha256(rrname) then SCLogError("test_sha256 failed") diff --git a/tests/lua/lua-hashlib/test.rules b/tests/lua/lua-hashlib/test.rules index eef4c1fac..6116b9987 100644 --- a/tests/lua/lua-hashlib/test.rules +++ b/tests/lua/lua-hashlib/test.rules @@ -1,3 +1,4 @@ alert dns any any -> any any (msg:"TEST DNS LUA dns.rrname"; \ + flow:to_server; \ dns.query.name; content: "www.suricata-ids.org"; \ lua:test-hashing.lua; sid:1; rev:1;) diff --git a/tests/lua/lua-packetlib-01/packet.lua b/tests/lua/lua-packetlib-01/packet.lua new file mode 100644 index 000000000..cb2ef8c00 --- /dev/null +++ b/tests/lua/lua-packetlib-01/packet.lua @@ -0,0 +1,22 @@ +local packet = require "suricata.packet" + +function init (args) + local needs = {} + return needs +end + +function match (args) + p = packet.get() + payload = p:payload() + ts = p:timestring() + + for line in payload:gmatch("([^\r\n]*)[\r\n]+") do + if line == "GET /index.html HTTP/1.0" then + ipver, srcip, dstip, proto, sp, dp = p:tuple() + SCLogNotice(string.format("%s %s->%s %d->%d (pcap_cnt:%d) match! %s", ts, srcip, dstip, sp, dp, p:pcap_cnt(), line)); + return 1 + end + end + + return 0 +end diff --git a/tests/lua/lua-packetlib-01/suricata.yaml b/tests/lua/lua-packetlib-01/suricata.yaml new file mode 100644 index 000000000..b63582207 --- /dev/null +++ b/tests/lua/lua-packetlib-01/suricata.yaml @@ -0,0 +1,314 @@ +%YAML 1.1 +--- + +stats: + enabled: yes + interval: 8 + +# Configure the type of alert (and other) logging you would like. +outputs: + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + # Enable for multi-threaded eve.json output; output files are amended with + # an identifier, e.g., eve.9.json + #threaded: false + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #ethernet: no # log ethernet header in events when available + #redis: + # server: 127.0.0.1 + # port: 6379 + # async: true ## if redis replies are read asynchronously + # mode: list ## possible values: list|lpush (default), rpush, channel|publish, xadd|stream + # ## lpush and rpush are using a Redis list. "list" is an alias for lpush + # ## publish is using a Redis channel. "channel" is an alias for publish + # ## xadd is using a Redis stream. "stream" is an alias for xadd + # key: suricata ## string denoting the key/channel/stream to use (default to suricata) + # stream-maxlen: 100000 ## Automatically trims the stream length to at most + ## this number of events. 
Set to 0 to disable trimming. + ## Only used when mode is set to xadd/stream. + # stream-trim-exact: false ## Trim exactly to the maximum stream length above. + ## Default: use inexact trimming (inexact by a few + ## tens of items) + ## Only used when mode is set to xadd/stream. + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting should be reserved to high traffic Suricata deployments. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entries to keep in buffer + + # Include top level metadata. Default yes. + #metadata: no + + # include the name of the input pcap file in pcap file processing mode + pcap-file: false + + # Community Flow ID + # Adds a 'community_id' field to EVE records. These are meant to give + # records a predictable flow ID that can be used to match records to + # output of other tools such as Zeek (Bro). + # + # Takes a 'seed' that needs to be same across sensors and tools + # to make the id less predictable. + + # enable/disable the community id feature. + community-id: false + # Seed value for the ID output. Valid values are 0-65535. + community-id-seed: 0 + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available: "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported: "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported. If more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4 KiB # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # payload-length: yes # enable dumping payload length, including the gaps + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. Default yes + # If you want metadata, use: + # metadata: + # Include the decoded application layer (ie. http, dns) + #app-layer: true + # Log the current state of the flow record. + #flow: true + #rule: + # Log the metadata field from the rule in a structured + # format. + #metadata: true + # Log the raw rule text. + #raw: false + #reference: false # include reference information from the rule + # http-body: yes # Requires metadata; enable dumping of HTTP body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format + # websocket-payload: yes # Requires metadata; enable dumping of WebSocket Payload in Base64 + # websocket-payload-printable: yes # Requires metadata; enable dumping of WebSocket Payload in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. 
+ tagged-packets: yes + # Enable logging the final action taken on a packet by the engine + # (e.g: the alert may have action 'allowed' but the verdict be + # 'drop' due to another alert. That's the engine's verdict) + # verdict: yes + # app layer frames + - frame: + # disabled by default as this is very verbose. + enabled: no + # payload-buffer-size: 4 KiB # max size of frame payload buffer to output in eve-log + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is enabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # also enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - http: + extended: yes # enable this for extended logging information + # custom allows additional HTTP fields to be included in eve-log. + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + # set this value to one and only one from {both, request, response} + # to dump all HTTP headers for every HTTP request and/or response + # dump-all-headers: none + - dns: + # Suricata 8.0 uses a new DNS logging format, to keep with + # the old format while you upgrade the version can be set + # to 2. See https://docs.suricata.io/en/latest/upgrade/8.0-dns-logging-changes.html + #version: 3 + + # Enable/disable this logger. Default: enabled. + #enabled: yes + + # Control logging of requests and responses: + # - requests: enable logging of DNS queries + # - responses: enable logging of DNS answers + # By default both requests and responses are logged. + #requests: no + #responses: no + + # Format of answer logging: + # - detailed: array item per answer + # - grouped: answers aggregated by type + # Default: all + #formats: [detailed, grouped] + + # DNS record types to log, based on the query type. + # Default: all. + #types: [a, aaaa, cname, mx, ns, ptr, txt] + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # custom controls which TLS fields that are included in eve-log + # WARNING: enabling custom disables extended logging. 
+ #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4, subjectaltname, client, client_certificate, client_chain, client_alpns, server_alpns] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + #- drop: + # alerts: yes # log alerts that caused drops + # flows: all # start or all: 'start' logs only a single drop + # # per flow direction. All logs each dropped pkt. + # Enable logging the final action taken on a packet by the engine + # (will show more information in case of a drop caused by 'reject') + # verdict: yes + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + #- dnp3 + - websocket + #- enip + - ftp + - rdp + - nfs + - smb + - tftp + - ike + - dcerpc + - krb5 + - snmp + - rfb + - sip + - quic + - ldap + - arp: + enabled: no # Many events can be logged. Disabled by default + - dhcp: + enabled: yes + # When extended mode is on, all DHCP messages are logged + # with full detail. When extended mode is off (the + # default), just enough information to map a MAC address + # to an IP address is logged. + extended: no + - ssh + - mqtt: + # passwords: yes # enable output of passwords + # string-log-limit: 1KiB # limit size of logged strings in bytes. + # Can be specified in KiB, MiB, GiB. Just a number + # is parsed as bytes. Default is 1 KiB. + # Use a value of 0 to disable limiting. + # Note that the size is also bounded by + # the maximum parsed message size (see + # app-layer configuration) + - http2 + # dns over http2 + - doh2 + - pgsql: + enabled: no + # passwords: yes # enable output of passwords. Disabled by default + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # Don't log stats counters that are zero. Default: true + #null-values: false # False will NOT log stats counters: 0 + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # Metadata event type. Triggered whenever a pktvar is saved + # and will include the pktvars, flowvars, flowbits and + # flowints. + #- metadata + + # EXPERIMENTAL per packet output giving TCP state tracking details + # including internal state, flags, etc. + # This output is experimental, meant for debugging and subject to + # change in both config and output without any notice. + #- stream: + # all: false # log all TCP packets + # event-set: false # log packets that have a decoder/stream event + # state-update: false # log packets triggering a TCP state update + # spurious-retransmission: false # log spurious retransmission packets + +# Datasets default settings +datasets: + # Default fallback memcap and hashsize values for datasets in case these + # were not explicitly defined. 
+ defaults: + #memcap: 100 MiB + #hashsize: 2048 + + rules: + # Set to true to allow absolute filenames and filenames that use + # ".." components to reference parent directories in rules that specify + # their filenames. + #allow-absolute-filenames: false + + # Allow datasets in rules write access for "save" and + # "state". This is enabled by default, however write access is + # limited to the data directory. + #allow-write: true + + conn-seen: + type: string + +security: + lua: + allow-rules: true diff --git a/tests/lua/lua-packetlib-01/test.rules b/tests/lua/lua-packetlib-01/test.rules new file mode 100644 index 000000000..518421d5b --- /dev/null +++ b/tests/lua/lua-packetlib-01/test.rules @@ -0,0 +1 @@ +alert tcp any any -> any any (flow:to_server; lua:packet.lua; sid:1;) diff --git a/tests/lua/lua-packetlib-01/test.yaml b/tests/lua/lua-packetlib-01/test.yaml new file mode 100644 index 000000000..f628f7e1b --- /dev/null +++ b/tests/lua/lua-packetlib-01/test.yaml @@ -0,0 +1,13 @@ +requires: + min-version: 8 + +pcap: ../../http-request-line/input.pcap + +args: + - -k none + +checks: + - filter: + count: 1 + match: + event_type: alert diff --git a/tests/lua/lua-packetlib-02-restricted-funcs-allowed/packet.lua b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/packet.lua new file mode 100644 index 000000000..cb2ef8c00 --- /dev/null +++ b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/packet.lua @@ -0,0 +1,22 @@ +local packet = require "suricata.packet" + +function init (args) + local needs = {} + return needs +end + +function match (args) + p = packet.get() + payload = p:payload() + ts = p:timestring() + + for line in payload:gmatch("([^\r\n]*)[\r\n]+") do + if line == "GET /index.html HTTP/1.0" then + ipver, srcip, dstip, proto, sp, dp = p:tuple() + SCLogNotice(string.format("%s %s->%s %d->%d (pcap_cnt:%d) match! %s", ts, srcip, dstip, sp, dp, p:pcap_cnt(), line)); + return 1 + end + end + + return 0 +end diff --git a/tests/lua/lua-packetlib-02-restricted-funcs-allowed/suricata.yaml b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/suricata.yaml new file mode 100644 index 000000000..b63582207 --- /dev/null +++ b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/suricata.yaml @@ -0,0 +1,314 @@ +%YAML 1.1 +--- + +stats: + enabled: yes + interval: 8 + +# Configure the type of alert (and other) logging you would like. +outputs: + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + # Enable for multi-threaded eve.json output; output files are amended with + # an identifier, e.g., eve.9.json + #threaded: false + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #ethernet: no # log ethernet header in events when available + #redis: + # server: 127.0.0.1 + # port: 6379 + # async: true ## if redis replies are read asynchronously + # mode: list ## possible values: list|lpush (default), rpush, channel|publish, xadd|stream + # ## lpush and rpush are using a Redis list. "list" is an alias for lpush + # ## publish is using a Redis channel. "channel" is an alias for publish + # ## xadd is using a Redis stream. 
"stream" is an alias for xadd + # key: suricata ## string denoting the key/channel/stream to use (default to suricata) + # stream-maxlen: 100000 ## Automatically trims the stream length to at most + ## this number of events. Set to 0 to disable trimming. + ## Only used when mode is set to xadd/stream. + # stream-trim-exact: false ## Trim exactly to the maximum stream length above. + ## Default: use inexact trimming (inexact by a few + ## tens of items) + ## Only used when mode is set to xadd/stream. + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting should be reserved to high traffic Suricata deployments. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entries to keep in buffer + + # Include top level metadata. Default yes. + #metadata: no + + # include the name of the input pcap file in pcap file processing mode + pcap-file: false + + # Community Flow ID + # Adds a 'community_id' field to EVE records. These are meant to give + # records a predictable flow ID that can be used to match records to + # output of other tools such as Zeek (Bro). + # + # Takes a 'seed' that needs to be same across sensors and tools + # to make the id less predictable. + + # enable/disable the community id feature. + community-id: false + # Seed value for the ID output. Valid values are 0-65535. + community-id-seed: 0 + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available: "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported: "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported. If more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4 KiB # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # payload-length: yes # enable dumping payload length, including the gaps + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. Default yes + # If you want metadata, use: + # metadata: + # Include the decoded application layer (ie. http, dns) + #app-layer: true + # Log the current state of the flow record. + #flow: true + #rule: + # Log the metadata field from the rule in a structured + # format. + #metadata: true + # Log the raw rule text. 
+ #raw: false + #reference: false # include reference information from the rule + # http-body: yes # Requires metadata; enable dumping of HTTP body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format + # websocket-payload: yes # Requires metadata; enable dumping of WebSocket Payload in Base64 + # websocket-payload-printable: yes # Requires metadata; enable dumping of WebSocket Payload in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: yes + # Enable logging the final action taken on a packet by the engine + # (e.g: the alert may have action 'allowed' but the verdict be + # 'drop' due to another alert. That's the engine's verdict) + # verdict: yes + # app layer frames + - frame: + # disabled by default as this is very verbose. + enabled: no + # payload-buffer-size: 4 KiB # max size of frame payload buffer to output in eve-log + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is enabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # also enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - http: + extended: yes # enable this for extended logging information + # custom allows additional HTTP fields to be included in eve-log. + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + # set this value to one and only one from {both, request, response} + # to dump all HTTP headers for every HTTP request and/or response + # dump-all-headers: none + - dns: + # Suricata 8.0 uses a new DNS logging format, to keep with + # the old format while you upgrade the version can be set + # to 2. See https://docs.suricata.io/en/latest/upgrade/8.0-dns-logging-changes.html + #version: 3 + + # Enable/disable this logger. Default: enabled. + #enabled: yes + + # Control logging of requests and responses: + # - requests: enable logging of DNS queries + # - responses: enable logging of DNS answers + # By default both requests and responses are logged. + #requests: no + #responses: no + + # Format of answer logging: + # - detailed: array item per answer + # - grouped: answers aggregated by type + # Default: all + #formats: [detailed, grouped] + + # DNS record types to log, based on the query type. + # Default: all. 
+ #types: [a, aaaa, cname, mx, ns, ptr, txt] + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # custom controls which TLS fields that are included in eve-log + # WARNING: enabling custom disables extended logging. + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4, subjectaltname, client, client_certificate, client_chain, client_alpns, server_alpns] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + #- drop: + # alerts: yes # log alerts that caused drops + # flows: all # start or all: 'start' logs only a single drop + # # per flow direction. All logs each dropped pkt. + # Enable logging the final action taken on a packet by the engine + # (will show more information in case of a drop caused by 'reject') + # verdict: yes + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + #- dnp3 + - websocket + #- enip + - ftp + - rdp + - nfs + - smb + - tftp + - ike + - dcerpc + - krb5 + - snmp + - rfb + - sip + - quic + - ldap + - arp: + enabled: no # Many events can be logged. Disabled by default + - dhcp: + enabled: yes + # When extended mode is on, all DHCP messages are logged + # with full detail. When extended mode is off (the + # default), just enough information to map a MAC address + # to an IP address is logged. + extended: no + - ssh + - mqtt: + # passwords: yes # enable output of passwords + # string-log-limit: 1KiB # limit size of logged strings in bytes. + # Can be specified in KiB, MiB, GiB. Just a number + # is parsed as bytes. Default is 1 KiB. + # Use a value of 0 to disable limiting. + # Note that the size is also bounded by + # the maximum parsed message size (see + # app-layer configuration) + - http2 + # dns over http2 + - doh2 + - pgsql: + enabled: no + # passwords: yes # enable output of passwords. Disabled by default + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # Don't log stats counters that are zero. Default: true + #null-values: false # False will NOT log stats counters: 0 + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # Metadata event type. Triggered whenever a pktvar is saved + # and will include the pktvars, flowvars, flowbits and + # flowints. + #- metadata + + # EXPERIMENTAL per packet output giving TCP state tracking details + # including internal state, flags, etc. + # This output is experimental, meant for debugging and subject to + # change in both config and output without any notice. 
+ #- stream: + # all: false # log all TCP packets + # event-set: false # log packets that have a decoder/stream event + # state-update: false # log packets triggering a TCP state update + # spurious-retransmission: false # log spurious retransmission packets + +# Datasets default settings +datasets: + # Default fallback memcap and hashsize values for datasets in case these + # were not explicitly defined. + defaults: + #memcap: 100 MiB + #hashsize: 2048 + + rules: + # Set to true to allow absolute filenames and filenames that use + # ".." components to reference parent directories in rules that specify + # their filenames. + #allow-absolute-filenames: false + + # Allow datasets in rules write access for "save" and + # "state". This is enabled by default, however write access is + # limited to the data directory. + #allow-write: true + + conn-seen: + type: string + +security: + lua: + allow-rules: true diff --git a/tests/lua/lua-packetlib-02-restricted-funcs-allowed/test.rules b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/test.rules new file mode 100644 index 000000000..518421d5b --- /dev/null +++ b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/test.rules @@ -0,0 +1 @@ +alert tcp any any -> any any (flow:to_server; lua:packet.lua; sid:1;) diff --git a/tests/lua/lua-packetlib-02-restricted-funcs-allowed/test.yaml b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/test.yaml new file mode 100644 index 000000000..f9814fced --- /dev/null +++ b/tests/lua/lua-packetlib-02-restricted-funcs-allowed/test.yaml @@ -0,0 +1,14 @@ +requires: + min-version: 8 + +pcap: ../../http-request-line/input.pcap + +args: + - -k none + - --set security.lua.allow-restricted-functions=true + +checks: + - filter: + count: 1 + match: + event_type: alert diff --git a/tests/lua/lua-packetlib-03/packet.lua b/tests/lua/lua-packetlib-03/packet.lua new file mode 100644 index 000000000..4fc49c138 --- /dev/null +++ b/tests/lua/lua-packetlib-03/packet.lua @@ -0,0 +1,18 @@ +local packet = require "suricata.packet" + +function init (args) + local needs = {} + return needs +end + +function match (args) + p = packet.get() + if p:sp() == 6666 and p:dp() == 63 then + ts = p:timestring() + + SCLogNotice(string.format("%s %d->%d (pcap_cnt:%d) match!", ts, p:sp(), p:dp(), p:pcap_cnt())); + return 1 + end + + return 0 +end diff --git a/tests/lua/lua-packetlib-03/suricata.yaml b/tests/lua/lua-packetlib-03/suricata.yaml new file mode 100644 index 000000000..b63582207 --- /dev/null +++ b/tests/lua/lua-packetlib-03/suricata.yaml @@ -0,0 +1,314 @@ +%YAML 1.1 +--- + +stats: + enabled: yes + interval: 8 + +# Configure the type of alert (and other) logging you would like. +outputs: + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + # Enable for multi-threaded eve.json output; output files are amended with + # an identifier, e.g., eve.9.json + #threaded: false + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #ethernet: no # log ethernet header in events when available + #redis: + # server: 127.0.0.1 + # port: 6379 + # async: true ## if redis replies are read asynchronously + # mode: list ## possible values: list|lpush (default), rpush, channel|publish, xadd|stream + # ## lpush and rpush are using a Redis list. 
"list" is an alias for lpush + # ## publish is using a Redis channel. "channel" is an alias for publish + # ## xadd is using a Redis stream. "stream" is an alias for xadd + # key: suricata ## string denoting the key/channel/stream to use (default to suricata) + # stream-maxlen: 100000 ## Automatically trims the stream length to at most + ## this number of events. Set to 0 to disable trimming. + ## Only used when mode is set to xadd/stream. + # stream-trim-exact: false ## Trim exactly to the maximum stream length above. + ## Default: use inexact trimming (inexact by a few + ## tens of items) + ## Only used when mode is set to xadd/stream. + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting should be reserved to high traffic Suricata deployments. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entries to keep in buffer + + # Include top level metadata. Default yes. + #metadata: no + + # include the name of the input pcap file in pcap file processing mode + pcap-file: false + + # Community Flow ID + # Adds a 'community_id' field to EVE records. These are meant to give + # records a predictable flow ID that can be used to match records to + # output of other tools such as Zeek (Bro). + # + # Takes a 'seed' that needs to be same across sensors and tools + # to make the id less predictable. + + # enable/disable the community id feature. + community-id: false + # Seed value for the ID output. Valid values are 0-65535. + community-id-seed: 0 + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available: "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported: "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported. If more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4 KiB # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # payload-length: yes # enable dumping payload length, including the gaps + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. Default yes + # If you want metadata, use: + # metadata: + # Include the decoded application layer (ie. http, dns) + #app-layer: true + # Log the current state of the flow record. + #flow: true + #rule: + # Log the metadata field from the rule in a structured + # format. + #metadata: true + # Log the raw rule text. 
+ #raw: false + #reference: false # include reference information from the rule + # http-body: yes # Requires metadata; enable dumping of HTTP body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format + # websocket-payload: yes # Requires metadata; enable dumping of WebSocket Payload in Base64 + # websocket-payload-printable: yes # Requires metadata; enable dumping of WebSocket Payload in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: yes + # Enable logging the final action taken on a packet by the engine + # (e.g: the alert may have action 'allowed' but the verdict be + # 'drop' due to another alert. That's the engine's verdict) + # verdict: yes + # app layer frames + - frame: + # disabled by default as this is very verbose. + enabled: no + # payload-buffer-size: 4 KiB # max size of frame payload buffer to output in eve-log + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is enabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # also enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - http: + extended: yes # enable this for extended logging information + # custom allows additional HTTP fields to be included in eve-log. + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + # set this value to one and only one from {both, request, response} + # to dump all HTTP headers for every HTTP request and/or response + # dump-all-headers: none + - dns: + # Suricata 8.0 uses a new DNS logging format, to keep with + # the old format while you upgrade the version can be set + # to 2. See https://docs.suricata.io/en/latest/upgrade/8.0-dns-logging-changes.html + #version: 3 + + # Enable/disable this logger. Default: enabled. + #enabled: yes + + # Control logging of requests and responses: + # - requests: enable logging of DNS queries + # - responses: enable logging of DNS answers + # By default both requests and responses are logged. + #requests: no + #responses: no + + # Format of answer logging: + # - detailed: array item per answer + # - grouped: answers aggregated by type + # Default: all + #formats: [detailed, grouped] + + # DNS record types to log, based on the query type. + # Default: all. 
+ #types: [a, aaaa, cname, mx, ns, ptr, txt] + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # custom controls which TLS fields that are included in eve-log + # WARNING: enabling custom disables extended logging. + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4, subjectaltname, client, client_certificate, client_chain, client_alpns, server_alpns] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + #- drop: + # alerts: yes # log alerts that caused drops + # flows: all # start or all: 'start' logs only a single drop + # # per flow direction. All logs each dropped pkt. + # Enable logging the final action taken on a packet by the engine + # (will show more information in case of a drop caused by 'reject') + # verdict: yes + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + #- dnp3 + - websocket + #- enip + - ftp + - rdp + - nfs + - smb + - tftp + - ike + - dcerpc + - krb5 + - snmp + - rfb + - sip + - quic + - ldap + - arp: + enabled: no # Many events can be logged. Disabled by default + - dhcp: + enabled: yes + # When extended mode is on, all DHCP messages are logged + # with full detail. When extended mode is off (the + # default), just enough information to map a MAC address + # to an IP address is logged. + extended: no + - ssh + - mqtt: + # passwords: yes # enable output of passwords + # string-log-limit: 1KiB # limit size of logged strings in bytes. + # Can be specified in KiB, MiB, GiB. Just a number + # is parsed as bytes. Default is 1 KiB. + # Use a value of 0 to disable limiting. + # Note that the size is also bounded by + # the maximum parsed message size (see + # app-layer configuration) + - http2 + # dns over http2 + - doh2 + - pgsql: + enabled: no + # passwords: yes # enable output of passwords. Disabled by default + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # Don't log stats counters that are zero. Default: true + #null-values: false # False will NOT log stats counters: 0 + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # Metadata event type. Triggered whenever a pktvar is saved + # and will include the pktvars, flowvars, flowbits and + # flowints. + #- metadata + + # EXPERIMENTAL per packet output giving TCP state tracking details + # including internal state, flags, etc. + # This output is experimental, meant for debugging and subject to + # change in both config and output without any notice. 
+ #- stream: + # all: false # log all TCP packets + # event-set: false # log packets that have a decoder/stream event + # state-update: false # log packets triggering a TCP state update + # spurious-retransmission: false # log spurious retransmission packets + +# Datasets default settings +datasets: + # Default fallback memcap and hashsize values for datasets in case these + # were not explicitly defined. + defaults: + #memcap: 100 MiB + #hashsize: 2048 + + rules: + # Set to true to allow absolute filenames and filenames that use + # ".." components to reference parent directories in rules that specify + # their filenames. + #allow-absolute-filenames: false + + # Allow datasets in rules write access for "save" and + # "state". This is enabled by default, however write access is + # limited to the data directory. + #allow-write: true + + conn-seen: + type: string + +security: + lua: + allow-rules: true diff --git a/tests/lua/lua-packetlib-03/test.rules b/tests/lua/lua-packetlib-03/test.rules new file mode 100644 index 000000000..518421d5b --- /dev/null +++ b/tests/lua/lua-packetlib-03/test.rules @@ -0,0 +1 @@ +alert tcp any any -> any any (flow:to_server; lua:packet.lua; sid:1;) diff --git a/tests/lua/lua-packetlib-03/test.yaml b/tests/lua/lua-packetlib-03/test.yaml new file mode 100644 index 000000000..f628f7e1b --- /dev/null +++ b/tests/lua/lua-packetlib-03/test.yaml @@ -0,0 +1,13 @@ +requires: + min-version: 8 + +pcap: ../../http-request-line/input.pcap + +args: + - -k none + +checks: + - filter: + count: 1 + match: + event_type: alert diff --git a/tests/lua/lua-packetlib-04-icmp-spdp/packet.lua b/tests/lua/lua-packetlib-04-icmp-spdp/packet.lua new file mode 100644 index 000000000..7e6a062b5 --- /dev/null +++ b/tests/lua/lua-packetlib-04-icmp-spdp/packet.lua @@ -0,0 +1,23 @@ +local packet = require "suricata.packet" + +function init (args) + local needs = {} + return needs +end + +function match (args) + p = packet.get() + + sp, err = p:sp() + if err == nil then + SCLogError("sp() should have failed for icmp") + return 0 + end + + if err ~= "sp only available for tcp, udp and sctp" then + SCLogError("sp() error message mismatch") + return 0 + end + + return 1 +end diff --git a/tests/lua/lua-packetlib-04-icmp-spdp/suricata.yaml b/tests/lua/lua-packetlib-04-icmp-spdp/suricata.yaml new file mode 100644 index 000000000..b63582207 --- /dev/null +++ b/tests/lua/lua-packetlib-04-icmp-spdp/suricata.yaml @@ -0,0 +1,314 @@ +%YAML 1.1 +--- + +stats: + enabled: yes + interval: 8 + +# Configure the type of alert (and other) logging you would like. +outputs: + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + # Enable for multi-threaded eve.json output; output files are amended with + # an identifier, e.g., eve.9.json + #threaded: false + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #ethernet: no # log ethernet header in events when available + #redis: + # server: 127.0.0.1 + # port: 6379 + # async: true ## if redis replies are read asynchronously + # mode: list ## possible values: list|lpush (default), rpush, channel|publish, xadd|stream + # ## lpush and rpush are using a Redis list. "list" is an alias for lpush + # ## publish is using a Redis channel. "channel" is an alias for publish + # ## xadd is using a Redis stream. 
"stream" is an alias for xadd + # key: suricata ## string denoting the key/channel/stream to use (default to suricata) + # stream-maxlen: 100000 ## Automatically trims the stream length to at most + ## this number of events. Set to 0 to disable trimming. + ## Only used when mode is set to xadd/stream. + # stream-trim-exact: false ## Trim exactly to the maximum stream length above. + ## Default: use inexact trimming (inexact by a few + ## tens of items) + ## Only used when mode is set to xadd/stream. + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting should be reserved to high traffic Suricata deployments. + # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entries to keep in buffer + + # Include top level metadata. Default yes. + #metadata: no + + # include the name of the input pcap file in pcap file processing mode + pcap-file: false + + # Community Flow ID + # Adds a 'community_id' field to EVE records. These are meant to give + # records a predictable flow ID that can be used to match records to + # output of other tools such as Zeek (Bro). + # + # Takes a 'seed' that needs to be same across sensors and tools + # to make the id less predictable. + + # enable/disable the community id feature. + community-id: false + # Seed value for the ID output. Valid values are 0-65535. + community-id-seed: 0 + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available: "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported: "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported. If more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4 KiB # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # payload-length: yes # enable dumping payload length, including the gaps + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. Default yes + # If you want metadata, use: + # metadata: + # Include the decoded application layer (ie. http, dns) + #app-layer: true + # Log the current state of the flow record. + #flow: true + #rule: + # Log the metadata field from the rule in a structured + # format. + #metadata: true + # Log the raw rule text. 
+ #raw: false + #reference: false # include reference information from the rule + # http-body: yes # Requires metadata; enable dumping of HTTP body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format + # websocket-payload: yes # Requires metadata; enable dumping of WebSocket Payload in Base64 + # websocket-payload-printable: yes # Requires metadata; enable dumping of WebSocket Payload in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: yes + # Enable logging the final action taken on a packet by the engine + # (e.g: the alert may have action 'allowed' but the verdict be + # 'drop' due to another alert. That's the engine's verdict) + # verdict: yes + # app layer frames + - frame: + # disabled by default as this is very verbose. + enabled: no + # payload-buffer-size: 4 KiB # max size of frame payload buffer to output in eve-log + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is enabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # also enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - http: + extended: yes # enable this for extended logging information + # custom allows additional HTTP fields to be included in eve-log. + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + # set this value to one and only one from {both, request, response} + # to dump all HTTP headers for every HTTP request and/or response + # dump-all-headers: none + - dns: + # Suricata 8.0 uses a new DNS logging format, to keep with + # the old format while you upgrade the version can be set + # to 2. See https://docs.suricata.io/en/latest/upgrade/8.0-dns-logging-changes.html + #version: 3 + + # Enable/disable this logger. Default: enabled. + #enabled: yes + + # Control logging of requests and responses: + # - requests: enable logging of DNS queries + # - responses: enable logging of DNS answers + # By default both requests and responses are logged. + #requests: no + #responses: no + + # Format of answer logging: + # - detailed: array item per answer + # - grouped: answers aggregated by type + # Default: all + #formats: [detailed, grouped] + + # DNS record types to log, based on the query type. + # Default: all. 
+ #types: [a, aaaa, cname, mx, ns, ptr, txt] + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # custom controls which TLS fields that are included in eve-log + # WARNING: enabling custom disables extended logging. + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4, subjectaltname, client, client_certificate, client_chain, client_alpns, server_alpns] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + #- drop: + # alerts: yes # log alerts that caused drops + # flows: all # start or all: 'start' logs only a single drop + # # per flow direction. All logs each dropped pkt. + # Enable logging the final action taken on a packet by the engine + # (will show more information in case of a drop caused by 'reject') + # verdict: yes + - smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + #- dnp3 + - websocket + #- enip + - ftp + - rdp + - nfs + - smb + - tftp + - ike + - dcerpc + - krb5 + - snmp + - rfb + - sip + - quic + - ldap + - arp: + enabled: no # Many events can be logged. Disabled by default + - dhcp: + enabled: yes + # When extended mode is on, all DHCP messages are logged + # with full detail. When extended mode is off (the + # default), just enough information to map a MAC address + # to an IP address is logged. + extended: no + - ssh + - mqtt: + # passwords: yes # enable output of passwords + # string-log-limit: 1KiB # limit size of logged strings in bytes. + # Can be specified in KiB, MiB, GiB. Just a number + # is parsed as bytes. Default is 1 KiB. + # Use a value of 0 to disable limiting. + # Note that the size is also bounded by + # the maximum parsed message size (see + # app-layer configuration) + - http2 + # dns over http2 + - doh2 + - pgsql: + enabled: no + # passwords: yes # enable output of passwords. Disabled by default + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values + # Don't log stats counters that are zero. Default: true + #null-values: false # False will NOT log stats counters: 0 + # bi-directional flows + - flow + # uni-directional flows + #- netflow + + # Metadata event type. Triggered whenever a pktvar is saved + # and will include the pktvars, flowvars, flowbits and + # flowints. + #- metadata + + # EXPERIMENTAL per packet output giving TCP state tracking details + # including internal state, flags, etc. + # This output is experimental, meant for debugging and subject to + # change in both config and output without any notice. 
+ #- stream: + # all: false # log all TCP packets + # event-set: false # log packets that have a decoder/stream event + # state-update: false # log packets triggering a TCP state update + # spurious-retransmission: false # log spurious retransmission packets + +# Datasets default settings +datasets: + # Default fallback memcap and hashsize values for datasets in case these + # were not explicitly defined. + defaults: + #memcap: 100 MiB + #hashsize: 2048 + + rules: + # Set to true to allow absolute filenames and filenames that use + # ".." components to reference parent directories in rules that specify + # their filenames. + #allow-absolute-filenames: false + + # Allow datasets in rules write access for "save" and + # "state". This is enabled by default, however write access is + # limited to the data directory. + #allow-write: true + + conn-seen: + type: string + +security: + lua: + allow-rules: true diff --git a/tests/lua/lua-packetlib-04-icmp-spdp/test.rules b/tests/lua/lua-packetlib-04-icmp-spdp/test.rules new file mode 100644 index 000000000..534475385 --- /dev/null +++ b/tests/lua/lua-packetlib-04-icmp-spdp/test.rules @@ -0,0 +1 @@ +alert icmp any any -> any any (lua:packet.lua; sid:1;) diff --git a/tests/lua/lua-packetlib-04-icmp-spdp/test.yaml b/tests/lua/lua-packetlib-04-icmp-spdp/test.yaml new file mode 100644 index 000000000..067a5b98b --- /dev/null +++ b/tests/lua/lua-packetlib-04-icmp-spdp/test.yaml @@ -0,0 +1,13 @@ +requires: + min-version: 8 + +pcap: ../../bug-2190/input.pcap + +args: + - -k none + +checks: + - filter: + count: 1 + match: + event_type: alert diff --git a/tests/rule-hooks/http-body-hook-01/README.md b/tests/rule-hooks/http-body-hook-01/README.md new file mode 100644 index 000000000..b04d06a02 --- /dev/null +++ b/tests/rule-hooks/http-body-hook-01/README.md @@ -0,0 +1,4 @@ +PCAP +==== + +Pcap from https://redmine.openinfosecfoundation.org/issues/2369 diff --git a/tests/rule-hooks/http-body-hook-01/input.pcap b/tests/rule-hooks/http-body-hook-01/input.pcap new file mode 100644 index 000000000..a4a7500eb Binary files /dev/null and b/tests/rule-hooks/http-body-hook-01/input.pcap differ diff --git a/tests/rule-hooks/http-body-hook-01/test.rules b/tests/rule-hooks/http-body-hook-01/test.rules new file mode 100644 index 000000000..44b3fe731 --- /dev/null +++ b/tests/rule-hooks/http-body-hook-01/test.rules @@ -0,0 +1,6 @@ +alert http1:response_not_started any any -> any any (sid:1;) +alert http1:response_line any any -> any any (sid:2;) +alert http1:response_headers any any -> any any (sid:3;) +alert http1:response_body any any -> any any (sid:4;) +alert http1:response_trailer any any -> any any (sid:5;) +alert http1:response_complete any any -> any any (sid:6;) diff --git a/tests/rule-hooks/http-body-hook-01/test.yaml b/tests/rule-hooks/http-body-hook-01/test.yaml new file mode 100644 index 000000000..eefcb02c2 --- /dev/null +++ b/tests/rule-hooks/http-body-hook-01/test.yaml @@ -0,0 +1,39 @@ +requires: + min-version: 8 + +checks: +- filter: + count: 1 + match: + event_type: http + http.url: "/~regit/ids-suricata-esiea.pdf" +- filter: + count: 1 + match: + event_type: alert + alert.signature_id: 1 # not started +- filter: + count: 1 + match: + event_type: alert + alert.signature_id: 2 # request_line +- filter: + count: 1 + match: + event_type: alert + alert.signature_id: 3 # header +- filter: + count: 443 + match: + event_type: alert + alert.signature_id: 4 # body update +- filter: + count: 1 + match: + event_type: alert + alert.signature_id: 5 # trailer +- filter: + 
count: 1 + match: + event_type: alert + alert.signature_id: 6 # complete diff --git a/tests/rule-hooks/tls-handshake-01-ips-sni/suricata.yaml b/tests/rule-hooks/tls-handshake-01-ips-sni/suricata.yaml new file mode 100644 index 000000000..b67886b55 --- /dev/null +++ b/tests/rule-hooks/tls-handshake-01-ips-sni/suricata.yaml @@ -0,0 +1,101 @@ +%YAML 1.1 +--- + +# Global stats configuration +stats: + enabled: yes + # The interval field (in seconds) controls the interval at + # which stats are updated in the log. + interval: 8 + # Add decode events to stats. + #decoder-events: true + # Decoder event prefix in stats. Has been 'decoder' before, but that leads + # to missing events in the eve.stats records. See issue #2225. + #decoder-events-prefix: "decoder.event" + # Add stream events as stats. + #stream-events: false + +# Configure the type of alert (and other) logging you would like. +outputs: + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. Default yes + # http-body: yes # Requires metadata; enable dumping of HTTP body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: yes + # Enable logging the final action taken on a packet by the engine + # (e.g: the alert may have action 'allowed' but the verdict be + # 'drop' due to another alert. That's the engine's verdict) + # verdict: yes + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is enabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # also enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # ja4 hashes in tls records will never be logged unless + # the following is set to on. 
(Default off) + # ja4: off + # custom controls which TLS fields that are included in eve-log + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + - drop: + alerts: yes # log alerts that caused drops + flows: all # start or all: 'start' logs only a single drop + # EXPERIMENTAL per packet output giving TCP state tracking details + # including internal state, flags, etc. + # This output is experimental, meant for debugging and subject to + # change in both config and output without any notice. + #- stream: + # all: false # log all TCP packets + # event-set: false # log packets that have a decoder/stream event + # state-update: false # log packets triggering a TCP state update + # spurious-retransmission: false # log spurious retransmission packets diff --git a/tests/rule-hooks/tls-handshake-01-ips-sni/test.rules b/tests/rule-hooks/tls-handshake-01-ips-sni/test.rules new file mode 100644 index 000000000..e81e5098f --- /dev/null +++ b/tests/rule-hooks/tls-handshake-01-ips-sni/test.rules @@ -0,0 +1,2 @@ +pass tls:client_hello_done any any -> any any (tls.sni; content:"www.google.com"; sid:21; alert;) +drop tls:client_hello_done any any -> any any (sid:22;) diff --git a/tests/rule-hooks/tls-handshake-01-ips-sni/test.yaml b/tests/rule-hooks/tls-handshake-01-ips-sni/test.yaml new file mode 100644 index 000000000..d0b9cd7eb --- /dev/null +++ b/tests/rule-hooks/tls-handshake-01-ips-sni/test.yaml @@ -0,0 +1,22 @@ +requires: + min-version: 8 + +pcap: ../../tls/tls-client-hello-frag-01/dump_mtu300.pcap + +args: +- -k none +- --simulate-ips + +checks: +- filter: + count: 1 + match: + event_type: alert +- filter: + count: 0 + match: + event_type: drop +- filter: + count: 1 + match: + event_type: tls diff --git a/tests/rule-hooks/tls-handshake-02-ips-sni-drop/suricata.yaml b/tests/rule-hooks/tls-handshake-02-ips-sni-drop/suricata.yaml new file mode 100644 index 000000000..b67886b55 --- /dev/null +++ b/tests/rule-hooks/tls-handshake-02-ips-sni-drop/suricata.yaml @@ -0,0 +1,101 @@ +%YAML 1.1 +--- + +# Global stats configuration +stats: + enabled: yes + # The interval field (in seconds) controls the interval at + # which stats are updated in the log. + interval: 8 + # Add decode events to stats. + #decoder-events: true + # Decoder event prefix in stats. Has been 'decoder' before, but that leads + # to missing events in the eve.stats records. See issue #2225. + #decoder-events-prefix: "decoder.event" + # Add stream events as stats. + #stream-events: false + +# Configure the type of alert (and other) logging you would like. +outputs: + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # packet: yes # enable dumping of packet (without stream segments) + # metadata: no # enable inclusion of app layer metadata with alert. 
Default yes + # http-body: yes # Requires metadata; enable dumping of HTTP body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of HTTP body in printable format + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: yes + # Enable logging the final action taken on a packet by the engine + # (e.g: the alert may have action 'allowed' but the verdict be + # 'drop' due to another alert. That's the engine's verdict) + # verdict: yes + - anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is enabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # also enabled. + enabled: yes + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. + types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no + - tls: + extended: yes # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # ja4 hashes in tls records will never be logged unless + # the following is set to on. (Default off) + # ja4: off + # custom controls which TLS fields that are included in eve-log + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4] + - files: + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + - drop: + alerts: yes # log alerts that caused drops + flows: all # start or all: 'start' logs only a single drop + # EXPERIMENTAL per packet output giving TCP state tracking details + # including internal state, flags, etc. + # This output is experimental, meant for debugging and subject to + # change in both config and output without any notice. 
+ #- stream: + # all: false # log all TCP packets + # event-set: false # log packets that have a decoder/stream event + # state-update: false # log packets triggering a TCP state update + # spurious-retransmission: false # log spurious retransmission packets diff --git a/tests/rule-hooks/tls-handshake-02-ips-sni-drop/test.rules b/tests/rule-hooks/tls-handshake-02-ips-sni-drop/test.rules new file mode 100644 index 000000000..fa394d168 --- /dev/null +++ b/tests/rule-hooks/tls-handshake-02-ips-sni-drop/test.rules @@ -0,0 +1,2 @@ +pass tls:client_hello_done any any -> any any (tls.sni; content:"www.bing.com"; sid:21; alert;) +drop tls:client_hello_done any any -> any any (sid:22;) diff --git a/tests/rule-hooks/tls-handshake-02-ips-sni-drop/test.yaml b/tests/rule-hooks/tls-handshake-02-ips-sni-drop/test.yaml new file mode 100644 index 000000000..fe376a9bb --- /dev/null +++ b/tests/rule-hooks/tls-handshake-02-ips-sni-drop/test.yaml @@ -0,0 +1,22 @@ +requires: + min-version: 8 + +pcap: ../../tls/tls-client-hello-frag-01/dump_mtu300.pcap + +args: +- -k none +- --simulate-ips + +checks: +- filter: + count: 1 + match: + event_type: alert +- filter: + count: 57 + match: + event_type: drop +- filter: + count: 0 + match: + event_type: tls diff --git a/tests/tls/tls-ja3s-requires-off/test-ja3s-hash.lua b/tests/tls/tls-ja3s-requires-off/test-ja3s-hash.lua index 791afa779..061186f96 100644 --- a/tests/tls/tls-ja3s-requires-off/test-ja3s-hash.lua +++ b/tests/tls/tls-ja3s-requires-off/test-ja3s-hash.lua @@ -1,6 +1,5 @@ function init(args) local needs = {} - needs["tls"] = tostring(true) return needs end diff --git a/tests/tls/tls-ja3s-requires-off/test-ja3s-string.lua b/tests/tls/tls-ja3s-requires-off/test-ja3s-string.lua index 2ea8b0d80..7f28c5136 100644 --- a/tests/tls/tls-ja3s-requires-off/test-ja3s-string.lua +++ b/tests/tls/tls-ja3s-requires-off/test-ja3s-string.lua @@ -1,6 +1,5 @@ function init(args) local needs = {} - needs["tls"] = tostring(true) return needs end diff --git a/tests/tls/tls-ja3s-requires/test-ja3s-hash.lua b/tests/tls/tls-ja3s-requires/test-ja3s-hash.lua index 791afa779..061186f96 100644 --- a/tests/tls/tls-ja3s-requires/test-ja3s-hash.lua +++ b/tests/tls/tls-ja3s-requires/test-ja3s-hash.lua @@ -1,6 +1,5 @@ function init(args) local needs = {} - needs["tls"] = tostring(true) return needs end diff --git a/tests/tls/tls-ja3s-requires/test-ja3s-string.lua b/tests/tls/tls-ja3s-requires/test-ja3s-string.lua index 2ea8b0d80..7f28c5136 100644 --- a/tests/tls/tls-ja3s-requires/test-ja3s-string.lua +++ b/tests/tls/tls-ja3s-requires/test-ja3s-string.lua @@ -1,6 +1,5 @@ function init(args) local needs = {} - needs["tls"] = tostring(true) return needs end diff --git a/tests/tls/tls-ja3s/test-ja3s-hash.lua b/tests/tls/tls-ja3s/test-ja3s-hash.lua index 791afa779..061186f96 100644 --- a/tests/tls/tls-ja3s/test-ja3s-hash.lua +++ b/tests/tls/tls-ja3s/test-ja3s-hash.lua @@ -1,6 +1,5 @@ function init(args) local needs = {} - needs["tls"] = tostring(true) return needs end diff --git a/tests/tls/tls-ja3s/test-ja3s-string.lua b/tests/tls/tls-ja3s/test-ja3s-string.lua index 2ea8b0d80..7f28c5136 100644 --- a/tests/tls/tls-ja3s/test-ja3s-string.lua +++ b/tests/tls/tls-ja3s/test-ja3s-string.lua @@ -1,6 +1,5 @@ function init(args) local needs = {} - needs["tls"] = tostring(true) return needs end diff --git a/tests/tls/tls-ja3s/test.rules b/tests/tls/tls-ja3s/test.rules index a5b259bc4..97276aefd 100644 --- a/tests/tls/tls-ja3s/test.rules +++ b/tests/tls/tls-ja3s/test.rules @@ -1,4 +1,4 @@ alert tls any any -> 
any any (msg:"ja3s.hash test"; flow:established,to_client; ja3s.hash; content:"5d79edf64e03689ff559a54e9d9487bc"; sid:1;) alert tls any any -> any any (msg:"ja3s.string test"; flow:established,to_client; ja3s.string; content:"771,49199,65281-0-11-16-23"; sid:2;) -alert tls any any -> any any (msg:"ja3s.hash Lua test"; flow:established,to_client; lua:test-ja3s-hash.lua; sid:3;) -alert tls any any -> any any (msg:"ja3s.string Lua test"; flow:established,to_client; lua:test-ja3s-string.lua; sid:4;) +alert tls:server_hello any any -> any any (msg:"ja3s.hash Lua test"; flow:established,to_client; lua:test-ja3s-hash.lua; sid:3;) +alert tls:server_hello any any -> any any (msg:"ja3s.string Lua test"; flow:established,to_client; lua:test-ja3s-string.lua; sid:4;)