author    Andrew Geissler <geissonator@yahoo.com>  2021-04-15 23:52:46 +0300
committer Brad Bishop <bradleyb@fuzziesquirrel.com>  2021-04-19 16:32:18 +0300
commit    f1e440673465aa768f31e78c0c201002f9f767b7 (patch)
tree      44dffb1d845b35c3f4bf0629a622d8ae04abda41 /meta-security
parent    636aaa195862ab9a5442c3178e38266debab3bff (diff)
download  openbmc-f1e440673465aa768f31e78c0c201002f9f767b7.tar.xz
meta-security: subtree update:775870980b..ca9264b1e1
Anton Antonov (4):
      Use libest "main" branch instead of "master".
      Add meta-parsec layer into meta-security.
      Define secure images with parsec-service and parsec-tool included and add the images into gitlab CI
      Clearly define clang toolchain in Parsec recipes

Armin Kuster (16):
      packagegroup-core-security: drop clamav-cvd
      clamav: upgrade 104.0
      python3-privacyidea: upgrade 3.5.1 -> 3.5.2
      clamav: fix systemd service install
      swtpm: now need python-cryptography, pull in layer
      swtpm: file pip3 issue
      swtpm: fix check for tscd deamon on host
      python3-suricata-update: update to 1.2.1
      suricata: update to 6.0.2
      layer.conf: add dynamic-layer for rust pkg
      README: cleanup
      .gitlab-ci.yml: reorder to speed up builds
      kas-security-base.yml: tweek build vars
      gitlab-ci: fine tune order
      clamav: remove rest of mirror.dat ref
      lkrg-module: Add Linux Kernel Runtime Guard

Ming Liu (2):
      meta: drop IMA_POLICY from policy recipes
      initramfs-framework-ima: introduce IMA_FORCE

Signed-off-by: Andrew Geissler <geissonator@yahoo.com>
Change-Id: Ifac35a0d7b7e724f1e30dce5f6634d5d4fc9b5b9
Diffstat (limited to 'meta-security')
-rw-r--r--  meta-security/.gitlab-ci.yml | 88
-rw-r--r--  meta-security/README | 27
-rw-r--r--  meta-security/conf/layer.conf | 4
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/fixup.patch | 32
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/run-ptest | 3
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.service | 20
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.yaml | 1326
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/tmpfiles.suricata | 2
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/volatiles.03_suricata | 2
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/libhtp_0.5.37.bb | 27
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata.inc | 8
-rw-r--r--  meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata_6.0.2.bb | 193
-rw-r--r--  meta-security/kas/kas-security-base.yml | 2
-rw-r--r--  meta-security/kas/kas-security-parsec.yml | 21
-rw-r--r--  meta-security/kas/qemuarm-parsec.yml | 6
-rw-r--r--  meta-security/kas/qemuarm64-parsec.yml | 6
-rw-r--r--  meta-security/kas/qemuppc-parsec.yml | 6
-rw-r--r--  meta-security/kas/qemux86-64-parsec.yml | 6
-rw-r--r--  meta-security/kas/qemux86-parsec.yml | 6
-rw-r--r--  meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima.bb | 5
-rw-r--r--  meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima/ima | 9
-rw-r--r--  meta-security/meta-integrity/recipes-security/ima_policy_appraise_all/ima-policy-appraise-all_1.0.bb | 9
-rw-r--r--  meta-security/meta-integrity/recipes-security/ima_policy_hashed/ima-policy-hashed_1.0.bb | 9
-rw-r--r--  meta-security/meta-integrity/recipes-security/ima_policy_simple/ima-policy-simple_1.0.bb | 9
-rw-r--r--  meta-security/meta-parsec/README.md | 186
-rw-r--r--  meta-security/meta-parsec/conf/layer.conf | 14
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-service/files/cryptoki.patch | 18
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec-tmpfiles.conf | 2
-rwxr-xr-x  meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec_init | 63
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-service/files/systemd.patch | 19
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.bb | 67
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.inc | 147
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.bb | 17
-rw-r--r--  meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.inc | 127
-rw-r--r--  meta-security/meta-tpm/conf/layer.conf | 1
-rw-r--r--  meta-security/meta-tpm/recipes-tpm/swtpm/files/oe_configure.patch | 65
-rw-r--r--  meta-security/meta-tpm/recipes-tpm/swtpm/swtpm_0.5.2.bb | 7
-rw-r--r--  meta-security/recipes-core/packagegroup/packagegroup-core-security.bb | 4
-rw-r--r--  meta-security/recipes-ids/suricata/python3-suricata-update_1.2.1.bb (renamed from meta-security/recipes-ids/suricata/python3-suricata-update_1.1.1.bb) | 8
-rw-r--r--  meta-security/recipes-kernel/lkrg/files/makefile_cleanup.patch | 73
-rw-r--r--  meta-security/recipes-kernel/lkrg/lkrg-module_0.9.0.bb | 33
-rw-r--r--  meta-security/recipes-scanners/clamav/clamav_0.104.0.bb (renamed from meta-security/recipes-scanners/clamav/clamav_0.101.5.bb) | 116
-rw-r--r--  meta-security/recipes-scanners/clamav/files/headers_fixup.patch | 58
-rw-r--r--  meta-security/recipes-scanners/clamav/files/oe_cmake_fixup.patch | 39
-rw-r--r--  meta-security/recipes-security/libest/libest_3.2.0.bb | 2
-rw-r--r--  meta-security/recipes-security/mfa/python3-privacyidea_3.5.2.bb (renamed from meta-security/recipes-security/mfa/python3-privacyidea_3.5.1.bb) | 2
46 files changed, 2721 insertions, 173 deletions
diff --git a/meta-security/.gitlab-ci.yml b/meta-security/.gitlab-ci.yml
index 1442239b20..f673ef6988 100644
--- a/meta-security/.gitlab-ci.yml
+++ b/meta-security/.gitlab-ci.yml
@@ -26,128 +26,104 @@ stages:
qemux86:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME-parsec.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME-comp.yml
+ - kas build --target harden-image-minimal kas/$CI_JOB_NAME-harden.yml
+ - kas build --target integrity-image-minimal kas/$CI_JOB_NAME-ima.yml
qemux86-64:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME-parsec.yml
+ - kas build --target dm-verity-image-initramfs kas/$CI_JOB_NAME-dm-verify.yml
+ - kas build --target integrity-image-minimal kas/$CI_JOB_NAME-ima.yml
qemuarm:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME-parsec.yml
qemuarm64:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME-parsec.yml
+ - kas build --target integrity-image-minimal kas/$CI_JOB_NAME-ima.yml
qemuppc:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME-parsec.yml
qemumips64:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemuriscv64:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemux86-64-tpm:
extends: .build
script:
- - kas build --target security-tpm-image kas/$CI_JOB_NAME.yml
-
-qemux86-64-tpm2:
- extends: .build
- script:
- - kas build --target security-tpm2-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-tpm-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-tpm2-image kas/$CI_JOB_NAME2.yml
qemuarm64-tpm2:
extends: .build
script:
- - kas build --target security-tpm2-image kas/$CI_JOB_NAME.yml
-
-qemux86-ima:
- extends: .build
- script:
- - kas build --target integrity-image-minimal kas/$CI_JOB_NAME.yml
-
-qemux86-64-ima:
- extends: .build
- script:
- - kas build --target integrity-image-minimal kas/$CI_JOB_NAME.yml
-
-qemuarm64-ima:
- extends: .build
- script:
- - kas build --target integrity-image-minimal kas/$CI_JOB_NAME.yml
-
-qemux86-64-dm-verify:
- extends: .build
- script:
- - kas build --target core-image-minimal kas/qemux86-64.yml
- - kas build --target dm-verity-image-initramfs kas/$CI_JOB_NAME.yml
-
+ - kas build --target security-tpm2-image kas/$CI_JOB_NAME.yml
qemuarm64-alt:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemuarm64-multi:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemumips64-alt:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemumips64-multi:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemux86-64-alt:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemux86-64-multi:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemux86-musl:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemuarm64-musl:
extends: .build
script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
-
-qemux86-harden:
- extends: .build
- script:
- - kas build --target harden-image-minimal kas/$CI_JOB_NAME.yml
-
-qemux86-comp:
- extends: .build
- script:
- - kas build --target security-build-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-build-image kas/$CI_JOB_NAME.yml
qemux86-test:
extends: .build
allow_failure: true
script:
- - kas build --target security-test-image kas/$CI_JOB_NAME.yml
- - kas build -c testimage --target security-test-image kas/$CI_JOB_NAME.yml
+ - kas build --target security-test-image kas/$CI_JOB_NAME.yml
+ - kas build -c testimage --target security-test-image kas/$CI_JOB_NAME.yml
+
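
In the .gitlab-ci.yml hunk above, each job name doubles as the kas configuration file name, so any of these CI jobs can be reproduced outside GitLab with the same commands. A minimal sketch for the qemux86 job (standing in for $CI_JOB_NAME), assuming kas is installed and the commands are run from a meta-security checkout:

    kas build --target security-build-image kas/qemux86.yml
    kas build --target security-build-image kas/qemux86-parsec.yml
    kas build --target security-build-image kas/qemux86-comp.yml
    kas build --target harden-image-minimal kas/qemux86-harden.yml
    kas build --target integrity-image-minimal kas/qemux86-ima.yml
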
diff --git a/meta-security/README b/meta-security/README
index f223feef03..eb15366753 100644
--- a/meta-security/README
+++ b/meta-security/README
@@ -11,28 +11,19 @@ This layer depends on:
URI: git://git.openembedded.org/openembedded-core
branch: master
- revision: HEAD
- prio: default
URI: git://git.openembedded.org/meta-openembedded/meta-oe
branch: master
- revision: HEAD
- prio: default
URI: git://git.openembedded.org/meta-openembedded/meta-perl
branch: master
- revision: HEAD
- prio: default
URI: git://git.openembedded.org/meta-openembedded/meta-python
branch: master
- revision: HEAD
- prio: default
URI: git://git.openembedded.org/meta-openembedded/meta-networking
branch: master
- revision: HEAD
- prio: default
+
Adding the security layer to your build
========================================
@@ -51,11 +42,23 @@ other layers needed. e.g.:
/path/to/meta-openembedded/meta-perl \
/path/to/meta-openembedded/meta-python \
/path/to/meta-openembedded/meta-networking \
- /path/to/layer/meta-security \
+ /path/to/layer/meta-security "
+
+Optional Rust dependancy
+======================================
+If you want to use the latest Suricata that needs rust, you will need to clone
+
+ URI: https://github.com/meta-rust/meta-rust.git
+ branch: master
+
+ BBLAYERS += "/path/to/layer/meta-rust"
+
+This will activate the dynamic-layer mechanism and pull in the newer suricata
+
Maintenance
------------
+======================================
Send pull requests, patches, comments or questions to yocto@lists.yoctoproject.org
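
The optional Rust dependency described in the README hunk above amounts to cloning meta-rust and registering it with the build. A hedged sketch of the same steps as shell commands, reusing the placeholder path from the README and assuming a sourced build environment where bitbake-layers is available:

    git clone -b master https://github.com/meta-rust/meta-rust.git /path/to/layer/meta-rust
    bitbake-layers add-layer /path/to/layer/meta-rust
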
diff --git a/meta-security/conf/layer.conf b/meta-security/conf/layer.conf
index fd21da1eba..906e024407 100644
--- a/meta-security/conf/layer.conf
+++ b/meta-security/conf/layer.conf
@@ -12,3 +12,7 @@ BBFILE_PRIORITY_security = "8"
LAYERSERIES_COMPAT_security = "hardknott"
LAYERDEPENDS_security = "core openembedded-layer perl-layer networking-layer meta-python"
+
+BBFILES_DYNAMIC += " \
+rust-layer:${LAYERDIR}/dynamic-layers/meta-rust/recipes-*/*/*.bb \
+"
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/fixup.patch b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/fixup.patch
new file mode 100644
index 0000000000..fc44ce68f5
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/fixup.patch
@@ -0,0 +1,32 @@
+Skip pkg Makefile from using its own rust steps
+
+Upstream-Status: OE Specific
+
+Signed-off-by: Armin Kuster <akuster808@gmail.com>
+
+Index: suricata-6.0.2/Makefile.am
+===================================================================
+--- suricata-6.0.2.orig/Makefile.am
++++ suricata-6.0.2/Makefile.am
+@@ -7,7 +7,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE s
+ $(SURICATA_UPDATE_DIR) \
+ lua \
+ acsite.m4
+-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
++SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
+ $(SURICATA_UPDATE_DIR)
+
+ CLEANFILES = stamp-h[0-9]*
+Index: suricata-6.0.2/Makefile.in
+===================================================================
+--- suricata-6.0.2.orig/Makefile.in
++++ suricata-6.0.2/Makefile.in
+@@ -426,7 +426,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE s
+ lua \
+ acsite.m4
+
+-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
++SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
+ $(SURICATA_UPDATE_DIR)
+
+ CLEANFILES = stamp-h[0-9]*
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/run-ptest b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/run-ptest
new file mode 100644
index 0000000000..666ba9c954
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/run-ptest
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+suricata -u
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.service b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.service
new file mode 100644
index 0000000000..a99a76ef86
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.service
@@ -0,0 +1,20 @@
+[Unit]
+Description=Suricata IDS/IDP daemon
+After=network.target
+Requires=network.target
+Documentation=man:suricata(8) man:suricatasc(8)
+Documentation=https://redmine.openinfosecfoundation.org/projects/suricata/wiki
+
+[Service]
+Type=simple
+CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_RAW
+RestrictAddressFamilies=
+ExecStart=/usr/bin/suricata -c /etc/suricata/suricata.yaml eth0
+ExecReload=/bin/kill -HUP $MAINPID
+PrivateTmp=yes
+ProtectHome=yes
+ProtectSystem=yes
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.yaml b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.yaml
new file mode 100644
index 0000000000..8d06a27449
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/suricata.yaml
@@ -0,0 +1,1326 @@
+%YAML 1.1
+---
+
+# Suricata configuration file. In addition to the comments describing all
+# options in this file, full documentation can be found at:
+# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml
+
+
+# Number of packets allowed to be processed simultaneously. Default is a
+# conservative 1024. A higher number will make sure CPU's/CPU cores will be
+# more easily kept busy, but may negatively impact caching.
+#
+# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules
+# apply. In that case try something like 60000 or more. This is because the CUDA
+# pattern matcher buffers and scans as many packets as possible in parallel.
+#max-pending-packets: 1024
+
+# Runmode the engine should use. Please check --list-runmodes to get the available
+# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
+# load balancing).
+#runmode: autofp
+
+# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
+#
+# Supported schedulers are:
+#
+# round-robin - Flows assigned to threads in a round robin fashion.
+# active-packets - Flows assigned to threads that have the lowest number of
+# unprocessed packets (default).
+# hash - Flow alloted usihng the address hash. More of a random
+# technique. Was the default in Suricata 1.2.1 and older.
+#
+#autofp-scheduler: active-packets
+
+# If suricata box is a router for the sniffed networks, set it to 'router'. If
+# it is a pure sniffing setup, set it to 'sniffer-only'.
+# If set to auto, the variable is internally switch to 'router' in IPS mode
+# and 'sniffer-only' in IDS mode.
+# This feature is currently only used by the reject* keywords.
+host-mode: auto
+
+# Run suricata as user and group.
+#run-as:
+# user: suri
+# group: suri
+
+# Default pid file.
+# Will use this file if no --pidfile in command options.
+#pid-file: /var/run/suricata.pid
+
+# Daemon working directory
+# Suricata will change directory to this one if provided
+# Default: "/"
+#daemon-directory: "/"
+
+# Preallocated size for packet. Default is 1514 which is the classical
+# size for pcap on ethernet. You should adjust this value to the highest
+# packet size (MTU + hardware header) on your system.
+#default-packet-size: 1514
+
+# The default logging directory. Any log or output file will be
+# placed here if its not specified with a full path name. This can be
+# overridden with the -l command line parameter.
+default-log-dir: /var/log/suricata/
+
+# Unix command socket can be used to pass commands to suricata.
+# An external tool can then connect to get information from suricata
+# or trigger some modifications of the engine. Set enabled to yes
+# to activate the feature. You can use the filename variable to set
+# the file name of the socket.
+unix-command:
+ enabled: no
+ #filename: custom.socket
+
+# Configure the type of alert (and other) logging you would like.
+outputs:
+
+ # a line based alerts log similar to Snort's fast.log
+ - fast:
+ enabled: yes
+ filename: fast.log
+ append: yes
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+
+ # Extensible Event Format (nicknamed EVE) event log in JSON format
+ - eve-log:
+ enabled: yes
+ type: file #file|syslog|unix_dgram|unix_stream
+ filename: eve.json
+ # the following are valid when type: syslog above
+ #identity: "suricata"
+ #facility: local5
+ #level: Info ## possible levels: Emergency, Alert, Critical,
+ ## Error, Warning, Notice, Info, Debug
+ types:
+ - alert
+ - http:
+ extended: yes # enable this for extended logging information
+ # custom allows additional http fields to be included in eve-log
+ # the example below adds three additional fields when uncommented
+ #custom: [Accept-Encoding, Accept-Language, Authorization]
+ - dns
+ - tls:
+ extended: yes # enable this for extended logging information
+ - files:
+ force-magic: no # force logging magic on all logged files
+ force-md5: no # force logging of md5 checksums
+ #- drop
+ - ssh
+
+ # alert output for use with Barnyard2
+ - unified2-alert:
+ enabled: yes
+ filename: unified2.alert
+
+ # File size limit. Can be specified in kb, mb, gb. Just a number
+ # is parsed as bytes.
+ #limit: 32mb
+
+ # Sensor ID field of unified2 alerts.
+ #sensor-id: 0
+
+ # HTTP X-Forwarded-For support by adding the unified2 extra header that
+ # will contain the actual client IP address or by overwriting the source
+ # IP address (helpful when inspecting traffic that is being reversed
+ # proxied).
+ xff:
+ enabled: no
+ # Two operation modes are available, "extra-data" and "overwrite". Note
+ # that in the "overwrite" mode, if the reported IP address in the HTTP
+ # X-Forwarded-For header is of a different version of the packet
+ # received, it will fall-back to "extra-data" mode.
+ mode: extra-data
+ # Header name were the actual IP address will be reported, if more than
+ # one IP address is present, the last IP address will be the one taken
+ # into consideration.
+ header: X-Forwarded-For
+
+ # a line based log of HTTP requests (no alerts)
+ - http-log:
+ enabled: yes
+ filename: http.log
+ append: yes
+ #extended: yes # enable this for extended logging information
+ #custom: yes # enabled the custom logging format (defined by customformat)
+ #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P"
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+
+ # a line based log of TLS handshake parameters (no alerts)
+ - tls-log:
+ enabled: no # Log TLS connections.
+ filename: tls.log # File to store TLS logs.
+ append: yes
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+ #extended: yes # Log extended information like fingerprint
+ certs-log-dir: certs # directory to store the certificates files
+
+ # a line based log of DNS requests and/or replies (no alerts)
+ - dns-log:
+ enabled: no
+ filename: dns.log
+ append: yes
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+
+ # a line based log to used with pcap file study.
+ # this module is dedicated to offline pcap parsing (empty output
+ # if used with another kind of input). It can interoperate with
+ # pcap parser like wireshark via the suriwire plugin.
+ - pcap-info:
+ enabled: no
+
+ # Packet log... log packets in pcap format. 2 modes of operation: "normal"
+ # and "sguil".
+ #
+ # In normal mode a pcap file "filename" is created in the default-log-dir,
+ # or are as specified by "dir". In Sguil mode "dir" indicates the base directory.
+ # In this base dir the pcaps are created in th directory structure Sguil expects:
+ #
+ # $sguil-base-dir/YYYY-MM-DD/$filename.<timestamp>
+ #
+ # By default all packets are logged except:
+ # - TCP streams beyond stream.reassembly.depth
+ # - encrypted streams after the key exchange
+ #
+ - pcap-log:
+ enabled: no
+ filename: log.pcap
+
+ # File size limit. Can be specified in kb, mb, gb. Just a number
+ # is parsed as bytes.
+ limit: 1000mb
+
+ # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit"
+ max-files: 2000
+
+ mode: normal # normal or sguil.
+ #sguil-base-dir: /nsm_data/
+ #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec
+ use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets
+
+ # a full alerts log containing much information for signature writers
+ # or for investigating suspected false positives.
+ - alert-debug:
+ enabled: no
+ filename: alert-debug.log
+ append: yes
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+
+ # alert output to prelude (http://www.prelude-technologies.com/) only
+ # available if Suricata has been compiled with --enable-prelude
+ - alert-prelude:
+ enabled: no
+ profile: suricata
+ log-packet-content: no
+ log-packet-header: yes
+
+ # Stats.log contains data from various counters of the suricata engine.
+ # The interval field (in seconds) tells after how long output will be written
+ # on the log file.
+ - stats:
+ enabled: yes
+ filename: stats.log
+ interval: 8
+
+ # a line based alerts log similar to fast.log into syslog
+ - syslog:
+ enabled: no
+ # reported identity to syslog. If ommited the program name (usually
+ # suricata) will be used.
+ #identity: "suricata"
+ facility: local5
+ #level: Info ## possible levels: Emergency, Alert, Critical,
+ ## Error, Warning, Notice, Info, Debug
+
+ # a line based information for dropped packets in IPS mode
+ - drop:
+ enabled: no
+ filename: drop.log
+ append: yes
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+
+ # output module to store extracted files to disk
+ #
+ # The files are stored to the log-dir in a format "file.<id>" where <id> is
+ # an incrementing number starting at 1. For each file "file.<id>" a meta
+ # file "file.<id>.meta" is created.
+ #
+ # File extraction depends on a lot of things to be fully done:
+ # - stream reassembly depth. For optimal results, set this to 0 (unlimited)
+ # - http request / response body sizes. Again set to 0 for optimal results.
+ # - rules that contain the "filestore" keyword.
+ - file-store:
+ enabled: no # set to yes to enable
+ log-dir: files # directory to store the files
+ force-magic: no # force logging magic on all stored files
+ force-md5: no # force logging of md5 checksums
+ #waldo: file.waldo # waldo file to store the file_id across runs
+
+ # output module to log files tracked in a easily parsable json format
+ - file-log:
+ enabled: no
+ filename: files-json.log
+ append: yes
+ #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+
+ force-magic: no # force logging magic on all logged files
+ force-md5: no # force logging of md5 checksums
+
+# Magic file. The extension .mgc is added to the value here.
+#magic-file: /usr/share/file/magic
+magic-file: /usr/share/misc/magic.mgc
+
+# When running in NFQ inline mode, it is possible to use a simulated
+# non-terminal NFQUEUE verdict.
+# This permit to do send all needed packet to suricata via this a rule:
+# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
+# And below, you can have your standard filtering ruleset. To activate
+# this mode, you need to set mode to 'repeat'
+# If you want packet to be sent to another queue after an ACCEPT decision
+# set mode to 'route' and set next-queue value.
+# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance
+# by processing several packets before sending a verdict (worker runmode only).
+# On linux >= 3.6, you can set the fail-open option to yes to have the kernel
+# accept the packet if suricata is not able to keep pace.
+nfq:
+# mode: accept
+# repeat-mark: 1
+# repeat-mask: 1
+# route-queue: 2
+# batchcount: 20
+# fail-open: yes
+
+#nflog support
+nflog:
+ # netlink multicast group
+ # (the same as the iptables --nflog-group param)
+ # Group 0 is used by the kernel, so you can't use it
+ - group: 2
+ # netlink buffer size
+ buffer-size: 18432
+ # put default value here
+ - group: default
+ # set number of packet to queue inside kernel
+ qthreshold: 1
+ # set the delay before flushing packet in the queue inside kernel
+ qtimeout: 100
+ # netlink max buffer size
+ max-size: 20000
+
+# af-packet support
+# Set threads to > 1 to use PACKET_FANOUT support
+af-packet:
+ - interface: eth0
+ # Number of receive threads (>1 will enable experimental flow pinned
+ # runmode)
+ threads: 1
+ # Default clusterid. AF_PACKET will load balance packets based on flow.
+ # All threads/processes that will participate need to have the same
+ # clusterid.
+ cluster-id: 99
+ # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
+ # This is only supported for Linux kernel > 3.1
+ # possible value are:
+ # * cluster_round_robin: round robin load balancing
+ # * cluster_flow: all packets of a given flow are send to the same socket
+ # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket
+ cluster-type: cluster_flow
+ # In some fragmentation case, the hash can not be computed. If "defrag" is set
+ # to yes, the kernel will do the needed defragmentation before sending the packets.
+ defrag: yes
+ # To use the ring feature of AF_PACKET, set 'use-mmap' to yes
+ use-mmap: yes
+ # Ring size will be computed with respect to max_pending_packets and number
+ # of threads. You can set manually the ring size in number of packets by setting
+ # the following value. If you are using flow cluster-type and have really network
+ # intensive single-flow you could want to set the ring-size independantly of the number
+ # of threads:
+ #ring-size: 2048
+ # On busy system, this could help to set it to yes to recover from a packet drop
+ # phase. This will result in some packets (at max a ring flush) being non treated.
+ #use-emergency-flush: yes
+ # recv buffer size, increase value could improve performance
+ # buffer-size: 32768
+ # Set to yes to disable promiscuous mode
+ # disable-promisc: no
+ # Choose checksum verification mode for the interface. At the moment
+ # of the capture, some packets may be with an invalid checksum due to
+ # offloading to the network card of the checksum computation.
+ # Possible values are:
+ # - kernel: use indication sent by kernel for each packet (default)
+ # - yes: checksum validation is forced
+ # - no: checksum validation is disabled
+ # - auto: suricata uses a statistical approach to detect when
+ # checksum off-loading is used.
+ # Warning: 'checksum-validation' must be set to yes to have any validation
+ #checksum-checks: kernel
+ # BPF filter to apply to this interface. The pcap filter syntax apply here.
+ #bpf-filter: port 80 or udp
+ # You can use the following variables to activate AF_PACKET tap od IPS mode.
+ # If copy-mode is set to ips or tap, the traffic coming to the current
+ # interface will be copied to the copy-iface interface. If 'tap' is set, the
+ # copy is complete. If 'ips' is set, the packet matching a 'drop' action
+ # will not be copied.
+ #copy-mode: ips
+ #copy-iface: eth1
+ - interface: eth1
+ threads: 1
+ cluster-id: 98
+ cluster-type: cluster_flow
+ defrag: yes
+ # buffer-size: 32768
+ # disable-promisc: no
+ # Put default values here
+ - interface: default
+ #threads: 2
+ #use-mmap: yes
+
+legacy:
+ uricontent: enabled
+
+# You can specify a threshold config file by setting "threshold-file"
+# to the path of the threshold config file:
+# threshold-file: /etc/suricata/threshold.config
+
+# The detection engine builds internal groups of signatures. The engine
+# allow us to specify the profile to use for them, to manage memory on an
+# efficient way keeping a good performance. For the profile keyword you
+# can use the words "low", "medium", "high" or "custom". If you use custom
+# make sure to define the values at "- custom-values" as your convenience.
+# Usually you would prefer medium/high/low.
+#
+# "sgh mpm-context", indicates how the staging should allot mpm contexts for
+# the signature groups. "single" indicates the use of a single context for
+# all the signature group heads. "full" indicates a mpm-context for each
+# group head. "auto" lets the engine decide the distribution of contexts
+# based on the information the engine gathers on the patterns from each
+# group head.
+#
+# The option inspection-recursion-limit is used to limit the recursive calls
+# in the content inspection code. For certain payload-sig combinations, we
+# might end up taking too much time in the content inspection code.
+# If the argument specified is 0, the engine uses an internally defined
+# default limit. On not specifying a value, we use no limits on the recursion.
+detect-engine:
+ - profile: medium
+ - custom-values:
+ toclient-src-groups: 2
+ toclient-dst-groups: 2
+ toclient-sp-groups: 2
+ toclient-dp-groups: 3
+ toserver-src-groups: 2
+ toserver-dst-groups: 4
+ toserver-sp-groups: 2
+ toserver-dp-groups: 25
+ - sgh-mpm-context: auto
+ - inspection-recursion-limit: 3000
+ # When rule-reload is enabled, sending a USR2 signal to the Suricata process
+ # will trigger a live rule reload. Experimental feature, use with care.
+ #- rule-reload: true
+ # If set to yes, the loading of signatures will be made after the capture
+ # is started. This will limit the downtime in IPS mode.
+ #- delayed-detect: yes
+
+# Suricata is multi-threaded. Here the threading can be influenced.
+threading:
+ # On some cpu's/architectures it is beneficial to tie individual threads
+ # to specific CPU's/CPU cores. In this case all threads are tied to CPU0,
+ # and each extra CPU/core has one "detect" thread.
+ #
+ # On Intel Core2 and Nehalem CPU's enabling this will degrade performance.
+ #
+ set-cpu-affinity: no
+ # Tune cpu affinity of suricata threads. Each family of threads can be bound
+ # on specific CPUs.
+ cpu-affinity:
+ - management-cpu-set:
+ cpu: [ 0 ] # include only these cpus in affinity settings
+ - receive-cpu-set:
+ cpu: [ 0 ] # include only these cpus in affinity settings
+ - decode-cpu-set:
+ cpu: [ 0, 1 ]
+ mode: "balanced"
+ - stream-cpu-set:
+ cpu: [ "0-1" ]
+ - detect-cpu-set:
+ cpu: [ "all" ]
+ mode: "exclusive" # run detect threads in these cpus
+ # Use explicitely 3 threads and don't compute number by using
+ # detect-thread-ratio variable:
+ # threads: 3
+ prio:
+ low: [ 0 ]
+ medium: [ "1-2" ]
+ high: [ 3 ]
+ default: "medium"
+ - verdict-cpu-set:
+ cpu: [ 0 ]
+ prio:
+ default: "high"
+ - reject-cpu-set:
+ cpu: [ 0 ]
+ prio:
+ default: "low"
+ - output-cpu-set:
+ cpu: [ "all" ]
+ prio:
+ default: "medium"
+ #
+ # By default Suricata creates one "detect" thread per available CPU/CPU core.
+ # This setting allows controlling this behaviour. A ratio setting of 2 will
+ # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
+ # will result in 4 detect threads. If values below 1 are used, less threads
+ # are created. So on a dual core CPU a setting of 0.5 results in 1 detect
+ # thread being created. Regardless of the setting at a minimum 1 detect
+ # thread will always be created.
+ #
+ detect-thread-ratio: 1.5
+
+# Cuda configuration.
+cuda:
+ # The "mpm" profile. On not specifying any of these parameters, the engine's
+ # internal default values are used, which are same as the ones specified in
+ # in the default conf file.
+ mpm:
+ # The minimum length required to buffer data to the gpu.
+ # Anything below this is MPM'ed on the CPU.
+ # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
+ # A value of 0 indicates there's no limit.
+ data-buffer-size-min-limit: 0
+ # The maximum length for data that we would buffer to the gpu.
+ # Anything over this is MPM'ed on the CPU.
+ # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
+ data-buffer-size-max-limit: 1500
+ # The ring buffer size used by the CudaBuffer API to buffer data.
+ cudabuffer-buffer-size: 500mb
+ # The max chunk size that can be sent to the gpu in a single go.
+ gpu-transfer-size: 50mb
+ # The timeout limit for batching of packets in microseconds.
+ batching-timeout: 2000
+ # The device to use for the mpm. Currently we don't support load balancing
+ # on multiple gpus. In case you have multiple devices on your system, you
+ # can specify the device to use, using this conf. By default we hold 0, to
+ # specify the first device cuda sees. To find out device-id associated with
+ # the card(s) on the system run "suricata --list-cuda-cards".
+ device-id: 0
+ # No of Cuda streams used for asynchronous processing. All values > 0 are valid.
+ # For this option you need a device with Compute Capability > 1.0.
+ cuda-streams: 2
+
+# Select the multi pattern algorithm you want to run for scan/search the
+# in the engine. The supported algorithms are b2g, b2gc, b2gm, b3g, wumanber,
+# ac and ac-gfbs.
+#
+# The mpm you choose also decides the distribution of mpm contexts for
+# signature groups, specified by the conf - "detect-engine.sgh-mpm-context".
+# Selecting "ac" as the mpm would require "detect-engine.sgh-mpm-context"
+# to be set to "single", because of ac's memory requirements, unless the
+# ruleset is small enough to fit in one's memory, in which case one can
+# use "full" with "ac". Rest of the mpms can be run in "full" mode.
+#
+# There is also a CUDA pattern matcher (only available if Suricata was
+# compiled with --enable-cuda: b2g_cuda. Make sure to update your
+# max-pending-packets setting above as well if you use b2g_cuda.
+
+mpm-algo: ac
+
+# The memory settings for hash size of these algorithms can vary from lowest
+# (2048) - low (4096) - medium (8192) - high (16384) - higher (32768) - max
+# (65536). The bloomfilter sizes of these algorithms can vary from low (512) -
+# medium (1024) - high (2048).
+#
+# For B2g/B3g algorithms, there is a support for two different scan/search
+# algorithms. For B2g the scan algorithms are B2gScan & B2gScanBNDMq, and
+# search algorithms are B2gSearch & B2gSearchBNDMq. For B3g scan algorithms
+# are B3gScan & B3gScanBNDMq, and search algorithms are B3gSearch &
+# B3gSearchBNDMq.
+#
+# For B2g the different scan/search algorithms and, hash and bloom
+# filter size settings. For B3g the different scan/search algorithms and, hash
+# and bloom filter size settings. For wumanber the hash and bloom filter size
+# settings.
+
+pattern-matcher:
+ - b2gc:
+ search-algo: B2gSearchBNDMq
+ hash-size: low
+ bf-size: medium
+ - b2gm:
+ search-algo: B2gSearchBNDMq
+ hash-size: low
+ bf-size: medium
+ - b2g:
+ search-algo: B2gSearchBNDMq
+ hash-size: low
+ bf-size: medium
+ - b3g:
+ search-algo: B3gSearchBNDMq
+ hash-size: low
+ bf-size: medium
+ - wumanber:
+ hash-size: low
+ bf-size: medium
+
+# Defrag settings:
+
+defrag:
+ memcap: 32mb
+ hash-size: 65536
+ trackers: 65535 # number of defragmented flows to follow
+ max-frags: 65535 # number of fragments to keep (higher than trackers)
+ prealloc: yes
+ timeout: 60
+
+# Enable defrag per host settings
+# host-config:
+#
+# - dmz:
+# timeout: 30
+# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"]
+#
+# - lan:
+# timeout: 45
+# address:
+# - 192.168.0.0/24
+# - 192.168.10.0/24
+# - 172.16.14.0/24
+
+# Flow settings:
+# By default, the reserved memory (memcap) for flows is 32MB. This is the limit
+# for flow allocation inside the engine. You can change this value to allow
+# more memory usage for flows.
+# The hash-size determine the size of the hash used to identify flows inside
+# the engine, and by default the value is 65536.
+# At the startup, the engine can preallocate a number of flows, to get a better
+# performance. The number of flows preallocated is 10000 by default.
+# emergency-recovery is the percentage of flows that the engine need to
+# prune before unsetting the emergency state. The emergency state is activated
+# when the memcap limit is reached, allowing to create new flows, but
+# prunning them with the emergency timeouts (they are defined below).
+# If the memcap is reached, the engine will try to prune flows
+# with the default timeouts. If it doens't find a flow to prune, it will set
+# the emergency bit and it will try again with more agressive timeouts.
+# If that doesn't work, then it will try to kill the last time seen flows
+# not in use.
+# The memcap can be specified in kb, mb, gb. Just a number indicates it's
+# in bytes.
+
+flow:
+ memcap: 64mb
+ hash-size: 65536
+ prealloc: 10000
+ emergency-recovery: 30
+
+# This option controls the use of vlan ids in the flow (and defrag)
+# hashing. Normally this should be enabled, but in some (broken)
+# setups where both sides of a flow are not tagged with the same vlan
+# tag, we can ignore the vlan id's in the flow hashing.
+vlan:
+ use-for-tracking: true
+
+# Specific timeouts for flows. Here you can specify the timeouts that the
+# active flows will wait to transit from the current state to another, on each
+# protocol. The value of "new" determine the seconds to wait after a hanshake or
+# stream startup before the engine free the data of that flow it doesn't
+# change the state to established (usually if we don't receive more packets
+# of that flow). The value of "established" is the amount of
+# seconds that the engine will wait to free the flow if it spend that amount
+# without receiving new packets or closing the connection. "closed" is the
+# amount of time to wait after a flow is closed (usually zero).
+#
+# There's an emergency mode that will become active under attack circumstances,
+# making the engine to check flow status faster. This configuration variables
+# use the prefix "emergency-" and work similar as the normal ones.
+# Some timeouts doesn't apply to all the protocols, like "closed", for udp and
+# icmp.
+
+flow-timeouts:
+
+ default:
+ new: 30
+ established: 300
+ closed: 0
+ emergency-new: 10
+ emergency-established: 100
+ emergency-closed: 0
+ tcp:
+ new: 60
+ established: 3600
+ closed: 120
+ emergency-new: 10
+ emergency-established: 300
+ emergency-closed: 20
+ udp:
+ new: 30
+ established: 300
+ emergency-new: 10
+ emergency-established: 100
+ icmp:
+ new: 30
+ established: 300
+ emergency-new: 10
+ emergency-established: 100
+
+# Stream engine settings. Here the TCP stream tracking and reassembly
+# engine is configured.
+#
+# stream:
+# memcap: 32mb # Can be specified in kb, mb, gb. Just a
+# # number indicates it's in bytes.
+# checksum-validation: yes # To validate the checksum of received
+# # packet. If csum validation is specified as
+# # "yes", then packet with invalid csum will not
+# # be processed by the engine stream/app layer.
+# # Warning: locally generated trafic can be
+# # generated without checksum due to hardware offload
+# # of checksum. You can control the handling of checksum
+# # on a per-interface basis via the 'checksum-checks'
+# # option
+# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread
+# midstream: false # don't allow midstream session pickups
+# async-oneside: false # don't enable async stream handling
+# inline: no # stream inline mode
+# max-synack-queued: 5 # Max different SYN/ACKs to queue
+#
+# reassembly:
+# memcap: 64mb # Can be specified in kb, mb, gb. Just a number
+# # indicates it's in bytes.
+# depth: 1mb # Can be specified in kb, mb, gb. Just a number
+# # indicates it's in bytes.
+# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least
+# # this size. Can be specified in kb, mb,
+# # gb. Just a number indicates it's in bytes.
+# # The max acceptable size is 4024 bytes.
+# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least
+# # this size. Can be specified in kb, mb,
+# # gb. Just a number indicates it's in bytes.
+# # The max acceptable size is 4024 bytes.
+# randomize-chunk-size: yes # Take a random value for chunk size around the specified value.
+# # This lower the risk of some evasion technics but could lead
+# # detection change between runs. It is set to 'yes' by default.
+# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is
+# # a random value between (1 - randomize-chunk-range/100)*randomize-chunk-size
+# # and (1 + randomize-chunk-range/100)*randomize-chunk-size. Default value
+# # of randomize-chunk-range is 10.
+#
+# raw: yes # 'Raw' reassembly enabled or disabled.
+# # raw is for content inspection by detection
+# # engine.
+#
+# chunk-prealloc: 250 # Number of preallocated stream chunks. These
+# # are used during stream inspection (raw).
+# segments: # Settings for reassembly segment pool.
+# - size: 4 # Size of the (data)segment for a pool
+# prealloc: 256 # Number of segments to prealloc and keep
+# # in the pool.
+#
+stream:
+ memcap: 32mb
+ checksum-validation: yes # reject wrong csums
+ inline: auto # auto will use inline mode in IPS mode, yes or no set it statically
+ reassembly:
+ memcap: 128mb
+ depth: 1mb # reassemble 1mb into a stream
+ toserver-chunk-size: 2560
+ toclient-chunk-size: 2560
+ randomize-chunk-size: yes
+ #randomize-chunk-range: 10
+ #raw: yes
+ #chunk-prealloc: 250
+ #segments:
+ # - size: 4
+ # prealloc: 256
+ # - size: 16
+ # prealloc: 512
+ # - size: 112
+ # prealloc: 512
+ # - size: 248
+ # prealloc: 512
+ # - size: 512
+ # prealloc: 512
+ # - size: 768
+ # prealloc: 1024
+ # - size: 1448
+ # prealloc: 1024
+ # - size: 65535
+ # prealloc: 128
+
+# Host table:
+#
+# Host table is used by tagging and per host thresholding subsystems.
+#
+host:
+ hash-size: 4096
+ prealloc: 1000
+ memcap: 16777216
+
+# Logging configuration. This is not about logging IDS alerts, but
+# IDS output about what its doing, errors, etc.
+logging:
+
+ # The default log level, can be overridden in an output section.
+ # Note that debug level logging will only be emitted if Suricata was
+ # compiled with the --enable-debug configure option.
+ #
+ # This value is overriden by the SC_LOG_LEVEL env var.
+ default-log-level: notice
+
+ # The default output format. Optional parameter, should default to
+ # something reasonable if not provided. Can be overriden in an
+ # output section. You can leave this out to get the default.
+ #
+ # This value is overriden by the SC_LOG_FORMAT env var.
+ #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- "
+
+ # A regex to filter output. Can be overridden in an output section.
+ # Defaults to empty (no filter).
+ #
+ # This value is overriden by the SC_LOG_OP_FILTER env var.
+ default-output-filter:
+
+ # Define your logging outputs. If none are defined, or they are all
+ # disabled you will get the default - console output.
+ outputs:
+ - console:
+ enabled: yes
+ - file:
+ enabled: no
+ filename: /var/log/suricata.log
+ - syslog:
+ enabled: yes
+ facility: local5
+ format: "[%i] <%d> -- "
+
+# Tilera mpipe configuration. for use on Tilera TILE-Gx.
+mpipe:
+
+ # Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
+ load-balance: dynamic
+
+ # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536
+ iqueue-packets: 2048
+
+ # List of interfaces we will listen on.
+ inputs:
+ - interface: xgbe2
+ - interface: xgbe3
+ - interface: xgbe4
+
+
+ # Relative weight of memory for packets of each mPipe buffer size.
+ stack:
+ size128: 0
+ size256: 9
+ size512: 0
+ size1024: 0
+ size1664: 7
+ size4096: 0
+ size10386: 0
+ size16384: 0
+
+# PF_RING configuration. for use with native PF_RING support
+# for more info see http://www.ntop.org/PF_RING.html
+pfring:
+ - interface: eth0
+ # Number of receive threads (>1 will enable experimental flow pinned
+ # runmode)
+ threads: 1
+
+ # Default clusterid. PF_RING will load balance packets based on flow.
+ # All threads/processes that will participate need to have the same
+ # clusterid.
+ cluster-id: 99
+
+ # Default PF_RING cluster type. PF_RING can load balance per flow or per hash.
+ # This is only supported in versions of PF_RING > 4.1.1.
+ cluster-type: cluster_flow
+ # bpf filter for this interface
+ #bpf-filter: tcp
+ # Choose checksum verification mode for the interface. At the moment
+ # of the capture, some packets may be with an invalid checksum due to
+ # offloading to the network card of the checksum computation.
+ # Possible values are:
+ # - rxonly: only compute checksum for packets received by network card.
+ # - yes: checksum validation is forced
+ # - no: checksum validation is disabled
+ # - auto: suricata uses a statistical approach to detect when
+ # checksum off-loading is used. (default)
+ # Warning: 'checksum-validation' must be set to yes to have any validation
+ #checksum-checks: auto
+ # Second interface
+ #- interface: eth1
+ # threads: 3
+ # cluster-id: 93
+ # cluster-type: cluster_flow
+ # Put default values here
+ - interface: default
+ #threads: 2
+
+pcap:
+ - interface: eth0
+ # On Linux, pcap will try to use mmaped capture and will use buffer-size
+ # as total of memory used by the ring. So set this to something bigger
+ # than 1% of your bandwidth.
+ #buffer-size: 16777216
+ #bpf-filter: "tcp and port 25"
+ # Choose checksum verification mode for the interface. At the moment
+ # of the capture, some packets may be with an invalid checksum due to
+ # offloading to the network card of the checksum computation.
+ # Possible values are:
+ # - yes: checksum validation is forced
+ # - no: checksum validation is disabled
+ # - auto: suricata uses a statistical approach to detect when
+ # checksum off-loading is used. (default)
+ # Warning: 'checksum-validation' must be set to yes to have any validation
+ #checksum-checks: auto
+ # With some accelerator cards using a modified libpcap (like myricom), you
+ # may want to have the same number of capture threads as the number of capture
+ # rings. In this case, set up the threads variable to N to start N threads
+ # listening on the same interface.
+ #threads: 16
+ # set to no to disable promiscuous mode:
+ #promisc: no
+ # set snaplen, if not set it defaults to MTU if MTU can be known
+ # via ioctl call and to full capture if not.
+ #snaplen: 1518
+ # Put default values here
+ - interface: default
+ #checksum-checks: auto
+
+pcap-file:
+ # Possible values are:
+ # - yes: checksum validation is forced
+ # - no: checksum validation is disabled
+ # - auto: suricata uses a statistical approach to detect when
+ # checksum off-loading is used. (default)
+ # Warning: 'checksum-validation' must be set to yes to have checksum tested
+ checksum-checks: auto
+
+# For FreeBSD ipfw(8) divert(4) support.
+# Please make sure you have ipfw_load="YES" and ipdivert_load="YES"
+# in /etc/loader.conf or kldload'ing the appropriate kernel modules.
+# Additionally, you need to have an ipfw rule for the engine to see
+# the packets from ipfw. For Example:
+#
+# ipfw add 100 divert 8000 ip from any to any
+#
+# The 8000 above should be the same number you passed on the command
+# line, i.e. -d 8000
+#
+ipfw:
+
+ # Reinject packets at the specified ipfw rule number. This config
+ # option is the ipfw rule number AT WHICH rule processing continues
+ # in the ipfw processing system after the engine has finished
+ # inspecting the packet for acceptance. If no rule number is specified,
+ # accepted packets are reinjected at the divert rule which they entered
+ # and IPFW rule processing continues. No check is done to verify
+ # this will rule makes sense so care must be taken to avoid loops in ipfw.
+ #
+ ## The following example tells the engine to reinject packets
+ # back into the ipfw firewall AT rule number 5500:
+ #
+ # ipfw-reinjection-rule-number: 5500
+
+# Set the default rule path here to search for the files.
+# if not set, it will look at the current working dir
+default-rule-path: /etc/suricata/rules
+rule-files:
+ - botcc.rules
+ - ciarmy.rules
+ - compromised.rules
+ - drop.rules
+ - dshield.rules
+ - emerging-activex.rules
+ - emerging-attack_response.rules
+ - emerging-chat.rules
+ - emerging-current_events.rules
+ - emerging-dns.rules
+ - emerging-dos.rules
+ - emerging-exploit.rules
+ - emerging-ftp.rules
+ - emerging-games.rules
+ - emerging-icmp_info.rules
+# - emerging-icmp.rules
+ - emerging-imap.rules
+ - emerging-inappropriate.rules
+ - emerging-malware.rules
+ - emerging-misc.rules
+ - emerging-mobile_malware.rules
+ - emerging-netbios.rules
+ - emerging-p2p.rules
+ - emerging-policy.rules
+ - emerging-pop3.rules
+ - emerging-rpc.rules
+ - emerging-scada.rules
+ - emerging-scan.rules
+ - emerging-shellcode.rules
+ - emerging-smtp.rules
+ - emerging-snmp.rules
+ - emerging-sql.rules
+ - emerging-telnet.rules
+ - emerging-tftp.rules
+ - emerging-trojan.rules
+ - emerging-user_agents.rules
+ - emerging-voip.rules
+ - emerging-web_client.rules
+ - emerging-web_server.rules
+ - emerging-web_specific_apps.rules
+ - emerging-worm.rules
+ - tor.rules
+ - decoder-events.rules # available in suricata sources under rules dir
+ - stream-events.rules # available in suricata sources under rules dir
+ - http-events.rules # available in suricata sources under rules dir
+ - smtp-events.rules # available in suricata sources under rules dir
+ - dns-events.rules # available in suricata sources under rules dir
+ - tls-events.rules # available in suricata sources under rules dir
+
+classification-file: /etc/suricata/classification.config
+reference-config-file: /etc/suricata/reference.config
+
+# Holds variables that would be used by the engine.
+vars:
+
+ # Holds the address group vars that would be passed in a Signature.
+ # These would be retrieved during the Signature address parsing stage.
+ address-groups:
+
+ HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
+
+ EXTERNAL_NET: "!$HOME_NET"
+
+ HTTP_SERVERS: "$HOME_NET"
+
+ SMTP_SERVERS: "$HOME_NET"
+
+ SQL_SERVERS: "$HOME_NET"
+
+ DNS_SERVERS: "$HOME_NET"
+
+ TELNET_SERVERS: "$HOME_NET"
+
+ AIM_SERVERS: "$EXTERNAL_NET"
+
+ DNP3_SERVER: "$HOME_NET"
+
+ DNP3_CLIENT: "$HOME_NET"
+
+ MODBUS_CLIENT: "$HOME_NET"
+
+ MODBUS_SERVER: "$HOME_NET"
+
+ ENIP_CLIENT: "$HOME_NET"
+
+ ENIP_SERVER: "$HOME_NET"
+
+ # Holds the port group vars that would be passed in a Signature.
+ # These would be retrieved during the Signature port parsing stage.
+ port-groups:
+
+ HTTP_PORTS: "80"
+
+ SHELLCODE_PORTS: "!80"
+
+ ORACLE_PORTS: 1521
+
+ SSH_PORTS: 22
+
+ DNP3_PORTS: 20000
+
+# Set the order of alerts bassed on actions
+# The default order is pass, drop, reject, alert
+action-order:
+ - pass
+ - drop
+ - reject
+ - alert
+
+# IP Reputation
+#reputation-categories-file: /etc/suricata/iprep/categories.txt
+#default-reputation-path: /etc/suricata/iprep
+#reputation-files:
+# - reputation.list
+
+# Host specific policies for defragmentation and TCP stream
+# reassembly. The host OS lookup is done using a radix tree, just
+# like a routing table so the most specific entry matches.
+host-os-policy:
+ # Make the default policy windows.
+ windows: [0.0.0.0/0]
+ bsd: []
+ bsd-right: []
+ old-linux: []
+ linux: [10.0.0.0/8, 192.168.1.100, "8762:2352:6241:7245:E000:0000:0000:0000"]
+ old-solaris: []
+ solaris: ["::1"]
+ hpux10: []
+ hpux11: []
+ irix: []
+ macos: []
+ vista: []
+ windows2k3: []
+
+
+# Limit for the maximum number of asn1 frames to decode (default 256)
+asn1-max-frames: 256
+
+# When run with the option --engine-analysis, the engine will read each of
+# the parameters below, and print reports for each of the enabled sections
+# and exit. The reports are printed to a file in the default log dir
+# given by the parameter "default-log-dir", with engine reporting
+# subsection below printing reports in its own report file.
+engine-analysis:
+ # enables printing reports for fast-pattern for every rule.
+ rules-fast-pattern: yes
+ # enables printing reports for each rule
+ rules: yes
+
+#recursion and match limits for PCRE where supported
+pcre:
+ match-limit: 3500
+ match-limit-recursion: 1500
+
+# Holds details on the app-layer. The protocols section details each protocol.
+# Under each protocol, the default value for detection-enabled and "
+# parsed-enabled is yes, unless specified otherwise.
+# Each protocol covers enabling/disabling parsers for all ipprotos
+# the app-layer protocol runs on. For example "dcerpc" refers to the tcp
+# version of the protocol as well as the udp version of the protocol.
+# The option "enabled" takes 3 values - "yes", "no", "detection-only".
+# "yes" enables both detection and the parser, "no" disables both, and
+# "detection-only" enables detection only(parser disabled).
+app-layer:
+ protocols:
+ tls:
+ enabled: yes
+ detection-ports:
+ dp: 443
+
+ #no-reassemble: yes
+ dcerpc:
+ enabled: yes
+ ftp:
+ enabled: yes
+ ssh:
+ enabled: yes
+ smtp:
+ enabled: yes
+ imap:
+ enabled: detection-only
+ msn:
+ enabled: detection-only
+ smb:
+ enabled: yes
+ detection-ports:
+ dp: 139
+ # smb2 detection is disabled internally inside the engine.
+ #smb2:
+ # enabled: yes
+ dns:
+ # memcaps. Globally and per flow/state.
+ #global-memcap: 16mb
+ #state-memcap: 512kb
+
+ # How many unreplied DNS requests are considered a flood.
+ # If the limit is reached, app-layer-event:dns.flooded; will match.
+ #request-flood: 500
+
+ tcp:
+ enabled: yes
+ detection-ports:
+ dp: 53
+ udp:
+ enabled: yes
+ detection-ports:
+ dp: 53
+ http:
+ enabled: yes
+ # memcap: 64mb
+
+ ###########################################################################
+ # Configure libhtp.
+ #
+ #
+ # default-config: Used when no server-config matches
+ # personality: List of personalities used by default
+ # request-body-limit: Limit reassembly of request body for inspection
+ # by http_client_body & pcre /P option.
+ # response-body-limit: Limit reassembly of response body for inspection
+ # by file_data, http_server_body & pcre /Q option.
+ # double-decode-path: Double decode path section of the URI
+ # double-decode-query: Double decode query section of the URI
+ #
+ # server-config: List of server configurations to use if address matches
+ # address: List of ip addresses or networks for this block
+ # personalitiy: List of personalities used by this block
+ # request-body-limit: Limit reassembly of request body for inspection
+ # by http_client_body & pcre /P option.
+ # response-body-limit: Limit reassembly of response body for inspection
+ # by file_data, http_server_body & pcre /Q option.
+ # double-decode-path: Double decode path section of the URI
+ # double-decode-query: Double decode query section of the URI
+ #
+ # uri-include-all: Include all parts of the URI. By default the
+ # 'scheme', username/password, hostname and port
+ # are excluded. Setting this option to true adds
+ # all of them to the normalized uri as inspected
+ # by http_uri, urilen, pcre with /U and the other
+ # keywords that inspect the normalized uri.
+ # Note that this does not affect http_raw_uri.
+ # Also, note that including all was the default in
+ # 1.4 and 2.0beta1.
+ #
+ # meta-field-limit: Hard size limit for request and response size
+ # limits. Applies to request line and headers,
+ # response line and headers. Does not apply to
+ # request or response bodies. Default is 18k.
+ # If this limit is reached an event is raised.
+ #
+ # Currently Available Personalities:
+ # Minimal
+ # Generic
+ # IDS (default)
+ # IIS_4_0
+ # IIS_5_0
+ # IIS_5_1
+ # IIS_6_0
+ # IIS_7_0
+ # IIS_7_5
+ # Apache_2
+ ###########################################################################
+ libhtp:
+
+ default-config:
+ personality: IDS
+
+ # Can be specified in kb, mb, gb. Just a number indicates
+ # it's in bytes.
+ request-body-limit: 3072
+ response-body-limit: 3072
+
+ # inspection limits
+ request-body-minimal-inspect-size: 32kb
+ request-body-inspect-window: 4kb
+ response-body-minimal-inspect-size: 32kb
+ response-body-inspect-window: 4kb
+ # Take a random value for inspection sizes around the specified value.
+ # This lowers the risk of some evasion techniques but could lead to
+ # detection changes between runs. It is set to 'yes' by default.
+ #randomize-inspection-sizes: yes
+ # If randomize-inspection-sizes is active, the values of the various
+ # inspection sizes will be chosen in the [1 - range%, 1 + range%]
+ # range.
+ # Default value of randomize-inspection-range is 10.
+ #randomize-inspection-range: 10
+
+ # decoding
+ double-decode-path: no
+ double-decode-query: no
+
+ server-config:
+
+ #- apache:
+ # address: [192.168.1.0/24, 127.0.0.0/8, "::1"]
+ # personality: Apache_2
+ # # Can be specified in kb, mb, gb. Just a number indicates
+ # # it's in bytes.
+ # request-body-limit: 4096
+ # response-body-limit: 4096
+ # double-decode-path: no
+ # double-decode-query: no
+
+ #- iis7:
+ # address:
+ # - 192.168.0.0/24
+ # - 192.168.10.0/24
+ # personality: IIS_7_0
+ # # Can be specified in kb, mb, gb. Just a number indicates
+ # # it's in bytes.
+ # request-body-limit: 4096
+ # response-body-limit: 4096
+ # double-decode-path: no
+ # double-decode-query: no
+
+# Profiling settings. Only effective if Suricata has been built with
+# the --enable-profiling configure flag.
+#
+profiling:
+ # Run profiling for every xth packet. The default is 1, which means we
+ # profile every packet. If set to 1000, one packet is profiled for every
+ # 1000 received.
+ #sample-rate: 1000
+
+ # rule profiling
+ rules:
+
+ # Profiling can be disabled here, but it will still have a
+ # performance impact if compiled in.
+ enabled: yes
+ filename: rule_perf.log
+ append: yes
+
+ # Sort options: ticks, avgticks, checks, matches, maxticks
+ sort: avgticks
+
+ # Limit the number of items printed at exit.
+ limit: 100
+
+ # per keyword profiling
+ keywords:
+ enabled: yes
+ filename: keyword_perf.log
+ append: yes
+
+ # packet profiling
+ packets:
+
+ # Profiling can be disabled here, but it will still have a
+ # performance impact if compiled in.
+ enabled: yes
+ filename: packet_stats.log
+ append: yes
+
+ # per packet csv output
+ csv:
+
+ # Output can be disabled here, but it will still have a
+ # performance impact if compiled in.
+ enabled: no
+ filename: packet_stats.csv
+
+ # profiling of locking. Only available when Suricata was built with
+ # --enable-profiling-locks.
+ locks:
+ enabled: no
+ filename: lock_stats.log
+ append: yes
+
+# Suricata core dump configuration. Limits the size of the core dump file to
+# approximately max-dump. The actual core dump size will be a multiple of the
+# page size. Core dumps that would be larger than max-dump are truncated. On
+# Linux, the actual core dump size may be a few pages larger than max-dump.
+# Setting max-dump to 0 disables core dumping.
+# Setting max-dump to 'unlimited' will give the full core dump file.
+# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size
+# to be 'unlimited'.
+
+coredump:
+ max-dump: unlimited
+
+napatech:
+ # The Host Buffer Allowance for all streams
+ # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
+ hba: -1
+
+ # use-all-streams set to "yes" will query the Napatech service for all configured
+ # streams and listen on all of them. When set to "no" the streams config array
+ # will be used.
+ use-all-streams: yes
+
+ # The streams to listen on
+ streams: [1, 2, 3]
+
+# Includes. Files included here will be handled as if they were
+# inlined in this configuration file.
+#include: include1.yaml
+#include: include2.yaml
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/tmpfiles.suricata b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/tmpfiles.suricata
new file mode 100644
index 0000000000..fbf37848ee
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/tmpfiles.suricata
@@ -0,0 +1,2 @@
+#Type Path Mode UID GID Age Argument
+d /var/log/suricata 0755 root root
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/volatiles.03_suricata b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/volatiles.03_suricata
new file mode 100644
index 0000000000..4627bd3b0f
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/files/volatiles.03_suricata
@@ -0,0 +1,2 @@
+# <type> <owner> <group> <mode> <path> <linksource>
+d root root 0755 /var/log/suricata none
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/libhtp_0.5.37.bb b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/libhtp_0.5.37.bb
new file mode 100644
index 0000000000..34e72e9cb9
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/libhtp_0.5.37.bb
@@ -0,0 +1,27 @@
+SUMMARY = "LibHTP is a security-aware parser for the HTTP protocol and the related bits and pieces."
+
+require suricata.inc
+
+LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=596ab7963a1a0e5198e5a1c4aa621843"
+
+SRC_URI = "git://github.com/OISF/libhtp.git;protocol=https;branch=0.5.x"
+SRCREV = "eaa2db29e65e7f2691c18a9022aeb5fb836ec5f1"
+
+DEPENDS = "zlib"
+
+inherit autotools-brokensep pkgconfig
+
+CFLAGS += "-D_DEFAULT_SOURCE"
+
+#S = "${WORKDIR}/suricata-${VER}/${BPN}"
+
+S = "${WORKDIR}/git"
+
+do_configure () {
+ cd ${S}
+ ./autogen.sh
+ oe_runconf
+}
+
+RDEPENDS_${PN} += "zlib"
+
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata.inc b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata.inc
new file mode 100644
index 0000000000..85f419e48a
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata.inc
@@ -0,0 +1,8 @@
+HOMEPAGE = "http://suricata-ids.org/"
+SECTION = "security Monitor/Admin"
+LICENSE = "GPLv2"
+
+VER = "6.0.2"
+SRC_URI = "http://www.openinfosecfoundation.org/download/suricata-${VER}.tar.gz"
+
+SRC_URI[sha256sum] = "5e4647a07cb31b5d6d0049972a45375c137de908a964a44e2d6d231fa3ad4b52"
diff --git a/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata_6.0.2.bb b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata_6.0.2.bb
new file mode 100644
index 0000000000..a4255d2476
--- /dev/null
+++ b/meta-security/dynamic-layers/meta-rust/recipes-ids/suricata/suricata_6.0.2.bb
@@ -0,0 +1,193 @@
+SUMMARY = "The Suricata Engine is an Open Source Next Generation Intrusion Detection and Prevention Engine"
+
+require suricata.inc
+
+DEPENDS = "lz4 libhtp"
+
+LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=c70d8d3310941dcdfcd1e02800a1f548"
+
+SRC_URI += " \
+ file://volatiles.03_suricata \
+ file://tmpfiles.suricata \
+ file://suricata.yaml \
+ file://suricata.service \
+ file://run-ptest \
+ file://fixup.patch \
+ "
+
+SRC_URI += " \
+ crate://crates.io/autocfg/1.0.1 \
+ crate://crates.io/semver-parser/0.7.0 \
+ crate://crates.io/arrayvec/0.4.12 \
+ crate://crates.io/ryu/1.0.5 \
+ crate://crates.io/libc/0.2.86 \
+ crate://crates.io/bitflags/1.2.1 \
+ crate://crates.io/version_check/0.9.2 \
+ crate://crates.io/memchr/2.3.4 \
+ crate://crates.io/nodrop/0.1.14 \
+ crate://crates.io/cfg-if/0.1.9 \
+ crate://crates.io/static_assertions/0.3.4 \
+ crate://crates.io/getrandom/0.1.16 \
+ crate://crates.io/cfg-if/1.0.0 \
+ crate://crates.io/siphasher/0.3.3 \
+ crate://crates.io/ppv-lite86/0.2.10 \
+ crate://crates.io/proc-macro-hack/0.5.19 \
+ crate://crates.io/proc-macro2/0.4.30 \
+ crate://crates.io/unicode-xid/0.1.0 \
+ crate://crates.io/syn/0.15.44 \
+ crate://crates.io/build_const/0.2.1 \
+ crate://crates.io/num-derive/0.2.5 \
+ crate://crates.io/base64/0.11.0 \
+ crate://crates.io/widestring/0.4.3 \
+ crate://crates.io/md5/0.7.0 \
+ crate://crates.io/uuid/0.8.2 \
+ crate://crates.io/byteorder/1.4.2 \
+ crate://crates.io/semver/0.9.0 \
+ crate://crates.io/nom/5.1.1 \
+ crate://crates.io/num-traits/0.2.14 \
+ crate://crates.io/num-integer/0.1.44 \
+ crate://crates.io/num-bigint/0.2.6 \
+ crate://crates.io/num-bigint/0.3.1 \
+ crate://crates.io/num-rational/0.2.4 \
+ crate://crates.io/num-complex/0.2.4 \
+ crate://crates.io/num-iter/0.1.42 \
+ crate://crates.io/phf_shared/0.8.0 \
+ crate://crates.io/crc/1.8.1 \
+ crate://crates.io/rustc_version/0.2.3 \
+ crate://crates.io/phf/0.8.0 \
+ crate://crates.io/lexical-core/0.6.7 \
+ crate://crates.io/time/0.1.44 \
+ crate://crates.io/quote/0.6.13 \
+ crate://crates.io/rand_core/0.5.1 \
+ crate://crates.io/rand_chacha/0.2.2 \
+ crate://crates.io/rand_pcg/0.2.1 \
+ crate://crates.io/num-traits/0.1.43 \
+ crate://crates.io/rand/0.7.3 \
+ crate://crates.io/enum_primitive/0.1.1 \
+ crate://crates.io/phf_generator/0.8.0 \
+ crate://crates.io/phf_codegen/0.8.0 \
+ crate://crates.io/tls-parser/0.9.4 \
+ crate://crates.io/num/0.2.1 \
+ crate://crates.io/rusticata-macros/2.1.0 \
+ crate://crates.io/ntp-parser/0.4.0 \
+ crate://crates.io/der-oid-macro/0.2.0 \
+ crate://crates.io/der-parser/3.0.4 \
+ crate://crates.io/ipsec-parser/0.5.0 \
+ crate://crates.io/x509-parser/0.6.5 \
+ crate://crates.io/der-parser/4.1.0 \
+ crate://crates.io/snmp-parser/0.6.0 \
+ crate://crates.io/kerberos-parser/0.5.0 \
+ crate://crates.io/wasi/0.10.0+wasi-snapshot-preview1 \
+ crate://crates.io/winapi/0.3.9 \
+ crate://crates.io/winapi-i686-pc-windows-gnu/0.4.0 \
+ crate://crates.io/winapi-x86_64-pc-windows-gnu/0.4.0 \
+ crate://crates.io/log/0.4.0 \
+ crate://crates.io/rand_hc/0.2.0 \
+ crate://crates.io/wasi/0.9.0+wasi-snapshot-preview1 \
+ "
+
+# test case support
+SRC_URI += " \
+ crate://crates.io/test-case/1.0.1 \
+ crate://crates.io/proc-macro2/1.0.1 \
+ crate://crates.io/quote/1.0.1 \
+ crate://crates.io/syn/1.0.1 \
+ crate://crates.io/unicode-xid/0.2.0 \
+ "
+
+inherit autotools pkgconfig python3native systemd ptest cargo
+
+EXTRA_OECONF += " --disable-debug \
+ --disable-gccmarch-native \
+ --enable-non-bundled-htp \
+ --disable-suricata-update \
+ --with-libhtp-includes=${STAGING_INCDIR} --with-libhtp-libraries=${STAGING_LIBDIR} \
+ "
+
+CARGO_SRC_DIR = "rust"
+
+B = "${S}"
+
+PACKAGECONFIG ??= "jansson file pcre yaml python pcap cap-ng net nfnetlink nss nspr "
+PACKAGECONFIG_append = " ${@bb.utils.contains('DISTRO_FEATURES', 'ptest', 'unittests', '', d)}"
+
+PACKAGECONFIG[pcre] = "--with-libpcre-includes=${STAGING_INCDIR} --with-libpcre-libraries=${STAGING_LIBDIR}, ,libpcre ,"
+PACKAGECONFIG[yaml] = "--with-libyaml-includes=${STAGING_INCDIR} --with-libyaml-libraries=${STAGING_LIBDIR}, ,libyaml ,"
+PACKAGECONFIG[pcap] = "--with-libpcap-includes=${STAGING_INCDIR} --with-libpcap-libraries=${STAGING_LIBDIR}, ,libpcap"
+PACKAGECONFIG[cap-ng] = "--with-libcap_ng-includes=${STAGING_INCDIR} --with-libcap_ng-libraries=${STAGING_LIBDIR}, ,libcap-ng , "
+PACKAGECONFIG[net] = "--with-libnet-includes=${STAGING_INCDIR} --with-libnet-libraries=${STAGING_LIBDIR}, , libnet,"
+PACKAGECONFIG[nfnetlink] = "--with-libnfnetlink-includes=${STAGING_INCDIR} --with-libnfnetlink-libraries=${STAGING_LIBDIR}, ,libnfnetlink ,"
+PACKAGECONFIG[nfq] = "--enable-nfqueue, --disable-nfqueue,libnetfilter-queue,"
+
+PACKAGECONFIG[jansson] = "--with-libjansson-includes=${STAGING_INCDIR} --with-libjansson-libraries=${STAGING_LIBDIR},,jansson, jansson"
+PACKAGECONFIG[file] = ",,file, file"
+PACKAGECONFIG[nss] = "--with-libnss-includes=${STAGING_INCDIR} --with-libnss-libraries=${STAGING_LIBDIR}, nss, nss,"
+PACKAGECONFIG[nspr] = "--with-libnspr-includes=${STAGING_INCDIR} --with-libnspr-libraries=${STAGING_LIBDIR}, nspr, nspr,"
+PACKAGECONFIG[python] = "--enable-python, --disable-python, python3, python3-core"
+PACKAGECONFIG[unittests] = "--enable-unittests, --disable-unittests,"
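+
+# Optional features that are not in the default PACKAGECONFIG (for example
+# nfqueue support) can be switched on from a distro or local configuration.
+# A hypothetical local.conf fragment (not part of this recipe) would be:
+#   PACKAGECONFIG_append_pn-suricata = " nfq"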
+
+export logdir = "${localstatedir}/log"
+
+CACHED_CONFIGUREVARS = "ac_cv_func_malloc_0_nonnull=yes ac_cv_func_realloc_0_nonnull=yes"
+
+do_configure_prepend () {
+ oe_runconf
+}
+
+do_compile () {
+ # Build the Rust components with cargo first, bypassing the make rules
+ # provided by this package (the Makefile is patched to skip the rust subdir)
+ cargo_do_compile
+
+ # Finish building
+ cd ${S}
+ make
+}
+
+do_install () {
+ install -d ${D}${sysconfdir}/suricata
+
+ oe_runmake install DESTDIR=${D}
+
+ install -d ${D}${sysconfdir}/suricata ${D}${sysconfdir}/default/volatiles
+ install -m 0644 ${WORKDIR}/volatiles.03_suricata ${D}${sysconfdir}/default/volatiles/03_suricata
+
+ install -m 0644 ${S}/threshold.config ${D}${sysconfdir}/suricata
+ install -m 0644 ${S}/suricata.yaml ${D}${sysconfdir}/suricata
+
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
+ install -d ${D}${sysconfdir}/tmpfiles.d
+ install -m 0644 ${WORKDIR}/tmpfiles.suricata ${D}${sysconfdir}/tmpfiles.d/suricata.conf
+
+ install -d ${D}${systemd_unitdir}/system
+ sed -e s:/etc:${sysconfdir}:g \
+ -e s:/var/run:/run:g \
+ -e s:/var:${localstatedir}:g \
+ -e s:/usr/bin:${bindir}:g \
+ -e s:/bin/kill:${base_bindir}/kill:g \
+ -e s:/usr/lib:${libdir}:g \
+ ${WORKDIR}/suricata.service > ${D}${systemd_unitdir}/system/suricata.service
+ fi
+
+ # Remove /var/run as it is created on startup
+ rm -rf ${D}${localstatedir}/run
+
+ sed -i -e "s:#!.*$:#!${USRBINPATH}/env ${PYTHON_PN}:g" ${D}${bindir}/suricatasc
+ sed -i -e "s:#!.*$:#!${USRBINPATH}/env ${PYTHON_PN}:g" ${D}${bindir}/suricatactl
+}
+
+pkg_postinst_ontarget_${PN} () {
+if command -v systemd-tmpfiles >/dev/null; then
+ systemd-tmpfiles --create ${sysconfdir}/tmpfiles.d/suricata.conf
+elif [ -e ${sysconfdir}/init.d/populate-volatile.sh ]; then
+ ${sysconfdir}/init.d/populate-volatile.sh update
+fi
+}
+
+SYSTEMD_PACKAGES = "${PN}"
+
+PACKAGES =+ "${PN}-python"
+FILES_${PN} += "${systemd_unitdir} ${sysconfdir}/tmpfiles.d"
+FILES_${PN}-python = "${bindir}/suricatasc ${PYTHON_SITEPACKAGES_DIR}"
+
+CONFFILES_${PN} = "${sysconfdir}/suricata/suricata.yaml"
diff --git a/meta-security/kas/kas-security-base.yml b/meta-security/kas/kas-security-base.yml
index fca0ebe095..aa68336e18 100644
--- a/meta-security/kas/kas-security-base.yml
+++ b/meta-security/kas/kas-security-base.yml
@@ -28,6 +28,7 @@ repos:
meta-perl:
meta-python:
meta-networking:
+ meta-filesystems:
local_conf_header:
base: |
@@ -42,7 +43,6 @@ local_conf_header:
INHERIT += "rm_work"
BB_NUMBER_THREADS="24"
BB_NUMBER_PARSE_THREADS="12"
- PARALLEL_MAKE="-j 8"
BB_TASK_NICE_LEVEL = '5'
BB_TASK_NICE_LEVEL_task-testimage = '0'
BB_TASK_IONICE_LEVEL = '2.7'
diff --git a/meta-security/kas/kas-security-parsec.yml b/meta-security/kas/kas-security-parsec.yml
new file mode 100644
index 0000000000..6152f0c1d2
--- /dev/null
+++ b/meta-security/kas/kas-security-parsec.yml
@@ -0,0 +1,21 @@
+header:
+ version: 9
+ includes:
+ - kas-security-base.yml
+
+repos:
+ meta-security:
+ layers:
+ meta-parsec:
+
+ meta-rust:
+ url: https://github.com/meta-rust/meta-rust.git
+ refspec: master
+
+ meta-clang:
+ url: https://github.com/kraj/meta-clang.git
+ refspec: master
+
+local_conf_header:
+ meta-parsec: |
+ IMAGE_INSTALL_append = " parsec-service parsec-tool"
diff --git a/meta-security/kas/qemuarm-parsec.yml b/meta-security/kas/qemuarm-parsec.yml
new file mode 100644
index 0000000000..cef281851d
--- /dev/null
+++ b/meta-security/kas/qemuarm-parsec.yml
@@ -0,0 +1,6 @@
+header:
+ version: 8
+ includes:
+ - kas-security-parsec.yml
+
+machine: qemuarm
diff --git a/meta-security/kas/qemuarm64-parsec.yml b/meta-security/kas/qemuarm64-parsec.yml
new file mode 100644
index 0000000000..9b593bc670
--- /dev/null
+++ b/meta-security/kas/qemuarm64-parsec.yml
@@ -0,0 +1,6 @@
+header:
+ version: 8
+ includes:
+ - kas-security-parsec.yml
+
+machine: qemuarm64
diff --git a/meta-security/kas/qemuppc-parsec.yml b/meta-security/kas/qemuppc-parsec.yml
new file mode 100644
index 0000000000..1176d13693
--- /dev/null
+++ b/meta-security/kas/qemuppc-parsec.yml
@@ -0,0 +1,6 @@
+header:
+ version: 8
+ includes:
+ - kas-security-parsec.yml
+
+machine: qemuppc
diff --git a/meta-security/kas/qemux86-64-parsec.yml b/meta-security/kas/qemux86-64-parsec.yml
new file mode 100644
index 0000000000..ec39c14eb7
--- /dev/null
+++ b/meta-security/kas/qemux86-64-parsec.yml
@@ -0,0 +1,6 @@
+header:
+ version: 8
+ includes:
+ - kas-security-parsec.yml
+
+machine: qemux86-64
diff --git a/meta-security/kas/qemux86-parsec.yml b/meta-security/kas/qemux86-parsec.yml
new file mode 100644
index 0000000000..370947d5f9
--- /dev/null
+++ b/meta-security/kas/qemux86-parsec.yml
@@ -0,0 +1,6 @@
+header:
+ version: 8
+ includes:
+ - kas-security-parsec.yml
+
+machine: qemux86
diff --git a/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima.bb b/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima.bb
index 77f6f7cffa..6471c532c7 100644
--- a/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima.bb
+++ b/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima.bb
@@ -14,6 +14,9 @@ LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384
# to this recipe can just point towards one of its own files.
IMA_POLICY ?= "ima-policy-hashed"
+# Force the IMA procedure to run even if the 'no_ima' boot parameter is present.
+IMA_FORCE ?= "false"
+
SRC_URI = " file://ima"
inherit features_check
@@ -23,6 +26,8 @@ do_install () {
install -d ${D}/${sysconfdir}/ima
install -d ${D}/init.d
install ${WORKDIR}/ima ${D}/init.d/20-ima
+
+ sed -i "s/@@FORCE_IMA@@/${IMA_FORCE}/g" ${D}/init.d/20-ima
}
FILES_${PN} = "/init.d ${sysconfdir}"
diff --git a/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima/ima b/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima/ima
index cff26a3352..897149494e 100644
--- a/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima/ima
+++ b/meta-security/meta-integrity/recipes-core/initrdscripts/initramfs-framework-ima/ima
@@ -2,11 +2,16 @@
#
# Loads IMA policy into the kernel.
+force_ima=@@FORCE_IMA@@
+
ima_enabled() {
- if [ "$bootparam_no_ima" = "true" ]; then
+ if [ "$force_ima" = "true" ]; then
+ return 0
+ elif [ "$bootparam_no_ima" = "true" ]; then
return 1
+ else
+ return 0
fi
- return 0
}
ima_run() {
diff --git a/meta-security/meta-integrity/recipes-security/ima_policy_appraise_all/ima-policy-appraise-all_1.0.bb b/meta-security/meta-integrity/recipes-security/ima_policy_appraise_all/ima-policy-appraise-all_1.0.bb
index da62a4cf8c..84ea16120e 100644
--- a/meta-security/meta-integrity/recipes-security/ima_policy_appraise_all/ima-policy-appraise-all_1.0.bb
+++ b/meta-security/meta-integrity/recipes-security/ima_policy_appraise_all/ima-policy-appraise-all_1.0.bb
@@ -2,19 +2,14 @@ SUMMARY = "IMA sample simple appraise policy "
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
-# This policy file will get installed as /etc/ima/ima-policy.
-# It is located via the normal file search path, so a .bbappend
-# to this recipe can just point towards one of its own files.
-IMA_POLICY ?= "ima_policy_appraise_all"
-
-SRC_URI = " file://${IMA_POLICY}"
+SRC_URI = " file://ima_policy_appraise_all"
inherit features_check
REQUIRED_DISTRO_FEATURES = "ima"
do_install () {
install -d ${D}/${sysconfdir}/ima
- install ${WORKDIR}/${IMA_POLICY} ${D}/${sysconfdir}/ima/ima-policy
+ install ${WORKDIR}/ima_policy_appraise_all ${D}/${sysconfdir}/ima/ima-policy
}
FILES_${PN} = "${sysconfdir}/ima"
diff --git a/meta-security/meta-integrity/recipes-security/ima_policy_hashed/ima-policy-hashed_1.0.bb b/meta-security/meta-integrity/recipes-security/ima_policy_hashed/ima-policy-hashed_1.0.bb
index ebb0426467..ff7169ef57 100644
--- a/meta-security/meta-integrity/recipes-security/ima_policy_hashed/ima-policy-hashed_1.0.bb
+++ b/meta-security/meta-integrity/recipes-security/ima_policy_hashed/ima-policy-hashed_1.0.bb
@@ -2,13 +2,8 @@ SUMMARY = "IMA sample hash policy"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
-# This policy file will get installed as /etc/ima/ima-policy.
-# It is located via the normal file search path, so a .bbappend
-# to this recipe can just point towards one of its own files.
-IMA_POLICY ?= "ima_policy_hashed"
-
SRC_URI = " \
- file://${IMA_POLICY} \
+ file://ima_policy_hashed \
"
inherit features_check
@@ -16,7 +11,7 @@ REQUIRED_DISTRO_FEATURES = "ima"
do_install () {
install -d ${D}/${sysconfdir}/ima
- install ${WORKDIR}/${IMA_POLICY} ${D}/${sysconfdir}/ima/ima-policy
+ install ${WORKDIR}/ima_policy_hashed ${D}/${sysconfdir}/ima/ima-policy
}
FILES_${PN} = "${sysconfdir}/ima"
diff --git a/meta-security/meta-integrity/recipes-security/ima_policy_simple/ima-policy-simple_1.0.bb b/meta-security/meta-integrity/recipes-security/ima_policy_simple/ima-policy-simple_1.0.bb
index cb4b6b8abc..0e56aec515 100644
--- a/meta-security/meta-integrity/recipes-security/ima_policy_simple/ima-policy-simple_1.0.bb
+++ b/meta-security/meta-integrity/recipes-security/ima_policy_simple/ima-policy-simple_1.0.bb
@@ -2,19 +2,14 @@ SUMMARY = "IMA sample simple policy"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
-# This policy file will get installed as /etc/ima/ima-policy.
-# It is located via the normal file search path, so a .bbappend
-# to this recipe can just point towards one of its own files.
-IMA_POLICY ?= "ima_policy_simple"
-
-SRC_URI = " file://${IMA_POLICY}"
+SRC_URI = " file://ima_policy_simple"
inherit features_check
REQUIRED_DISTRO_FEATURES = "ima"
do_install () {
install -d ${D}/${sysconfdir}/ima
- install ${WORKDIR}/${IMA_POLICY} ${D}/${sysconfdir}/ima/ima-policy
+ install ${WORKDIR}/ima_policy_simple ${D}/${sysconfdir}/ima/ima-policy
}
FILES_${PN} = "${sysconfdir}/ima"
diff --git a/meta-security/meta-parsec/README.md b/meta-security/meta-parsec/README.md
new file mode 100644
index 0000000000..a2736b694f
--- /dev/null
+++ b/meta-security/meta-parsec/README.md
@@ -0,0 +1,186 @@
+meta-parsec layer
+=================
+
+This layer contains recipes for the Parsec service with the Mbed Crypto,
+PKCS11 and TPM providers, as well as for the parsec-tool client.
+
+Dependencies
+============
+
+This layer depends on:
+
+ URI: git://git.openembedded.org/meta-openembedded
+ branch: master
+ revision: HEAD
+ prio: default
+
+ URI git://git.yoctoproject.org/meta-security
+ branch: master
+ revision: HEAD
+ prio: default
+
+ URI https://github.com/meta-rust/meta-rust.git
+ branch: master
+ revision: HEAD
+ prio: default
+
+ URI https://github.com/kraj/meta-clang.git
+ branch: master
+ revision: HEAD
+ prio: default
+
+Adding the meta-parsec layer to your build
+==========================================
+
+In order to use this layer, you need to make the build system aware of it.
+
+You can add it to the build system by adding the
+location of the meta-parsec layer to bblayers.conf, along with any
+other layers needed, e.g.:
+
+ BBLAYERS ?= " \
+ /path/to/yocto/meta \
+ /path/to/yocto/meta-yocto \
+ /path/to/yocto/meta-yocto-bsp \
+ /path/to/meta-openembedded/meta-oe \
+ /path/to/meta-openembedded/meta-python \
+ /path/to/meta-rust \
+ /path/to/meta-clang \
+ /path/to/meta-security/meta-tpm \
+ /path/to/meta-security/meta-parsec \
+ "
+
+To include the Parsec service in your image, add the following to your
+local.conf:
+
+ IMAGE_INSTALL_append = " parsec-service"
+
+ The Parsec service will be built with all the supported providers and
+deployed into the image with the default config file from the Parsec repository:
+https://github.com/parallaxsecond/parsec/blob/main/config.toml
+ The default Parsec service config file has the MbedCrypto provider
+enabled. The config file needs to be updated to use the Parsec service
+with other providers such as TPM or PKCS11. The required procedures are
+covered in the Parsec documentation:
+https://parallaxsecond.github.io/parsec-book/
+
+Updating recipes
+================
+
+ The parsec-service and parsec-tool recipes use include files with lists
+of all the required Rust crates. This allows bitbake to fetch all the necessary
+dependent crates, as well as a pegged version of the crates.io index,
+to ensure maximum reproducibility.
+ It's recommended to use cargo-bitbake to generate include files for new
+versions of parsec recipes.
+https://github.com/meta-rust/cargo-bitbake
+
+ When you have cargo-bitbake built:
+1. Check out the required version of the parsec repository.
+2. Run cargo-bitbake inside the repository. It will produce a .bb file.
+3. Create a new include file with SRC_URI and LIC_FILES_CHKSUM from the .bb file.
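+
+A minimal sketch of that workflow (the repository path and version tag are
+illustrative, and cargo-bitbake is assumed to be installed and on PATH):
+
+```bash
+git clone https://github.com/parallaxsecond/parsec.git
+cd parsec
+git checkout 0.7.0      # the required version
+cargo bitbake           # writes a parsec-service_<version>.bb file in the current directory
+# Copy SRC_URI and LIC_FILES_CHKSUM from the generated .bb into the new .inc file
+```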
+
+Manual testing with runqemu
+===========================
+
+ This layer also contains a recipe for parsec-tool, which can be used for
+manual testing of the Parsec service:
+
+ IMAGE_INSTALL_append += " parsec-tool"
+
+ There is a series of Parsec demo videos showing how to use parsec-tool
+to test the Parsec service base functionality:
+https://www.youtube.com/watch?v=ido0CyUdMHM&list=PLKjl7IFAwc4S7WQqqphCsyy6DPDxJ2Skg&index=4
+
+ You can use runqemu to start a VM with a built image file and run
+manual tests with parsec-tool.
+
+1. MbedCrypto provider
+ The default Parsec service config file has the MbedCrypto provider
+enabled. No changes are required for manual testing.
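+
+A quick smoke test from inside the VM (the subcommand name follows the
+parsec-tool help and may differ between versions):
+```bash
+parsec-tool ping    # checks that the service answers on its socket
+```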
+
+2. PKCS11 provider
+ The Software HSM can be used for manual testing of the provider by
+including it in your test image:
+
+ IMAGE_INSTALL_append += " softhsm"
+
+Inside the running VM:
+- Stop Parsec
+```bash
+systemctl stop parsec
+```
+- Initialise a token and note the resulting slot number
+```bash
+softhsm2-util --init-token --slot 0 --label "Parsec Service" --pin 123456 --so-pin 123456
+```
+- Change the token ownership:
+```bash
+for d in /var/lib/softhsm/tokens/*; do chown -R parsec $d; done
+```
+- Enable the PKCS11 provider and update its parameters in the Parsec config file
+/etc/parsec/config.toml
+```
+library_path = "/usr/lib/softhsm/libsofthsm2.so"
+slot_number = <slot number>
+user_pin = "123456"
+```
+- Start Parsec
+```bash
+systemctl start parsec
+```
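+- Optionally confirm the provider is active (illustrative parsec-tool invocation):
+```bash
+parsec-tool list-providers
+```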
+
+3. TPM provider
+ The IBM Software TPM service can be used for manual testing of the provider by
+including it in your test image:
+
+ IMAGE_INSTALL_append += " ibmswtpm2 tpm2-tools libtss2 libtss2-tcti-mssim"
+
+Inside the running VM:
+- Stop Parsec
+```bash
+systemctl stop parsec
+```
+- Start and configure the Software TPM server
+```bash
+ /usr/bin/tpm_server &
+ sleep 5
+ /usr/bin/tpm2_startup -c -T mssim
+ /usr/bin/tpm2_changeauth -c owner tpm_pass
+```
+- Enable the TPM provider and update its parameters in the Parsec config file
+/etc/parsec/config.toml
+```
+tcti = "mssim"
+owner_hierarchy_auth = "hex:74706d5f70617373"
+```
+- Start Parsec
+```bash
+systemctl start parsec
+```
+
+Maintenance
+-----------
+
+Send pull requests, patches, comments or questions to yocto@yoctoproject.org
+
+When sending single patches, please use something like:
+'git send-email -1 --to yocto@yoctoproject.org --subject-prefix=meta-parsec][PATCH'
+
+These values can be set as defaults for this repository:
+
+$ git config sendemail.to yocto@yoctoproject.org
+$ git config format.subjectPrefix meta-parsec][PATCH
+
+Now you can just do 'git send-email origin/master' to send all local patches.
+
+Maintainers: Anton Antonov <Anton.Antonov@arm.com>
+ Armin Kuster <akuster808@gmail.com>
+
+
+License
+=======
+
+All metadata is MIT licensed unless otherwise stated. Source code included
+in tree for individual recipes is under the LICENSE stated in each recipe
+(.bb file) unless otherwise stated.
diff --git a/meta-security/meta-parsec/conf/layer.conf b/meta-security/meta-parsec/conf/layer.conf
new file mode 100644
index 0000000000..2d4aa12fbe
--- /dev/null
+++ b/meta-security/meta-parsec/conf/layer.conf
@@ -0,0 +1,14 @@
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have a recipes directory, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes*/*/*.bb ${LAYERDIR}/recipes*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "parsec-layer"
+BBFILE_PATTERN_parsec-layer = "^${LAYERDIR}/"
+BBFILE_PRIORITY_parsec-layer = "5"
+
+LAYERSERIES_COMPAT_parsec-layer = "hardknott gatesgarth"
+
+LAYERDEPENDS_parsec-layer = "core rust-layer clang-layer tpm-layer"
+BBLAYERS_LAYERINDEX_NAME_parsec-layer = "meta-parsec"
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-service/files/cryptoki.patch b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/cryptoki.patch
new file mode 100644
index 0000000000..c23447967a
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/cryptoki.patch
@@ -0,0 +1,18 @@
+
+Use cryptoki v0.1.1 which supports the "generate-bindings" feature
+required for building Parsec service 0.7.0 in Yocto.
+
+Signed-off-by: Anton Antonov <Anton.Antonov@arm.com>
+Upstream-Status: Submitted
+
+--- a/Cargo.toml 2021-04-01 10:29:50.333687763 +0100
++++ b/Cargo.toml 2021-04-01 10:27:13.051860002 +0100
+@@ -37,7 +37,7 @@
+ version = "1.3.1"
+
+ [dependencies.cryptoki]
+-version = "0.1.0"
++version = "0.1.1"
+ features = ["psa-crypto-conversions"]
+ optional = true
+
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec-tmpfiles.conf b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec-tmpfiles.conf
new file mode 100644
index 0000000000..fe576a27fe
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec-tmpfiles.conf
@@ -0,0 +1,2 @@
+#Type Path Mode User Group Age Argument
+d /run/parsec 755 parsec parsec - -
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec_init b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec_init
new file mode 100755
index 0000000000..58a2897276
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/parsec_init
@@ -0,0 +1,63 @@
+#! /bin/sh -e
+
+# ------------------------------------------------------------------------------
+# Copyright (c) 2021, Arm Limited, All Rights Reserved
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+# Parsec Service SysV init script
+
+test -x /usr/libexec/parsec/parsec || exit 0
+
+case "$1" in
+ start)
+ echo -n "Starting Parsec daemon: "
+ if [ ! -f /etc/parsec/config.toml ]; then
+ echo "There is no Parsec service configuration file."
+ else
+ if [ ! -d /run/parsec ]; then
+ mkdir /run/parsec
+ chown parsec:parsec /run/parsec
+ chmod 755 /run/parsec
+ fi
+ # The start-stop-daemon in poky's busybox doesn't support the
+ # '--chdir' parameter, so change the working directory manually
+ cd /var/lib/parsec
+ RUST_LOG=info start-stop-daemon --oknodo --start --background \
+ --chuid parsec:parsec --exec /usr/libexec/parsec/parsec \
+ -- --config /etc/parsec/config.toml
+ echo "parsec."
+ fi
+ ;;
+ stop)
+ echo -n "Stopping Parsec daemon: "
+ start-stop-daemon --oknodo --stop --exec /usr/libexec/parsec/parsec
+ echo "parsec."
+ ;;
+ reload)
+ echo -n "Reloading Parsec daemon: "
+ start-stop-daemon --stop --signal SIGHUP --exec /usr/libexec/parsec/parsec
+ echo "parsec."
+ ;;
+ restart|force-reload)
+ $0 stop
+ $0 start
+ ;;
+ *)
+ echo "Usage: /etc/init.d/parsec {start|stop|restart|reload|force-reload}"
+ exit 1
+esac
+
+exit 0
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-service/files/systemd.patch b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/systemd.patch
new file mode 100644
index 0000000000..c01ff065c9
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-service/files/systemd.patch
@@ -0,0 +1,19 @@
+
+Run the Parsec service as the parsec user with /var/lib/parsec/ as its working directory.
+
+Signed-off-by: Anton Antonov <Anton.Antonov@arm.com>
+Upstream-Status: Inappropriate [deployment configuration]
+
+--- a/systemd-daemon/parsec.service 2021-03-28 18:34:18.703196235 +0100
++++ b/systemd-daemon/parsec.service 2021-03-28 18:35:14.279830299 +0100
+@@ -3,7 +3,9 @@
+ Documentation=https://parallaxsecond.github.io/parsec-book/parsec_service/install_parsec_linux.html
+
+ [Service]
+-WorkingDirectory=/home/parsec/
++User=parsec
++Group=parsec
++WorkingDirectory=/var/lib/parsec/
+ ExecStart=/usr/libexec/parsec/parsec --config /etc/parsec/config.toml
+
+ [Install]
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.bb b/meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.bb
new file mode 100644
index 0000000000..0e149558c0
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.bb
@@ -0,0 +1,67 @@
+SUMMARY = "Platform AbstRaction for SECurity Daemon"
+HOMEPAGE = "https://github.com/parallaxsecond/parsec"
+LICENSE = "Apache-2.0"
+
+inherit cargo
+
+SRC_URI += "crate://crates.io/parsec-service/${PV} \
+ file://parsec_init \
+ file://systemd.patch \
+ file://parsec-tmpfiles.conf \
+"
+
+DEPENDS = "tpm2-tss"
+TOOLCHAIN = "clang"
+
+CARGO_BUILD_FLAGS += " --features all-providers,cryptoki/generate-bindings,tss-esapi/generate-bindings"
+
+inherit systemd
+SYSTEMD_SERVICE_${PN} = "parsec.service"
+
+inherit update-rc.d
+INITSCRIPT_NAME = "parsec"
+
+# A local config file can be set in build/local.conf.
+# The file must then also be added to SRC_URI (see the hypothetical fragment below).
+PARSEC_CONFIG ?= "${S}/config.toml"
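+# Hypothetical bbappend fragment for a custom config (file name is illustrative):
+#   FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+#   SRC_URI += "file://my-config.toml"
+#   PARSEC_CONFIG = "${WORKDIR}/my-config.toml"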
+
+do_install_append () {
+ # Binaries
+ install -d -m 700 -o parsec -g parsec "${D}${libexecdir}/parsec"
+ install -m 700 -o parsec -g parsec "${WORKDIR}/build/target/${CARGO_TARGET_SUBDIR}/parsec" ${D}${libexecdir}/parsec/parsec
+
+ # Config file
+ install -d -m 700 -o parsec -g parsec "${D}${sysconfdir}/parsec"
+ install -m 400 -o parsec -g parsec "${PARSEC_CONFIG}" ${D}${sysconfdir}/parsec/config.toml
+
+ # Data dir
+ install -d -m 700 -o parsec -g parsec "${D}${localstatedir}/lib/parsec"
+
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
+ install -d ${D}${systemd_unitdir}/system
+ install -m 644 ${S}/systemd-daemon/parsec.service ${D}${systemd_unitdir}/system
+
+ install -d ${D}${libdir}/tmpfiles.d
+ install -m 644 ${WORKDIR}/parsec-tmpfiles.conf ${D}${libdir}/tmpfiles.d
+ fi
+
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then
+ install -d ${D}${sysconfdir}/init.d
+ install -m 755 ${WORKDIR}/parsec_init ${D}${sysconfdir}/init.d/parsec
+ fi
+}
+
+inherit useradd
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = "-r -g parsec -s /bin/false -d ${localstatedir}/lib/parsec parsec"
+GROUPADD_PARAM_${PN} = "-r parsec"
+
+FILES_${PN} += " \
+ ${sysconfdir}/parsec/config.toml \
+ ${libexecdir}/parsec/parsec \
+ ${systemd_unitdir}/system/parsec.service \
+ ${libdir}/tmpfiles.d/parsec-tmpfiles.conf \
+ ${sysconfdir}/init.d/parsec \
+"
+
+require parsec-service_${PV}.inc
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.inc b/meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.inc
new file mode 100644
index 0000000000..59a47f92a8
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-service/parsec-service_0.7.0.inc
@@ -0,0 +1,147 @@
+# This file is created from the parsec-service repository Cargo.lock using the cargo-bitbake tool
+
+SRC_URI += " \
+ crate://crates.io/aho-corasick/0.7.15 \
+ crate://crates.io/ansi_term/0.11.0 \
+ crate://crates.io/anyhow/1.0.38 \
+ crate://crates.io/atty/0.2.14 \
+ crate://crates.io/autocfg/1.0.1 \
+ crate://crates.io/base64/0.12.3 \
+ crate://crates.io/base64/0.13.0 \
+ crate://crates.io/bincode/1.3.2 \
+ crate://crates.io/bindgen/0.56.0 \
+ crate://crates.io/bindgen/0.57.0 \
+ crate://crates.io/bitfield/0.13.2 \
+ crate://crates.io/bitflags/1.2.1 \
+ crate://crates.io/byteorder/1.3.4 \
+ crate://crates.io/bytes/0.5.6 \
+ crate://crates.io/bytes/1.0.1 \
+ crate://crates.io/cc/1.0.67 \
+ crate://crates.io/cexpr/0.4.0 \
+ crate://crates.io/cfg-if/1.0.0 \
+ crate://crates.io/clang-sys/1.1.1 \
+ crate://crates.io/clap/2.33.3 \
+ crate://crates.io/cmake/0.1.45 \
+ crate://crates.io/cryptoauthlib-sys/0.1.0 \
+ crate://crates.io/cryptoki-sys/0.1.1 \
+ crate://crates.io/cryptoki/0.1.1 \
+ crate://crates.io/derivative/2.2.0 \
+ crate://crates.io/either/1.6.1 \
+ crate://crates.io/enumflags2/0.6.4 \
+ crate://crates.io/enumflags2_derive/0.6.4 \
+ crate://crates.io/env_logger/0.8.3 \
+ crate://crates.io/fixedbitset/0.2.0 \
+ crate://crates.io/getrandom/0.2.2 \
+ crate://crates.io/glob/0.3.0 \
+ crate://crates.io/hashbrown/0.9.1 \
+ crate://crates.io/heck/0.3.2 \
+ crate://crates.io/hermit-abi/0.1.18 \
+ crate://crates.io/hex/0.4.3 \
+ crate://crates.io/hostname-validator/1.0.0 \
+ crate://crates.io/humantime/2.1.0 \
+ crate://crates.io/indexmap/1.6.2 \
+ crate://crates.io/itertools/0.8.2 \
+ crate://crates.io/itertools/0.9.0 \
+ crate://crates.io/lazy_static/1.4.0 \
+ crate://crates.io/lazycell/1.3.0 \
+ crate://crates.io/libc/0.2.89 \
+ crate://crates.io/libloading/0.7.0 \
+ crate://crates.io/log/0.4.14 \
+ crate://crates.io/mbox/0.5.0 \
+ crate://crates.io/memchr/2.3.4 \
+ crate://crates.io/multimap/0.8.3 \
+ crate://crates.io/nom/5.1.2 \
+ crate://crates.io/num-bigint/0.3.2 \
+ crate://crates.io/num-complex/0.3.1 \
+ crate://crates.io/num-derive/0.3.3 \
+ crate://crates.io/num-integer/0.1.44 \
+ crate://crates.io/num-iter/0.1.42 \
+ crate://crates.io/num-rational/0.3.2 \
+ crate://crates.io/num-traits/0.2.14 \
+ crate://crates.io/num/0.3.1 \
+ crate://crates.io/num_cpus/1.13.0 \
+ crate://crates.io/oid/0.1.1 \
+ crate://crates.io/parsec-interface/0.24.0 \
+ crate://crates.io/peeking_take_while/0.1.2 \
+ crate://crates.io/petgraph/0.5.1 \
+ crate://crates.io/picky-asn1-der/0.2.4 \
+ crate://crates.io/picky-asn1-x509/0.4.0 \
+ crate://crates.io/picky-asn1/0.3.1 \
+ crate://crates.io/pkg-config/0.3.19 \
+ crate://crates.io/ppv-lite86/0.2.10 \
+ crate://crates.io/proc-macro-error-attr/1.0.4 \
+ crate://crates.io/proc-macro-error/1.0.4 \
+ crate://crates.io/proc-macro2/1.0.24 \
+ crate://crates.io/prost-build/0.6.1 \
+ crate://crates.io/prost-build/0.7.0 \
+ crate://crates.io/prost-derive/0.6.1 \
+ crate://crates.io/prost-derive/0.7.0 \
+ crate://crates.io/prost-types/0.6.1 \
+ crate://crates.io/prost-types/0.7.0 \
+ crate://crates.io/prost/0.6.1 \
+ crate://crates.io/prost/0.7.0 \
+ crate://crates.io/psa-crypto-sys/0.8.0 \
+ crate://crates.io/psa-crypto/0.8.0 \
+ crate://crates.io/quote/1.0.9 \
+ crate://crates.io/rand/0.8.3 \
+ crate://crates.io/rand_chacha/0.3.0 \
+ crate://crates.io/rand_core/0.6.2 \
+ crate://crates.io/rand_hc/0.3.0 \
+ crate://crates.io/redox_syscall/0.2.5 \
+ crate://crates.io/regex-syntax/0.6.23 \
+ crate://crates.io/regex/1.4.5 \
+ crate://crates.io/remove_dir_all/0.5.3 \
+ crate://crates.io/rust-cryptoauthlib/0.1.0 \
+ crate://crates.io/rustc-hash/1.1.0 \
+ crate://crates.io/rustc_version/0.2.3 \
+ crate://crates.io/same-file/1.0.6 \
+ crate://crates.io/sd-notify/0.2.0 \
+ crate://crates.io/secrecy/0.7.0 \
+ crate://crates.io/semver-parser/0.7.0 \
+ crate://crates.io/semver/0.9.0 \
+ crate://crates.io/serde/1.0.124 \
+ crate://crates.io/serde_bytes/0.11.5 \
+ crate://crates.io/serde_derive/1.0.124 \
+ crate://crates.io/shlex/0.1.1 \
+ crate://crates.io/signal-hook-registry/1.3.0 \
+ crate://crates.io/signal-hook/0.3.7 \
+ crate://crates.io/stable_deref_trait/1.2.0 \
+ crate://crates.io/strsim/0.8.0 \
+ crate://crates.io/structopt-derive/0.4.14 \
+ crate://crates.io/structopt/0.3.21 \
+ crate://crates.io/strum_macros/0.19.4 \
+ crate://crates.io/syn/1.0.64 \
+ crate://crates.io/synstructure/0.12.4 \
+ crate://crates.io/tempfile/3.2.0 \
+ crate://crates.io/termcolor/1.1.2 \
+ crate://crates.io/textwrap/0.11.0 \
+ crate://crates.io/thiserror-impl/1.0.24 \
+ crate://crates.io/thiserror/1.0.24 \
+ crate://crates.io/threadpool/1.8.1 \
+ crate://crates.io/toml/0.5.8 \
+ crate://crates.io/tss-esapi-sys/0.1.0 \
+ crate://crates.io/tss-esapi/5.0.0 \
+ crate://crates.io/unicode-segmentation/1.7.1 \
+ crate://crates.io/unicode-width/0.1.8 \
+ crate://crates.io/unicode-xid/0.2.1 \
+ crate://crates.io/users/0.11.0 \
+ crate://crates.io/uuid/0.8.2 \
+ crate://crates.io/vec_map/0.8.2 \
+ crate://crates.io/version/3.0.0 \
+ crate://crates.io/version_check/0.9.3 \
+ crate://crates.io/walkdir/2.3.1 \
+ crate://crates.io/wasi/0.10.2+wasi-snapshot-preview1 \
+ crate://crates.io/which/3.1.1 \
+ crate://crates.io/which/4.0.2 \
+ crate://crates.io/winapi-i686-pc-windows-gnu/0.4.0 \
+ crate://crates.io/winapi-util/0.1.5 \
+ crate://crates.io/winapi-x86_64-pc-windows-gnu/0.4.0 \
+ crate://crates.io/winapi/0.3.9 \
+ crate://crates.io/zeroize/1.2.0 \
+ crate://crates.io/zeroize_derive/1.0.1 \
+ file://cryptoki.patch \
+"
+
+LIC_FILES_CHKSUM = " \
+ file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+"
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.bb b/meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.bb
new file mode 100644
index 0000000000..35c65c02af
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.bb
@@ -0,0 +1,17 @@
+SUMMARY = "Parsec Command Line Interface"
+HOMEPAGE = "https://github.com/parallaxsecond/parsec-tool"
+LICENSE = "Apache-2.0"
+
+inherit cargo
+
+SRC_URI += "crate://crates.io/parsec-tool/${PV} \
+"
+
+TOOLCHAIN = "clang"
+
+do_install() {
+ install -d ${D}/${bindir}
+ install -m 755 "${B}/target/${TARGET_SYS}/release/parsec-tool" "${D}${bindir}/parsec-tool"
+}
+
+require parsec-tool_${PV}.inc
diff --git a/meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.inc b/meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.inc
new file mode 100644
index 0000000000..9560dcff17
--- /dev/null
+++ b/meta-security/meta-parsec/recipes-parsec/parsec-tool/parsec-tool_0.3.0.inc
@@ -0,0 +1,127 @@
+# This file is created from the parsec-tool repository Cargo.lock using the cargo-bitbake tool
+
+SRC_URI += " \
+ crate://crates.io/aho-corasick/0.7.15 \
+ crate://crates.io/ansi_term/0.11.0 \
+ crate://crates.io/ansi_term/0.12.1 \
+ crate://crates.io/anyhow/1.0.38 \
+ crate://crates.io/atty/0.2.14 \
+ crate://crates.io/autocfg/1.0.1 \
+ crate://crates.io/base64/0.13.0 \
+ crate://crates.io/bincode/1.3.1 \
+ crate://crates.io/bitflags/1.2.1 \
+ crate://crates.io/block-buffer/0.9.0 \
+ crate://crates.io/byteorder/1.4.2 \
+ crate://crates.io/bytes/0.5.6 \
+ crate://crates.io/cc/1.0.66 \
+ crate://crates.io/cfg-if/1.0.0 \
+ crate://crates.io/clap/2.33.3 \
+ crate://crates.io/clap/3.0.0-beta.2 \
+ crate://crates.io/clap_derive/3.0.0-beta.2 \
+ crate://crates.io/cmake/0.1.45 \
+ crate://crates.io/cpuid-bool/0.1.2 \
+ crate://crates.io/derivative/2.2.0 \
+ crate://crates.io/digest/0.9.0 \
+ crate://crates.io/either/1.6.1 \
+ crate://crates.io/env_logger/0.8.3 \
+ crate://crates.io/fixedbitset/0.2.0 \
+ crate://crates.io/form_urlencoded/1.0.0 \
+ crate://crates.io/generic-array/0.14.4 \
+ crate://crates.io/getrandom/0.2.2 \
+ crate://crates.io/hashbrown/0.9.1 \
+ crate://crates.io/heck/0.3.2 \
+ crate://crates.io/hermit-abi/0.1.18 \
+ crate://crates.io/humantime/2.1.0 \
+ crate://crates.io/idna/0.2.1 \
+ crate://crates.io/indexmap/1.6.1 \
+ crate://crates.io/itertools/0.8.2 \
+ crate://crates.io/lazy_static/1.4.0 \
+ crate://crates.io/libc/0.2.86 \
+ crate://crates.io/log/0.4.14 \
+ crate://crates.io/matches/0.1.8 \
+ crate://crates.io/memchr/2.3.4 \
+ crate://crates.io/multimap/0.8.2 \
+ crate://crates.io/num-bigint/0.3.1 \
+ crate://crates.io/num-complex/0.3.1 \
+ crate://crates.io/num-derive/0.3.3 \
+ crate://crates.io/num-integer/0.1.44 \
+ crate://crates.io/num-iter/0.1.42 \
+ crate://crates.io/num-rational/0.3.2 \
+ crate://crates.io/num-traits/0.2.14 \
+ crate://crates.io/num/0.3.1 \
+ crate://crates.io/oid/0.1.1 \
+ crate://crates.io/once_cell/1.5.2 \
+ crate://crates.io/opaque-debug/0.3.0 \
+ crate://crates.io/os_str_bytes/2.4.0 \
+ crate://crates.io/parsec-client/0.12.0 \
+ crate://crates.io/parsec-interface/0.24.0 \
+ crate://crates.io/pem/0.8.3 \
+ crate://crates.io/percent-encoding/2.1.0 \
+ crate://crates.io/petgraph/0.5.1 \
+ crate://crates.io/picky-asn1-der/0.2.4 \
+ crate://crates.io/picky-asn1/0.3.1 \
+ crate://crates.io/ppv-lite86/0.2.10 \
+ crate://crates.io/proc-macro-error-attr/1.0.4 \
+ crate://crates.io/proc-macro-error/1.0.4 \
+ crate://crates.io/proc-macro2/1.0.24 \
+ crate://crates.io/prost-build/0.6.1 \
+ crate://crates.io/prost-derive/0.6.1 \
+ crate://crates.io/prost-types/0.6.1 \
+ crate://crates.io/prost/0.6.1 \
+ crate://crates.io/psa-crypto-sys/0.8.0 \
+ crate://crates.io/psa-crypto/0.8.0 \
+ crate://crates.io/quote/1.0.9 \
+ crate://crates.io/rand/0.8.3 \
+ crate://crates.io/rand_chacha/0.3.0 \
+ crate://crates.io/rand_core/0.6.2 \
+ crate://crates.io/rand_hc/0.3.0 \
+ crate://crates.io/redox_syscall/0.2.5 \
+ crate://crates.io/regex-syntax/0.6.22 \
+ crate://crates.io/regex/1.4.3 \
+ crate://crates.io/remove_dir_all/0.5.3 \
+ crate://crates.io/same-file/1.0.6 \
+ crate://crates.io/secrecy/0.7.0 \
+ crate://crates.io/serde/1.0.123 \
+ crate://crates.io/serde_bytes/0.11.5 \
+ crate://crates.io/serde_derive/1.0.123 \
+ crate://crates.io/sha2/0.9.3 \
+ crate://crates.io/strsim/0.10.0 \
+ crate://crates.io/strsim/0.8.0 \
+ crate://crates.io/structopt-derive/0.4.14 \
+ crate://crates.io/structopt/0.3.21 \
+ crate://crates.io/syn/1.0.60 \
+ crate://crates.io/synstructure/0.12.4 \
+ crate://crates.io/tempfile/3.2.0 \
+ crate://crates.io/termcolor/1.1.2 \
+ crate://crates.io/textwrap/0.11.0 \
+ crate://crates.io/textwrap/0.12.1 \
+ crate://crates.io/thiserror-impl/1.0.23 \
+ crate://crates.io/thiserror/1.0.23 \
+ crate://crates.io/thread_local/1.1.3 \
+ crate://crates.io/tinyvec/1.1.1 \
+ crate://crates.io/tinyvec_macros/0.1.0 \
+ crate://crates.io/typenum/1.12.0 \
+ crate://crates.io/unicode-bidi/0.3.4 \
+ crate://crates.io/unicode-normalization/0.1.17 \
+ crate://crates.io/unicode-segmentation/1.7.1 \
+ crate://crates.io/unicode-width/0.1.8 \
+ crate://crates.io/unicode-xid/0.2.1 \
+ crate://crates.io/url/2.2.0 \
+ crate://crates.io/users/0.10.0 \
+ crate://crates.io/uuid/0.8.2 \
+ crate://crates.io/vec_map/0.8.2 \
+ crate://crates.io/version_check/0.9.2 \
+ crate://crates.io/walkdir/2.3.1 \
+ crate://crates.io/wasi/0.10.2+wasi-snapshot-preview1 \
+ crate://crates.io/which/3.1.1 \
+ crate://crates.io/winapi-i686-pc-windows-gnu/0.4.0 \
+ crate://crates.io/winapi-util/0.1.5 \
+ crate://crates.io/winapi-x86_64-pc-windows-gnu/0.4.0 \
+ crate://crates.io/winapi/0.3.9 \
+ crate://crates.io/zeroize/1.2.0 \
+ crate://crates.io/zeroize_derive/1.0.1 \
+"
+
+LIC_FILES_CHKSUM = " \
+ file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+"
diff --git a/meta-security/meta-tpm/conf/layer.conf b/meta-security/meta-tpm/conf/layer.conf
index 65788eb0ea..1b766cba28 100644
--- a/meta-security/meta-tpm/conf/layer.conf
+++ b/meta-security/meta-tpm/conf/layer.conf
@@ -13,6 +13,7 @@ LAYERSERIES_COMPAT_tpm-layer = "hardknott"
LAYERDEPENDS_tpm-layer = " \
core \
openembedded-layer \
+ meta-python \
"
BBLAYERS_LAYERINDEX_NAME_tpm-layer = "meta-tpm"
diff --git a/meta-security/meta-tpm/recipes-tpm/swtpm/files/oe_configure.patch b/meta-security/meta-tpm/recipes-tpm/swtpm/files/oe_configure.patch
new file mode 100644
index 0000000000..5aee933b92
--- /dev/null
+++ b/meta-security/meta-tpm/recipes-tpm/swtpm/files/oe_configure.patch
@@ -0,0 +1,65 @@
+Don't check for the tcsd daemon on the host.
+
+Upstream-Status: OE Specific
+
+Signed-off-by: Armin Kuster <akuster808@gmail.com>
+
+Index: git/configure.ac
+===================================================================
+--- git.orig/configure.ac
++++ git/configure.ac
+@@ -179,15 +179,6 @@ AC_SUBST([LIBTPMS_LIBS])
+ AC_CHECK_LIB(c, clock_gettime, LIBRT_LIBS="", LIBRT_LIBS="-lrt")
+ AC_SUBST([LIBRT_LIBS])
+
+-AC_PATH_PROG([TCSD], tcsd)
+-if test "x$TCSD" = "x"; then
+- have_tcsd=no
+- AC_MSG_WARN([tcsd could not be found; typically need it for tss user account and tests])
+-else
+- have_tcsd=yes
+-fi
+-AM_CONDITIONAL([HAVE_TCSD], test "$have_tcsd" != "no")
+-
+ dnl We either need netstat (more common across systems) or 'ss' for test cases
+ AC_PATH_PROG([NETSTAT], [netstat])
+ if test "x$NETSTAT" = "x"; then
+@@ -440,23 +431,6 @@ AC_ARG_WITH([tss-group],
+ [TSS_GROUP="tss"]
+ )
+
+-case $have_tcsd in
+-yes)
+- AC_MSG_CHECKING([whether TSS_USER $TSS_USER is available])
+- if ! test $(id -u $TSS_USER); then
+- AC_MSG_ERROR(["$TSS_USER is not available"])
+- else
+- AC_MSG_RESULT([yes])
+- fi
+- AC_MSG_CHECKING([whether TSS_GROUP $TSS_GROUP is available])
+- if ! test $(id -g $TSS_GROUP); then
+- AC_MSG_ERROR(["$TSS_GROUP is not available"])
+- else
+- AC_MSG_RESULT([yes])
+- fi
+- ;;
+-esac
+-
+ AC_SUBST([TSS_USER])
+ AC_SUBST([TSS_GROUP])
+
+Index: git/tests/Makefile.am
+===================================================================
+--- git.orig/tests/Makefile.am
++++ git/tests/Makefile.am
+@@ -83,10 +83,6 @@ TESTS += \
+ test_tpm2_swtpm_cert \
+ test_tpm2_swtpm_cert_ecc \
+ test_tpm2_swtpm_setup_create_cert
+-if HAVE_TCSD
+-TESTS += \
+- test_tpm2_samples_create_tpmca
+-endif
+ endif
+
+ EXTRA_DIST=$(TESTS) \
diff --git a/meta-security/meta-tpm/recipes-tpm/swtpm/swtpm_0.5.2.bb b/meta-security/meta-tpm/recipes-tpm/swtpm/swtpm_0.5.2.bb
index b7ff2ad59f..caf99e823e 100644
--- a/meta-security/meta-tpm/recipes-tpm/swtpm/swtpm_0.5.2.bb
+++ b/meta-security/meta-tpm/recipes-tpm/swtpm/swtpm_0.5.2.bb
@@ -7,18 +7,19 @@ DEPENDS = "libtasn1 coreutils-native expect socat glib-2.0 net-tools-native libt
# configure checks for the tools already during compilation and
# then swtpm_setup needs them at runtime
-DEPENDS += "tpm-tools-native expect-native socat-native"
+DEPENDS_append = " tpm-tools-native expect-native socat-native python3-pip-native python3-cryptography-native"
SRCREV = "e59c0c1a7b4c8d652dbb280fd6126895a7057464"
SRC_URI = "git://github.com/stefanberger/swtpm.git;branch=stable-0.5 \
file://ioctl_h.patch \
+ file://oe_configure.patch \
"
PE = "1"
S = "${WORKDIR}/git"
-inherit autotools pkgconfig python3-dir
PARALLEL_MAKE = ""
+inherit autotools pkgconfig python3native
TSS_USER="tss"
TSS_GROUP="tss"
@@ -41,7 +42,7 @@ USERADD_PARAM_${PN} = "--system -g ${TSS_GROUP} --home-dir \
PACKAGES =+ "${PN}-python"
-FILES_${PN}-python = "${nonarch_libdir}/${PYTHON_PN}/dist-packages/* "
+FILES_${PN}-python = "${PYTHON_SITEPACKAGES_DIR}"
PACKAGE_BEFORE_PN = "${PN}-cuse"
FILES_${PN}-cuse = "${bindir}/swtpm_cuse"
diff --git a/meta-security/recipes-core/packagegroup/packagegroup-core-security.bb b/meta-security/recipes-core/packagegroup/packagegroup-core-security.bb
index 0a4452eeac..9ac0d2c25f 100644
--- a/meta-security/recipes-core/packagegroup/packagegroup-core-security.bb
+++ b/meta-security/recipes-core/packagegroup/packagegroup-core-security.bb
@@ -51,9 +51,9 @@ RDEPENDS_packagegroup-security-scanners = "\
isic \
nikto \
checksecurity \
- ${@bb.utils.contains_any("TUNE_FEATURES", "riscv32 riscv64", "", " clamav clamav-freshclam clamav-cvd",d)} \
+ ${@bb.utils.contains_any("TUNE_FEATURES", "riscv32 riscv64", "", " clamav clamav-freshclam",d)} \
"
-RDEPENDS_packagegroup-security-scanners_remove_libc-musl = "clamav clamav-freshclam clamav-cvd"
+RDEPENDS_packagegroup-security-scanners_remove_libc-musl = "clamav clamav-freshclam"
SUMMARY_packagegroup-security-audit = "Security Audit tools "
RDEPENDS_packagegroup-security-audit = " \
diff --git a/meta-security/recipes-ids/suricata/python3-suricata-update_1.1.1.bb b/meta-security/recipes-ids/suricata/python3-suricata-update_1.2.1.bb
index 732ca9a11a..bbdce69ab6 100644
--- a/meta-security/recipes-ids/suricata/python3-suricata-update_1.1.1.bb
+++ b/meta-security/recipes-ids/suricata/python3-suricata-update_1.2.1.bb
@@ -5,11 +5,13 @@ LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=c70d8d3310941dcdfcd1e02800a1f548"
-SRCREV = "9630630ffc493ca26299d174ee2066aa1405b2d4"
-SRC_URI = "git://github.com/OISF/suricata-update;branch='master-1.1.x'"
+SRCREV = "50e857f75e576e239d8306a6ac55946a1ce252a6"
+SRC_URI = "git://github.com/OISF/suricata-update;branch='master-1.2.x'"
S = "${WORKDIR}/git"
inherit python3native python3targetconfig setuptools3
-RDEPENDS_${PN} = "python3-pyyaml"
+RDEPENDS_${PN} = "python3-pyyaml python3-logging python3-compression"
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-security/recipes-kernel/lkrg/files/makefile_cleanup.patch b/meta-security/recipes-kernel/lkrg/files/makefile_cleanup.patch
new file mode 100644
index 0000000000..106dc3f1ee
--- /dev/null
+++ b/meta-security/recipes-kernel/lkrg/files/makefile_cleanup.patch
@@ -0,0 +1,73 @@
+Upstream-Status: Pending
+
+This needs more work. It's my starting point.
+
+Signed-off-by: Armin Kuster <akuster808@gmail.com>
+
+Index: lkrg-0.9.0/Makefile
+===================================================================
+--- lkrg-0.9.0.orig/Makefile
++++ lkrg-0.9.0/Makefile
+@@ -4,28 +4,10 @@
+ # Author:
+ # - Adam 'pi3' Zabrocki (http://pi3.com.pl)
+ ##
+-
+-P_OUTPUT = output
+ P_PWD ?= $(shell pwd)
+-P_KVER ?= $(shell uname -r)
+-P_BOOTUP_SCRIPT ?= scripts/bootup/lkrg-bootup.sh
+-TARGET := p_lkrg
+-ifneq ($(KERNELRELEASE),)
+- KERNEL := /lib/modules/$(KERNELRELEASE)/build
+-else
+- ## KERNELRELEASE not set.
+- KERNEL := /lib/modules/$(P_KVER)/build
+-endif
+-
+-#
+-# Uncomment for debug compilation
+-#
+-# ccflags-m := -ggdb -DP_LKRG_DEBUG_BUILD -finstrument-functions
+-# ccflags-y := ${ccflags-m}
+-# p_lkrg-objs += src/modules/print_log/p_lkrg_debug_log.o
+
+-obj-m += $(TARGET).o
+-$(TARGET)-objs += src/modules/ksyms/p_resolve_ksym.o \
++obj-m := p_lkrg.o
++p_lkrg-y := src/modules/ksyms/p_resolve_ksym.o \
+ src/modules/hashing/p_lkrg_fast_hash.o \
+ src/modules/comm_channel/p_comm_channel.o \
+ src/modules/integrity_timer/p_integrity_timer.o \
+@@ -91,23 +73,14 @@ $(TARGET)-objs += src/modules/ksyms/p_re
+ src/p_lkrg_main.o
+
+
+-all:
+-# $(MAKE) -C $(KERNEL) M=$(P_PWD) modules CONFIG_DEBUG_SECTION_MISMATCH=y
+- $(MAKE) -C $(KERNEL) M=$(P_PWD) modules
+- mkdir -p $(P_OUTPUT)
+- cp $(P_PWD)/$(TARGET).ko $(P_OUTPUT)
+-
+-install:
+- $(MAKE) -C $(KERNEL) M=$(P_PWD) modules_install
+- depmod -a
+- $(P_PWD)/$(P_BOOTUP_SCRIPT) install
+
+-uninstall:
+- $(P_PWD)/$(P_BOOTUP_SCRIPT) uninstall
++modules:
++ $(MAKE) -C $(KERNEL_SRC) M=$(P_PWD) modules
++
++modules_install:
++ $(MAKE) -C $(KERNEL_SRC) M=$(P_PWD) modules_install
+
+ clean:
+- $(MAKE) -C $(KERNEL) M=$(P_PWD) clean
+- $(RM) Module.markers modules.order
+- $(RM) $(P_PWD)/src/modules/kmod/client/kmod/Module.markers
+- $(RM) $(P_PWD)/src/modules/kmod/client/kmod/modules.order
+- $(RM) -rf $(P_OUTPUT)
++ rm -f *.o *~ core .depend .*.cmd *.ko *.mod.c
++ rm -f Module.markers Module.symvers modules.order
++ rm -rf .tmp_versions Modules.symvers
diff --git a/meta-security/recipes-kernel/lkrg/lkrg-module_0.9.0.bb b/meta-security/recipes-kernel/lkrg/lkrg-module_0.9.0.bb
new file mode 100644
index 0000000000..dbc195d354
--- /dev/null
+++ b/meta-security/recipes-kernel/lkrg/lkrg-module_0.9.0.bb
@@ -0,0 +1,33 @@
+SUMMARY = "Linux Kernel Runtime Guard"
+DESCRIPTION="LKRG performs runtime integrity checking of the Linux \
+kernel and detection of security vulnerability exploits against the kernel."
+SECTION = "security"
+HOMEPAGE = "https://www.openwall.com/lkrg/"
+LICENSE = "GPLv2"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d931f44a1f4be309bcdac742d7ed92f9"
+
+DEPENDS = "virtual/kernel elfutils"
+
+SRC_URI = "https://www.openwall.com/lkrg/lkrg-${PV}.tar.gz \
+ file://makefile_cleanup.patch "
+
+SRC_URI[sha256sum] = "a997e4d98962c359f3af163bbcfa38a736d2a50bfe35c15065b74cb57f8742bf"
+
+S = "${WORKDIR}/lkrg-${PV}"
+
+inherit module kernel-module-split
+
+MAKE_TARGETS = "modules"
+
+MODULE_NAME = "p_lkrg"
+
+module_do_install() {
+ install -d ${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel/${MODULE_NAME}
+ install -m 0644 ${MODULE_NAME}.ko \
+ ${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel/${MODULE_NAME}/${MODULE_NAME}.ko
+}
+
+RPROVIDES_${PN} += "kernel-module-lkrg"
+
+COMPATIBLE_HOST = "(i.86|x86_64|arm|aarch64).*-linux"
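The recipe packages the module as kernel-module-lkrg via kernel-module-split. A quick on-target smoke test might look like this (illustrative only, assuming the module was installed into the running kernel's module tree):

    modprobe p_lkrg                # load the LKRG module built above
    lsmod | grep p_lkrg            # confirm it is resident
    dmesg | grep -i lkrg           # LKRG reports its integrity checks to the kernel log
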
diff --git a/meta-security/recipes-scanners/clamav/clamav_0.101.5.bb b/meta-security/recipes-scanners/clamav/clamav_0.104.0.bb
index 7dad263151..36e498dfbb 100644
--- a/meta-security/recipes-scanners/clamav/clamav_0.101.5.bb
+++ b/meta-security/recipes-scanners/clamav/clamav_0.104.0.bb
@@ -4,94 +4,68 @@ HOMEPAGE = "http://www.clamav.net/index.html"
SECTION = "security"
LICENSE = "LGPL-2.1"
-DEPENDS = "libtool db libxml2 openssl zlib curl llvm clamav-native libmspack bison-native"
-DEPENDS_class-native = "db-native openssl-native zlib-native llvm-native curl-native bison-native"
+DEPENDS = "glibc llvm libtool db openssl zlib curl libxml2 bison pcre2 json-c libcheck"
-LIC_FILES_CHKSUM = "file://COPYING.LGPL;beginline=2;endline=3;md5=4b89c05acc71195e9a06edfa2fa7d092"
+LIC_FILES_CHKSUM = "file://COPYING.txt;beginline=2;endline=3;md5=f7029fbbc5898b273d5902896f7bbe17"
-SRCREV = "482fcd413b07e9fd3ef9850e6d01a45f4e187108"
+SRCREV = "5553a5e206ceae5d920368baee7d403f823bcb6f"
-SRC_URI = "git://github.com/vrtadmin/clamav-devel;branch=rel/0.101 \
+SRC_URI = "git://github.com/vrtadmin/clamav-devel;branch=dev/0.104 \
file://clamd.conf \
file://freshclam.conf \
file://volatiles.03_clamav \
file://tmpfiles.clamav \
file://${BPN}.service \
- file://freshclam-native.conf \
- "
-
+ file://headers_fixup.patch \
+ file://oe_cmake_fixup.patch \
+"
S = "${WORKDIR}/git"
LEAD_SONAME = "libclamav.so"
-SO_VER = "9.0.4"
+SO_VER = "9.6.0"
+
+BINCONFIG = "${bindir}/clamav-config"
-inherit autotools pkgconfig useradd systemd multilib_header multilib_script
+inherit cmake chrpath pkgconfig useradd systemd multilib_header multilib_script
CLAMAV_UID ?= "clamav"
CLAMAV_GID ?= "clamav"
-INSTALL_CLAMAV_CVD ?= "1"
-
-CLAMAV_USR_DIR = "${STAGING_DIR_NATIVE}/usr"
-CLAMAV_USR_DIR_class-target = "${STAGING_DIR_HOST}/usr"
-
-PACKAGECONFIG_class-target ?= "ncurses bz2"
-PACKAGECONFIG_class-target += " ${@bb.utils.contains("DISTRO_FEATURES", "ipv6", "ipv6", "", d)}"
-PACKAGECONFIG_class-target += "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'systemd', '', d)}"
-
-PACKAGECONFIG[pcre] = "--with-pcre=${STAGING_LIBDIR}, --without-pcre, libpcre"
-PACKAGECONFIG[json] = "--with-libjson=${STAGING_LIBDIR}, --without-libjson, json-c,"
-PACKAGECONFIG[ipv6] = "--enable-ipv6, --disable-ipv6"
-PACKAGECONFIG[bz2] = "--with-libbz2-prefix=${CLAMAV_USR_DIR}, --disable-bzip2, bzip2"
-PACKAGECONFIG[ncurses] = "--with-libncurses-prefix=${CLAMAV_USR_DIR}, --without-libncurses-prefix, ncurses, "
-PACKAGECONFIG[systemd] = "--with-systemdsystemunitdir=${systemd_unitdir}/system/, --without-systemdsystemunitdir, "
-
-MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/clamav-config ${PN}-cvd:${localstatedir}/lib/clamav/mirrors.dat"
-
-EXTRA_OECONF_CLAMAV = "--without-libcheck-prefix --disable-unrar \
- --disable-mempool \
- --program-prefix="" \
- --disable-zlib-vcheck \
- --with-xml=${CLAMAV_USR_DIR} \
- --with-zlib=${CLAMAV_USR_DIR} \
- --with-openssl=${CLAMAV_USR_DIR} \
- --with-libcurl=${CLAMAV_USR_DIR} \
- --with-system-libmspack=${CLAMAV_USR_DIR} \
- --with-iconv=no \
- --enable-check=no \
- "
-
-EXTRA_OECONF_class-native += "${EXTRA_OECONF_CLAMAV}"
-EXTRA_OECONF_class-target += "--with-user=${CLAMAV_UID} --with-group=${CLAMAV_GID} ${EXTRA_OECONF_CLAMAV}"
-
-do_configure () {
- ${S}/configure ${CONFIGUREOPTS} ${EXTRA_OECONF}
-}
-do_configure_class-native () {
- ${S}/configure ${CONFIGUREOPTS} ${EXTRA_OECONF}
-}
+MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/clamav-config"
-do_compile_append_class-target() {
- if [ "${INSTALL_CLAMAV_CVD}" = "1" ]; then
- bbnote "CLAMAV creating cvd"
- install -d ${S}/clamav_db
- ${STAGING_BINDIR_NATIVE}/freshclam --datadir=${S}/clamav_db --config=${WORKDIR}/freshclam-native.conf
- fi
-}
+EXTRA_OECMAKE = " -DCMAKE_BUILD_TYPE=Release -DOPTIMIZE=ON -DENABLE_JSON_SHARED=OFF \
+ -DCLAMAV_GROUP=${CLAMAV_GID} -DCLAMAV_USER=${CLAMAV_UID} \
+ -DENABLE_TESTS=OFF -DBUILD_SHARED_LIBS=ON \
+ -DDISABLE_MPOOL=ON -DENABLE_FRESHCLAM_DNS_FIX=ON \
+ "
+
+PACKAGECONFIG ?= " clamonacc \
+ ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "systemd", "", d)}"
-do_install_append_class-target () {
+PACKAGECONFIG[milter] = "-DENABLE_MILTER=ON ,-DENABLE_MILTER=OFF, curl, curl"
+PACKAGECONFIG[clamonacc] = "-DENABLE_CLAMONACC=ON ,-DENABLE_CLAMONACC=OFF,"
+PACKAGECONFIG[unrar] = "-DENABLE_UNRAR=ON ,-DENABLE_UNRAR=OFF,"
+PACKAGECONFIG[systemd] = "-DENABLE_SYSTEMD=ON -DSYSTEMD_UNIT_DIR=${systemd_system_unitdir}, -DENABLE_SYSTEMD=OFF, systemd"
+
+export OECMAKE_C_FLAGS += " -I${STAGING_INCDIR} -L ${RECIPE_SYSROOT}${nonarch_libdir} -L${STAGING_LIBDIR} -lpthread"
+
+do_install_append () {
install -d ${D}/${sysconfdir}
install -d ${D}/${localstatedir}/lib/clamav
install -d ${D}${sysconfdir}/clamav ${D}${sysconfdir}/default/volatiles
- install -m 644 ${WORKDIR}/clamd.conf ${D}/${sysconfdir}
- install -m 644 ${WORKDIR}/freshclam.conf ${D}/${sysconfdir}
+ install -m 644 ${WORKDIR}/clamd.conf ${D}/${prefix}/${sysconfdir}
+ install -m 644 ${WORKDIR}/freshclam.conf ${D}/${prefix}/${sysconfdir}
install -m 0644 ${WORKDIR}/volatiles.03_clamav ${D}${sysconfdir}/default/volatiles/03_clamav
sed -i -e 's#${STAGING_DIR_HOST}##g' ${D}${libdir}/pkgconfig/libclamav.pc
rm ${D}/${libdir}/libclamav.so
if [ "${INSTALL_CLAMAV_CVD}" = "1" ]; then
install -m 666 ${S}/clamav_db/* ${D}/${localstatedir}/lib/clamav/.
fi
+
+ rm ${D}/${libdir}/libfreshclam.so
+ rm ${D}/${libdir}/libmspack.so
+
if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)};then
install -D -m 0644 ${WORKDIR}/clamav.service ${D}${systemd_unitdir}/system/clamav.service
install -d ${D}${sysconfdir}/tmpfiles.d
@@ -111,13 +85,13 @@ pkg_postinst_ontarget_${PN} () {
}
-PACKAGES = "${PN} ${PN}-dev ${PN}-dbg ${PN}-daemon ${PN}-doc ${PN}-cvd \
+PACKAGES = "${PN} ${PN}-dev ${PN}-dbg ${PN}-daemon ${PN}-doc \
${PN}-clamdscan ${PN}-freshclam ${PN}-libclamav ${PN}-staticdev"
-FILES_${PN} = "${bindir}/clambc ${bindir}/clamscan ${bindir}/clamsubmit \
+FILES_${PN} = "${bindir}/clambc ${bindir}/clamscan ${bindir}/clamsubmit ${sbindir}/clamonacc \
${bindir}/*sigtool ${mandir}/man1/clambc* ${mandir}/man1/clamscan* \
${mandir}/man1/sigtool* ${mandir}/man1/clambsubmit* \
- ${docdir}/clamav/* "
+ ${docdir}/clamav/* ${libdir}/libmspack* "
FILES_${PN}-clamdscan = " ${bindir}/clamdscan \
${docdir}/clamdscan/* \
@@ -128,12 +102,17 @@ FILES_${PN}-daemon = "${bindir}/clamconf ${bindir}/clamdtop ${sbindir}/clamd \
${mandir}/man1/clamconf* ${mandir}/man1/clamdtop* \
${mandir}/man5/clamd* ${mandir}/man8/clamd* \
${sysconfdir}/clamd.conf* \
+ /usr/etc/clamd.conf* \
${systemd_unitdir}/system/clamav-daemon/* \
${docdir}/clamav-daemon/* ${sysconfdir}/clamav-daemon \
- ${sysconfdir}/logcheck/ignore.d.server/clamav-daemon "
+ ${sysconfdir}/logcheck/ignore.d.server/clamav-daemon \
+ ${systemd_unitdir}/system/clamav-daemon.service \
+ ${systemd_unitdir}/system/clamav-clamonacc.service \
+ "
FILES_${PN}-freshclam = "${bindir}/freshclam \
${sysconfdir}/freshclam.conf* \
+ /usr/etc/freshclam.conf* \
${sysconfdir}/clamav ${sysconfdir}/default/volatiles \
${sysconfdir}/tmpfiles.d/*.conf \
${localstatedir}/lib/clamav \
@@ -148,15 +127,13 @@ FILES_${PN}-dev = " ${bindir}/clamav-config ${libdir}/*.la \
FILES_${PN}-staticdev = "${libdir}/*.a"
-FILES_${PN}-libclamav = "${libdir}/libclamav.so* ${libdir}/libclammspack.so*\
- ${docdir}/libclamav/* "
+FILES_${PN}-libclamav = "${libdir}/libclamav.so* ${libdir}/libclammspack.so* \
+ ${libdir}/libfreshclam.so* ${docdir}/libclamav/* "
FILES_${PN}-doc = "${mandir}/man/* \
${datadir}/man/* \
${docdir}/* "
-FILES_${PN}-cvd = "${localstatedir}/lib/clamav/*.cvd ${localstatedir}/lib/clamav/*.dat"
-
USERADD_PACKAGES = "${PN}"
GROUPADD_PARAM_${PN} = "--system ${CLAMAV_UID}"
USERADD_PARAM_${PN} = "--system -g ${CLAMAV_GID} --home-dir \
@@ -169,6 +146,3 @@ RCONFLICTS_${PN} += "${PN}-systemd"
SYSTEMD_SERVICE_${PN} = "${BPN}.service"
RDEPENDS_${PN} = "openssl ncurses-libncurses libxml2 libbz2 ncurses-libtinfo curl libpcre2 clamav-freshclam clamav-libclamav"
-RDEPENDS_${PN}_class-native = ""
-
-BBCLASSEXTEND = "native"
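The PACKAGECONFIG flags above (milter, clamonacc, unrar, systemd) map one-to-one onto the CMake options passed to the new cmake-based build. As a sketch of how an image or distro might tune them from local.conf (illustrative, using the same pre-3.4 override syntax as the recipe):

    PACKAGECONFIG_append_pn-clamav = " unrar"
    PACKAGECONFIG_remove_pn-clamav = "clamonacc"   # drop on-access scanning
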
diff --git a/meta-security/recipes-scanners/clamav/files/headers_fixup.patch b/meta-security/recipes-scanners/clamav/files/headers_fixup.patch
new file mode 100644
index 0000000000..9de0a26dbb
--- /dev/null
+++ b/meta-security/recipes-scanners/clamav/files/headers_fixup.patch
@@ -0,0 +1,58 @@
+Fixes checks not needed due to glibc 2.33
+
+Upstream-Status: Pending
+Signed-off-by: Armin Kuster <akuster808@gmail.com>
+
+Index: git/CMakeLists.txt
+===================================================================
+--- git.orig/CMakeLists.txt
++++ git/CMakeLists.txt
+@@ -374,8 +373,6 @@ check_include_file("stdlib.h"
+ check_include_file("string.h" HAVE_STRING_H)
+ check_include_file("strings.h" HAVE_STRINGS_H)
+ check_include_file("sys/cdefs.h" HAVE_SYS_CDEFS_H)
+-check_include_file("sys/dl.h" HAVE_SYS_DL_H)
+-check_include_file("sys/fileio.h" HAVE_SYS_FILIO_H)
+ check_include_file("sys/mman.h" HAVE_SYS_MMAN_H)
+ check_include_file("sys/param.h" HAVE_SYS_PARAM_H)
+ check_include_file("sys/queue.h" HAVE_SYS_QUEUE_H)
+@@ -410,8 +407,6 @@ endif()
+
+ # int-types variants
+ check_include_file("inttypes.h" HAVE_INTTYPES_H)
+-check_include_file("sys/inttypes.h" HAVE_SYS_INTTYPES_H)
+-check_include_file("sys/int_types.h" HAVE_SYS_INT_TYPES_H)
+ check_include_file("stdint.h" HAVE_STDINT_H)
+
+ # this hack required to silence warnings on systems with inttypes.h
+@@ -539,17 +528,11 @@ check_type_size("time_t" SIZEOF_TIME_T)
+ # Checks for library functions.
+ include(CheckSymbolExists)
+ check_symbol_exists(_Exit "stdlib.h" HAVE__EXIT)
+-check_symbol_exists(accept4 "sys/types.h" HAVE_ACCEPT4)
+ check_symbol_exists(snprintf "stdio.h" HAVE_SNPRINTF)
+-check_symbol_exists(stat64 "sys/stat.h" HAVE_STAT64)
+-check_symbol_exists(strcasestr "string.h" HAVE_STRCASESTR)
+ check_symbol_exists(strerror_r "string.h" HAVE_STRERROR_R)
+-check_symbol_exists(strlcat "string.h" HAVE_STRLCAT)
+-check_symbol_exists(strlcpy "string.h" HAVE_STRLCPY)
+ check_symbol_exists(strndup "string.h" HAVE_STRNDUP)
+ check_symbol_exists(strnlen "string.h" HAVE_STRNLEN)
+-check_symbol_exists(strnstr "string.h" HAVE_STRNSTR)
+-check_symbol_exists(sysctlbyname "sysctl.h" HAVE_SYSCTLBYNAME)
++check_symbol_exists(strcasecmp "string.h" HAVE_STRNCMP)
+ check_symbol_exists(timegm "time.h" HAVE_TIMEGM)
+ check_symbol_exists(vsnprintf "stdio.h" HAVE_VSNPRINTF)
+
+@@ -563,10 +546,9 @@ else()
+ check_symbol_exists(fseeko "stdio.h" HAVE_FSEEKO)
+ check_symbol_exists(getaddrinfo "netdb.h" HAVE_GETADDRINFO)
+ check_symbol_exists(getpagesize "unistd.h" HAVE_GETPAGESIZE)
+- check_symbol_exists(mkstemp "unistd.h" HAVE_MKSTEMP)
+ check_symbol_exists(poll "poll.h" HAVE_POLL)
+- check_symbol_exists(setgroups "unistd.h" HAVE_SETGROUPS)
+ check_symbol_exists(setsid "unistd.h" HAVE_SETSID)
++ set(HAVE_SYSCONF_SC_PAGESIZE 1)
+ endif()
+
+ include(CheckSymbolExists)
diff --git a/meta-security/recipes-scanners/clamav/files/oe_cmake_fixup.patch b/meta-security/recipes-scanners/clamav/files/oe_cmake_fixup.patch
new file mode 100644
index 0000000000..b284915b88
--- /dev/null
+++ b/meta-security/recipes-scanners/clamav/files/oe_cmake_fixup.patch
@@ -0,0 +1,39 @@
+Issue with rpath including /usr/lib and cross-compile checks causing the OE configure step to fail
+
+Use OE's CMake rpath framework and exclude some of the CMake checks that fail in our environment
+
+Upstream-Status: Inappropriate [configuration]
+Signed-off-by: Armin Kuster <akuster808@gmail.com>
+
+Index: git/CMakeLists.txt
+===================================================================
+--- git.orig/CMakeLists.txt
++++ git/CMakeLists.txt
+@@ -162,12 +162,6 @@ endif()
+
+ include(GNUInstallDirs)
+
+-if(CMAKE_INSTALL_FULL_LIBDIR)
+- set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_FULL_LIBDIR}")
+-else()
+- set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
+-endif()
+-
+ if(C_LINUX)
+ if(CMAKE_COMPILER_IS_GNUCXX)
+ # Set _GNU_SOURCE for O_LARGEFILE, O_CLOEXEC, O_DIRECTORY, O_NOFOLLOW, etc flags on older systems
+@@ -512,14 +506,8 @@ include(TestInline)
+ include(CheckFileOffsetBits)
+ # Determine how to pack structs on this platform.
+ include(CheckStructPacking)
+-# Check for signed right shift implementation.
+-include(CheckSignedRightShift)
+ # Check if systtem fts implementation available
+ include(CheckFTS)
+-# Check if uname(2) follows POSIX standard.
+-include(CheckUnamePosix)
+-# Check support for file descriptor passing
+-include(CheckFDPassing)
+
+ # Check if big-endian
+ include(TestBigEndian)
diff --git a/meta-security/recipes-security/libest/libest_3.2.0.bb b/meta-security/recipes-security/libest/libest_3.2.0.bb
index f993bd65ec..5b6dc995ce 100644
--- a/meta-security/recipes-security/libest/libest_3.2.0.bb
+++ b/meta-security/recipes-security/libest/libest_3.2.0.bb
@@ -6,7 +6,7 @@ LICENSE = "OpenSSL"
LIC_FILES_CHKSUM = "file://LICENSE;md5=ecb78acde8e3b795de8ef6b61aed5885"
SRCREV = "4ca02c6d7540f2b1bcea278a4fbe373daac7103b"
-SRC_URI = "git://github.com/cisco/libest"
+SRC_URI = "git://github.com/cisco/libest;branch=main"
DEPENDS = "openssl"
diff --git a/meta-security/recipes-security/mfa/python3-privacyidea_3.5.1.bb b/meta-security/recipes-security/mfa/python3-privacyidea_3.5.2.bb
index fb84411921..cd0acf8695 100644
--- a/meta-security/recipes-security/mfa/python3-privacyidea_3.5.1.bb
+++ b/meta-security/recipes-security/mfa/python3-privacyidea_3.5.2.bb
@@ -6,7 +6,7 @@ LICENSE = "AGPL-3.0"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c0acfa7a8a03b718abee9135bc1a1c55"
PYPI_PACKAGE = "privacyIDEA"
-SRC_URI[sha256sum] = "c10f8e9ec681af4cb42fde70864c2b9a4b47e2bcccfc1290f83c1283748772c6"
+SRC_URI[sha256sum] = "26aeb0d353af1f212c4df476202516953c20f7f31566cfe0b67cbb553de04763"
inherit pypi setuptools3