update letsencrypt deploy - fix matrix ssl-cert for coturn

This commit is contained in:
creme 2020-02-09 08:14:08 +00:00
parent 54051ab9f1
commit 9da351875b
2 changed files with 48 additions and 1 deletions

View File

@@ -17,7 +17,7 @@ for domain in $RENEWED_DOMAINS; do
cp "$daemon_cert_root/privkey.pem" "$matrix_dir"/
cp "$daemon_cert_root/chain.pem" "$matrix_dir"/
cp "$daemon_cert_root/fullchain.pem" "$matrix_dir"/
chmod 600 "$matrix_dir"/*.pem
chmod 644 "$matrix_dir"/*.pem
chown 108:0 "$matrix_dir"/*.pem
lxc-attach -n matrix -- bash -c "systemctl reload nginx ; systemctl restart coturn"

View File

@@ -47,3 +47,50 @@ net.ipv4.tcp_challenge_ack_limit = 9999
# Don't slow network - save congestion window after idle
# https://github.com/ton31337/tools/wiki/tcp_slow_start_after_idle---tcp_no_metrics_save-performance
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_no_metrics_save=0
# Optimize connection queues
# https://www.linode.com/docs/web-servers/nginx/configure-nginx-for-optimized-performance
# Increase the number of packets that can be queued
net.core.netdev_max_backlog = 3240000
# Max number of "backlogged sockets" (connection requests that can be queued for any given listening socket)
net.core.somaxconn = 50000
# Increase max number of sockets allowed in TIME_WAIT
net.ipv4.tcp_max_tw_buckets = 1440000
# Number of packets to keep in the backlog before the kernel starts dropping them
# A sane value for busy servers is 3240000
net.ipv4.tcp_max_syn_backlog = 3240000
# TCP memory tuning
# View memory TCP actually uses with: cat /proc/net/sockstat
# *** These values are auto-created based on your server specs ***
# *** Edit these parameters with caution because they will use more RAM ***
# Changes suggested by IBM on https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/Welcome%20to%20High%20Performance%20Computing%20%28HPC%29%20Central/page/Linux%20System%20Tuning%20Recommendations
# Increase the default socket buffer read size (rmem_default) and write size (wmem_default)
# *** Maybe recommended only for high-RAM servers? ***
#net.core.rmem_default=16777216
#net.core.wmem_default=16777216
# Increase the max socket buffer size (optmem_max), max socket buffer read size (rmem_max), max socket buffer write size (wmem_max)
# 16MB per socket - which sounds like a lot, but will virtually never consume that much
# rmem_max/wmem_max cap buffer sizes applications request via SO_RCVBUF/SO_SNDBUF; tcp_rmem/tcp_wmem govern TCP autotuning; optmem_max limits per-socket ancillary memory
#net.core.optmem_max=16777216
#net.core.rmem_max=16777216
#net.core.wmem_max=16777216
# Configure the Min, Pressure, Max values (units are in page size)
# Useful mostly for very high-traffic websites that have a lot of RAM
# Consider that we already set the *_max values to 16777216
# So you may eventually comment these three lines
#net.ipv4.tcp_mem=16777216 16777216 16777216
#net.ipv4.tcp_wmem=4096 87380 16777216
#net.ipv4.tcp_rmem=4096 87380 16777216
# Disable TCP SACK (TCP Selective Acknowledgement), DSACK (duplicate TCP SACK), and FACK (Forward Acknowledgement)
# SACK requires enabling tcp_timestamps and adds some packet overhead
# Only advised in cases of packet loss on the network
#net.ipv4.tcp_sack = 0
#net.ipv4.tcp_dsack = 0
#net.ipv4.tcp_fack = 0
# Disable TCP timestamps
# Can have a performance overhead and is only advised in cases where sack is needed (see tcp_sack)
#net.ipv4.tcp_timestamps=0