Compare commits

40 Commits

Author SHA1 Message Date
nom
acbe218f78 upgrade traefik 3.6.2 2025-11-22 07:57:57 +01:00
9731aa7631 modif du Readme 2025-11-19 19:48:04 +01:00
eccd0a5ddc ldaps commenté le temps de mettre en prod 2025-11-19 10:40:46 +01:00
5e858b86f3 ldap: autoriser ldaps via traefik 2025-11-19 10:30:05 +01:00
nom
5dc5155c53 upgrade MM 11.1 2025-11-19 00:10:59 +01:00
f90620b268 python: lib mattermost 2025-11-16 10:30:37 +01:00
f5678d6c67 cosmétique 2025-11-16 10:00:48 +01:00
3acc408eac date 2025-11-16 09:52:20 +01:00
789917abae ajout suppression du mail de secours dans la liste infos 2025-11-16 09:49:53 +01:00
d6dbe1212c checkpahekoldap: forwardings 2025-11-14 21:20:52 +01:00
e50911b40e nettoyage 2025-11-14 14:32:27 +01:00
2100395ef3 modif 2025-11-14 14:15:44 +01:00
nom
d1d637f213 suppr Dockerfile 2025-11-12 17:40:48 +01:00
972006cc81 upgrade dokuwiki to "dokuwiki/dokuwiki" 2025-11-09 07:04:46 +01:00
nom
4fc0f4ed74 maj display_name en display-name pour mmctl 2025-11-03 09:02:56 +01:00
ca98a51ff2 git en gitea 2025-10-28 05:36:56 +01:00
e462c383a5 ajout de git dans le save 2025-10-28 05:34:00 +01:00
39aa417d11 Merge branch 'master' of ssh://git.kaz.bzh:2202/KAZ/KazV2 2025-10-24 16:40:54 +02:00
544c2aa98c pahekoldap 2025-10-24 16:40:38 +02:00
12e2d44b49 modif des sauvegardes mysql 2025-10-24 11:48:16 +02:00
a434fd0a0b modif des sauvegardes postgres 2025-10-24 11:41:38 +02:00
dec60816e9 backup mattermost 2025-10-24 11:30:14 +02:00
80aa289e8b mattermost mostlymatter 2025-10-24 11:17:30 +02:00
777f7a8412 mattermost mostlymatter commenté 2025-10-23 16:54:49 +02:00
3bf2fc45de mattermost mostlymatter 2025-10-23 16:45:38 +02:00
b0b0059d47 dockerfile mattermost 2025-10-23 15:40:58 +02:00
d63f662f22 pahekoldap 2025-10-22 17:51:52 +02:00
e3c76ea3eb pahekoldap 2025-10-22 17:42:23 +02:00
c453deddfd python libs 2025-10-22 17:14:47 +02:00
nom
519e34e018 upgrade mattermost en 11.0 2025-10-21 22:23:41 +02:00
964597094a pahekoldap 2025-10-21 18:05:51 +02:00
nom
d9c4853e49 upgrade paheko en 1.3.16 2025-10-20 20:47:17 +02:00
d85517268b pahekoldap 2025-10-17 16:11:30 +02:00
5fa99990ed pahekoldap 2025-10-17 12:34:32 +02:00
b15b23f37e nom du serveur# 2025-10-16 17:17:18 +02:00
bb0711e3d5 suppression du repertoire des mail et ajout de la place prise pour les
mails
2025-10-16 16:33:21 +02:00
49fca02bf7 pahekoldap 2025-10-15 20:35:41 +02:00
79e8d18be3 pahekoldap 2025-10-15 10:54:08 +02:00
08889f8671 supp 2025-10-15 09:09:28 +02:00
f2854c4280 sup 2025-10-15 09:02:51 +02:00
23 changed files with 409 additions and 299 deletions

View File

@@ -4,45 +4,190 @@ import sys
from lib.paheko import Paheko
from lib.ldap import Ldap
from lib.ssh import Ssh
from lib.misc import get_disk_size
paheko = Paheko()
categorie_membres = paheko.get_categorie_id("Membres")
membres = paheko.get_users_in_categorie(categorie_membres)
categorie_collegiale = paheko.get_categorie_id("Collégiale")
membres += paheko.get_users_in_categorie(categorie_collegiale)
categorie_administrateurs = paheko.get_categorie_id("Administrateurs")
membres += paheko.get_users_in_categorie(categorie_administrateurs)
def test_services(paheko_entry, ldap_entry):
# Vérifie que les services orga activés sont bien désactivés sur le mutu. Juste nextcloud pour l'instant.
is_orga = paheko_entry["admin_orga"].strip("'") == "Oui"
if is_orga:
paheko_has_cloud = paheko_entry["cloud"].strip("'") == "Oui"
ldap_cloud_enabled = ldap_entry[1]['nextcloudEnabled'][0] == b"TRUE"
# if (paheko_has_cloud and ldap_cloud_enabled):
# path = f"/var/lib/docker/volumes/cloud_cloudData/_data/{ldap_entry[1]['identifiantKaz'][0].decode()}"
# size = get_disk_size(path)
# if size < 40529249:
# print(f"{ldap_entry[1]['identifiantKaz'][0].decode()} {size}")
# with Ldap() as ldap:
# print(ldap_entry[1]['mail'][0].decode())
# ldap.update_user(ldap_entry[1]['mail'][0].decode(), "nextcloudEnabled", b"FALSE")
return not (paheko_has_cloud and ldap_cloud_enabled)
return True
def test_quota(paheko_entry, ldap_entry):
ok = True
quota_disque = paheko_entry["quota_disque"].strip("'")
if f"{quota_disque}G".encode() != ldap_entry[1]['mailQuota'][0]:
ok = False
quota_paheko = int(paheko_entry["quota_disque"].strip("'"))
quota_nextcloud = int(ldap_entry[1]['nextcloudQuota'][0][:-3])
quota_mail = int(ldap_entry[1]['mailQuota'][0][:-1])
quota_global = int(ldap_entry[1]['quota'][0])
is_orga = paheko_entry["admin_orga"].strip("'") == "Oui"
has_mail = ldap_entry[1]['mailEnabled'][0] == b"TRUE"
has_nextcloud = ldap_entry[1]['nextcloudEnabled'][0] == b"TRUE"
if quota_paheko != quota_global:
return False
quota_given = 0
if has_mail:
quota_given += quota_mail
if has_nextcloud:
quota_given += quota_nextcloud
if is_orga:
linked_emails = paheko_entry["emails_rattaches"]
if linked_emails:
for linked_email in linked_emails.splitlines():
ldap_linked_entry = ldap.get_email(linked_email)
if ldap_linked_entry:
quota_given += int(ldap_linked_entry[0][1]['mailQuota'][0][:-1])
# beaucoup ont en fait xGO de mail et xGO de cloud pour xGO en tout, à corriger à terme.
if quota_global * 2 == quota_given or 1 + quota_global * 2 == quota_given:
return True
# On laisse 1GO de rab' pour tous ceux qui ont 10 de cloud et 1 de mail.
if quota_given <= quota_global + 1:
return True
return False
return ok
def test_mail_secours(paheko_entry, ldap_entry):
try:
if paheko_entry["email_secours"]:
return paheko_entry["email_secours"].strip("'").encode() == ldap_entry[1]['mailDeSecours'][0]
else:
# Vérifie que le mail de secours dans le LDAP correspond à celui dans paheko.
if paheko_entry["email_secours"]:
return paheko_entry["email_secours"].strip("'").encode() == ldap_entry[1]['mailDeSecours'][0]
else:
return False
def test_mails_orga(paheko_entry):
# Vérifie que les mails des orgas sont bien dans le LDAP.
is_orga = paheko_entry["admin_orga"].strip("'") == "Oui"
res = []
if is_orga:
linked_emails = paheko_entry["emails_rattaches"]
if linked_emails:
for linked_email in linked_emails.splitlines():
ldap_linked_entry = ldap.get_email(linked_email)
if not ldap_linked_entry:
res.append(linked_email)
return res
def test_server_location(paheko_entry):
# Vérifie que le serveur est bien renseigné.
is_orga = paheko_entry["admin_orga"].strip("'") == "Oui"
if is_orga and (paheko_entry["cloud"].strip("'") == "Oui" or paheko_entry["wordpress"].strip("'") == "Oui"):
serveur_prod = paheko_entry["serveur_prod"]
if not serveur_prod:
return False
except e:
print(paheko_entry)
print(ldap_entry)
raise e
directory = f"/kaz/dockers/{paheko_entry['nom_orga']}-orga"
with Ssh(serveur_prod) as ssh:
return ssh.check_return_code(f"ls {directory}") == 0
return True
with Ldap() as ldap:
try:
not_in_ldap = []
not_in_paheko = []
mail_secours = []
quota = []
services = []
mails_orgas = []
servers_locations = []
for membre in membres:
ldap_entry = ldap.get_email(membre["email"])[0]
ldap_entry = ldap.get_email(membre["email"])
if ldap_entry:
ldap_entry = ldap_entry[0]
ok = True
if not test_mail_secours(membre, ldap_entry):
mail_secours.append(f"{membre['email']}: Paheko {membre['email_secours']}, LDAP {ldap_entry[1]['mailDeSecours'][0].decode()}")
#ok &= test_quota(membre, ldap_entry)
ok &= test_mail_secours(membre, ldap_entry)
if not test_services(membre, ldap_entry):
path = f"/var/lib/docker/volumes/cloud_cloudData/_data/{ldap_entry[1]['identifiantKaz'][0].decode()}"
size = int(get_disk_size(path) / 1024 / 1024)
services.append(f"{membre['email']}: Paheko {membre['cloud']}, LDAP {ldap_entry[1]['nextcloudEnabled'][0].decode()}, espace cloud commun {size}Mo")
if not ok:
print(membre)
print(ldap_entry)
print()
if not test_quota(membre, ldap_entry):
suffix = ""
if membre['emails_rattaches']:
suffix = " rattachés\n " + "\n ".join(membre['emails_rattaches'].splitlines())
quota.append(f"{membre['email']}: Paheko {membre['quota_disque']}, LDAP mail {ldap_entry[1]['mailQuota'][0].decode()} cloud {ldap_entry[1]['nextcloudQuota'][0].decode()} quotaGlobal {ldap_entry[1]['quota'][0].decode()}{suffix}")
mails_orga = test_mails_orga(membre)
if mails_orga:
suffix = '\n '.join(mails_orga)
mails_orgas.append(f"{membre['email']}:\n {suffix}")
# if not test_server_location(membre):
# servers_locations.append(f"{membre['email']} - {membre['nom_orga']}, pas trouvé sur {membre['serveur_prod']}")
else:
ldap_entry = ldap.get_mail_forwarding(membre["email"])
if not ldap_entry:
not_in_ldap.append(f"{membre['email']} / id : {membre['id']}")
except Exception as e:
print(membre)
print(ldap.get_email(membre["email"]))
raise e
ldap_users = ldap.get_users()
for ldap_user in ldap_users:
ldap_user = ldap_user[1]
paheko_entry = [x for x in membres if x["email"] == ldap_user["mail"][0].decode() or (x["emails_rattaches"] and ldap_user["mail"][0].decode() in x["emails_rattaches"])]
paheko_entry = paheko_entry[0] if len(paheko_entry) else None
if paheko_entry:
pass
else:
not_in_paheko.append(ldap_user["mail"][0].decode())
ldap_forwardings = ldap.get_mail_forwardings()
for ldap_user in ldap_forwardings:
ldap_user = ldap_user[1]
paheko_entry = [x for x in membres if x["email"] == ldap_user["mailAlias"][0].decode() or (x["forward"] and ldap_user["mailAlias"][0].decode() in x["forward"])]
paheko_entry = paheko_entry[0] if len(paheko_entry) else None
if paheko_entry:
pass
else:
not_in_paheko.append(ldap_user["mailAlias"][0].decode() + " (forwarding)")
print("Mails dans paheko mais pas dans le LDAP :")
print("\n".join(not_in_ldap))
print("Mails dans le LDAP mais pas dans paheko :")
print("\n".join(not_in_paheko))
print("\nMails de secours pas ok dans le LDAP :")
print("\n".join(mail_secours))
print("\nServices pas ok dans le LDAP (ont nextcloud commun + dédié) :")
print("\n".join(services))
print("\nQuotas pas ok dans le LDAP :")
print("\n".join(quota))
print("\nMails d'orga dans paheko mais manquant dans le LDAP :")
print("\n".join(mails_orgas))
print("\nOrgas pas trouvées sur le serveur renseigné dans paheko :")
print("\n".join(servers_locations))

View File

@@ -181,7 +181,7 @@ saveComposes () {
sympa)
echo "save sympa"
. $KAZ_KEY_DIR/env-sympaDB
saveDB ${sympaDBName} "${DB_MYSQL_USER}" "${DB_MYSQL_PASSWORD}" "${DB_MYSQL_DATABASE}" sympa mysql
saveDB ${sympaDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" sympa mysql
;;
web)
# rien à faire (fichiers)
@@ -189,17 +189,22 @@ saveComposes () {
etherpad)
echo "save pad"
. $KAZ_KEY_DIR/env-etherpadDB
saveDB ${etherpadDBName} "${DB_MYSQL_USER}" "${DB_MYSQL_PASSWORD}" "${DB_MYSQL_DATABASE}" etherpad mysql
saveDB ${etherpadDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" etherpad mysql
;;
gitea)
echo "save gitea"
. $KAZ_KEY_DIR/env-gitDB
saveDB ${gitDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" gitea mysql
;;
framadate)
echo "save date"
. $KAZ_KEY_DIR/env-framadateDB
saveDB ${framadateDBName} "${DB_MYSQL_USER}" "${DB_MYSQL_PASSWORD}" "${DB_MYSQL_DATABASE}" framadate mysql
saveDB ${framadateDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" framadate mysql
;;
cloud)
echo "save cloud"
. $KAZ_KEY_DIR/env-nextcloudDB
saveDB ${nextcloudDBName} "${DB_MYSQL_USER}" "${DB_MYSQL_PASSWORD}" "${DB_MYSQL_DATABASE}" nextcloud mysql
saveDB ${nextcloudDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" nextcloud mysql
;;
paheko)
# rien à faire (fichiers)
@@ -207,32 +212,32 @@ saveComposes () {
mattermost)
echo "save mattermost"
. $KAZ_KEY_DIR/env-mattermostDB
saveDB matterPG "${DB_POSTGRES_USER}" "${DB_POSTGRES_PASSWORD}" "${DB_POSTGRES_DB}" mattermost postgres
saveDB matterPG "${POSTGRES_USER}" "${POSTGRES_PASSWORD}" "${POSTGRES_DB}" mattermost postgres
;;
mobilizon)
echo "save mobilizon"
. $KAZ_KEY_DIR/env-mobilizonDB
saveDB ${mobilizonDBName} "${DB_POSTGRES_USER}" "${DB_POSTGRES_PASSWORD}" "${DB_POSTGRES_DB}" mobilizon postgres
saveDB ${mobilizonDBName} "${POSTGRES_USER}" "${POSTGRES_PASSWORD}" "${POSTGRES_DB}" mobilizon postgres
;;
peertube)
echo "save peertube"
. $KAZ_KEY_DIR/env-peertubeDB
saveDB ${peertubeDBName} "${DB_POSTGRES_USER}" "${DB_POSTGRES_PASSWORD}" "${DB_PEERTUBE_DB_HOSTNAME}" peertube postgres
saveDB ${peertubeDBName} "${POSTGRES_USER}" "${POSTGRES_PASSWORD}" "${PEERTUBE_DB_HOSTNAME}" peertube postgres
;;
mastodon)
echo "save mastodon"
. $KAZ_KEY_DIR/env-mastodonDB
saveDB ${mastodonDBName} "${DB_POSTGRES_USER}" "${DB_POSTGRES_PASSWORD}" "${DB_POSTGRES_DB}" mastodon postgres
saveDB ${mastodonDBName} "${POSTGRES_USER}" "${POSTGRES_PASSWORD}" "${POSTGRES_DB}" mastodon postgres
;;
roundcube)
echo "save roundcube"
. $KAZ_KEY_DIR/env-roundcubeDB
saveDB ${roundcubeDBName} "${DB_MYSQL_USER}" "${DB_MYSQL_PASSWORD}" "${DB_MYSQL_DATABASE}" roundcube mysql
saveDB ${roundcubeDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" roundcube mysql
;;
vaultwarden)
echo "save vaultwarden"
. $KAZ_KEY_DIR/env-vaultwardenDB
saveDB ${vaultwardenDBName} "${DB_MYSQL_USER}" "${DB_MYSQL_PASSWORD}" "${DB_MYSQL_DATABASE}" vaultwarden mysql
saveDB ${vaultwardenDBName} "${MYSQL_USER}" "${MYSQL_PASSWORD}" "${MYSQL_DATABASE}" vaultwarden mysql
;;
dokuwiki)
# rien à faire (fichiers)

View File

@@ -606,7 +606,7 @@ userPassword: {CRYPT}${pass}\n\n' | ldapmodify -c -H ldap://${LDAP_IP} -D \"cn=$
# l'équipe existe t-elle déjà ?
nb=$(docker exec mattermostServ bin/mmctl team list | grep -w "${EQUIPE_AGORA}" | wc -l)
if [ "${nb}" == "0" ];then # non, on la créé en mettant le user en admin de l'équipe
echo "docker exec -i mattermostServ bin/mmctl team create --name ${EQUIPE_AGORA} --display_name ${EQUIPE_AGORA} --email ${EMAIL_SOUHAITE}" --private | tee -a "${CMD_INIT}"
echo "docker exec -i mattermostServ bin/mmctl team create --name ${EQUIPE_AGORA} --display-name ${EQUIPE_AGORA} --email ${EMAIL_SOUHAITE}" --private | tee -a "${CMD_INIT}"
fi
# puis ajouter le user à l'équipe
echo "docker exec -i mattermostServ bin/mmctl team users add ${EQUIPE_AGORA} ${EMAIL_SOUHAITE}" | tee -a "${CMD_INIT}"

View File

@@ -13,7 +13,7 @@ setKazVars
. $KAZ_KEY_DIR/env-sympaServ
. $KAZ_KEY_DIR/env-paheko
VERSION="13-10-2025"
VERSION="16-11-2025"
PRG=$(basename $0)
RACINE=$(echo $PRG | awk '{print $1}')
IFS=' '
@@ -139,7 +139,7 @@ searchEmail() {
searchMattermost() {
#Ici $1 est une adresse email
. $KAZ_KEY_DIR/env-mattermostAdmin
docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings auth login $httpProto://$URL_AGORA --name local-server --username $mattermost_user --password $mattermost_pass >/dev/null 2>&1
docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings auth login $httpProto://$URL_AGORA --name local-server --username $mattermost_user --password $mattermost_pass >/dev/null 2>&1
docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings config set ServiceSettings.EnableAPIUserDeletion "true" >/dev/null 2>&1
#on créé la list des mails dans mattermost
docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings user list --all >${TFILE_MAILS_MATTERMOST} 2>/dev/null
@@ -167,9 +167,9 @@ infoEmail() {
while :
do
clear
echo "------------------------------------------------"
echo "------------------------------------------------"
printKazMsg "${ACTION_EN_COURS}"
echo "------------------------------------------------"
echo "------------------------------------------------"
read -p "Alias ou Mail ? (R pour retour ou M/A [M] :" RINFOMAIL
case ${RINFOMAIL} in
"" | M | m )
@@ -184,9 +184,6 @@ infoEmail() {
echo " ------------------------------------------------"
printKazMsg " DETAILS DU COMPTE DANS NEXTCLOUD PRINCIPAL"
echo -e ""
#TEMP_USER_NC=$(mktemp /tmp/$RACINE.XXXXXXXXX.TEMP_USER_NC)
#curl -s -o $TEMP_USER_NC -X GET -H 'OCS-APIRequest:true' $httpProto://admin:$NEXTCLOUD_ADMIN_PASSWORD@$URL_NC/ocs/v1.php/cloud/users?search=$CHOIX_MAIL
#cat $TEMP_USER_NC | grep -i "element" | sed -e s/[\<\>\/]//g | sed -e s/element//g
echo -ne "${NC}"
echo -ne " - Nextcloud enable : "
echo -ne "${GREEN}"
@@ -195,11 +192,14 @@ infoEmail() {
echo -e "${NC} ------------------------------------------------"
printKazMsg " DETAILS DU COMPTE DANS LDAP ET PAHEKO"
echo ""
curl -s ${URL_PAHEKO}/api/sql -d "SELECT nom,adresse,code_postal,ville,email,email_secours,admin_orga,nom_orga,quota_disque FROM users where email='${CHOIX_MAIL}' LIMIT 1;" >/tmp/$CHOIX_MAIL-paheko.json
curl -s ${URL_PAHEKO}/api/sql -d "SELECT nom,adresse,code_postal,ville,email,email_secours,admin_orga,nom_orga,quota_disque,serveur_prod FROM users where email='${CHOIX_MAIL}' LIMIT 1;" >/tmp/$CHOIX_MAIL-paheko.json
jq .results[].nom /tmp/$CHOIX_MAIL-paheko.json
jq .results[].adresse /tmp/$CHOIX_MAIL-paheko.json
jq .results[].code_postal /tmp/$CHOIX_MAIL-paheko.json
jq .results[].ville /tmp/$CHOIX_MAIL-paheko.json
SERVEUR_PROD=$(jq .results[].serveur_prod /tmp/$CHOIX_MAIL-paheko.json)
NOM_ORGA=$(jq .results[].nom_orga /tmp/$CHOIX_MAIL-paheko.json)
[ "${NOM_ORGA}" = "null" ] || echo -e " - ORGA : ${GREEN}${NOM_ORGA}${NC} sur serveur ${GREEN}${SERVEUR_PROD}${NC} "
echo -n " - Quota (Paheko) : "
echo -ne "${GREEN}"
jq .results[].quota_disque /tmp/$CHOIX_MAIL-paheko.json
@@ -208,6 +208,11 @@ infoEmail() {
echo -ne "${GREEN}"
ldapsearch -H ldap://${LDAP_IP} -D "cn=${LDAP_ADMIN_USERNAME},${ldap_root}" -x -w "${LDAP_ADMIN_PASSWORD}" -b "cn=${CHOIX_MAIL},ou=users,${ldap_root}" | grep -i mailquota | cut -c 11-60
echo -ne "${NC}"
echo -ne " - Place disque des mails : "
cd ${DOCK_VOL}/postfix_mailData/_data/${DOMAINE_EN_COURS}
echo -ne "${GREEN}"
du -sh $(echo ${CHOIX_MAIL} | sed -e 's/@.*//') | cut -c 1-4
echo -ne "${NC}"
echo -n " - Quota Nextcloud (Ldap) : "
echo -ne "${GREEN}"
ldapsearch -H ldap://${LDAP_IP} -D "cn=${LDAP_ADMIN_USERNAME},${ldap_root}" -x -w "${LDAP_ADMIN_PASSWORD}" -b "cn=${CHOIX_MAIL},ou=users,${ldap_root}" | grep -i nextcloudquota | cut -c 17-60
@@ -239,11 +244,11 @@ infoEmail() {
;;
A | a )
searchEmail alias
echo "------------------------------------------------"
echo "------------------------------------------------"
echo " Alias : ${CHOIX_MAIL} "
echo ""
for INFOALIAS in $(ldapsearch -H ldap://${LDAP_IP} -x -D "cn=${LDAP_ADMIN_USERNAME},${ldap_root}" \
-w "${LDAP_ADMIN_PASSWORD}" -b "${ldap_root}" "(&(objectclass=PostfixBookMailForward)(cn=*${CHOIX_MAIL}*))" mail \
-w "${LDAP_ADMIN_PASSWORD}" -b "${ldap_root}" "(&(objectclass=PostfixBookMailForward)(cn=*${CHOIX_MAIL}*))" mail \
| grep ^mail: | sed -e 's/^mail://')
do
echo -ne "=====> ${GREEN} "
@@ -270,16 +275,21 @@ searchDestroy() {
CHOIX_MAIL=""
searchEmail
REP_SEARCH_DESTROY=$CHOIX_MAIL
echo "CHOIX=$REP_SEARCH_DESTROY"
echo "domaine en cours : ${DOMAINE_EN_COURS}"
MAIL_SECOURS=$(ldapsearch -H ldap://${LDAP_IP} \
-x -D "cn=${LDAP_ADMIN_USERNAME},${ldap_root}" \
-w "${LDAP_ADMIN_PASSWORD}" \
-b "${ldap_root}" "(&(objectclass=inetOrgPerson)(cn=*${REP_SEARCH_DESTROY}*))" | grep ^mailDeSecours | sed -e 's/^mailDeSecours: //')
echo "Mail en cours = $REP_SEARCH_DESTROY"
echo "Mail de secours = ${MAIL_SECOURS}"
echo "Domaine en cours : ${DOMAINE_EN_COURS}"
echo "--------------------------------- SUPPRESION ----------------------------------------"
while :
do
echo "----------------------------------------------------------------------"
printKazMsg "${GREEN}${ACTION_EN_COURS}${NC}"
echo "----------------------------------------------------------------------"
echo "----------------------------------------------------------------------"
printKazMsg "${GREEN}${ACTION_EN_COURS}${NC}"
echo "----------------------------------------------------------------------"
echo -e "${BLINK} TOUT RETOUR EN ARRIERE EST IMPOSSIBLE ${NC}"
read -p "ON CONTINUE ? [ o / n ]: " SEARCH_DESTROY_INPUT
read -p "ON CONTINUE ? [ o / n ]: " SEARCH_DESTROY_INPUT
if [ "$SEARCH_DESTROY_INPUT" = "n" ] || [ "$SEARCH_DESTROY_INPUT" = "N" ]
then
searchDestroy
@@ -327,10 +337,11 @@ searchDestroy() {
fi
echo -e "${NC}"
echo ""
echo -e "${RED} suppression de ${REP_SEARCH_DESTROY} dans la liste info de sympa"
echo -e "${RED} suppression de ${REP_SEARCH_DESTROY} et ${MAIL_SECOURS} dans la liste info de sympa"
echo -e "${NC}"
echo ""
docker exec -ti sympaServ /usr/lib/sympa/bin/sympa_soap_client.pl --soap_url=${httpProto}://${URL_LISTE}/sympasoap --trusted_application=${SOAP_USER} --trusted_application_password=${SOAP_PASSWORD} --proxy_vars=USER_EMAIL=${LISTMASTER} --service=del --service_parameters="${NL_LIST},${REP_SEARCH_DESTROY}"
docker exec -ti sympaServ /usr/lib/sympa/bin/sympa_soap_client.pl --soap_url=${httpProto}://${URL_LISTE}/sympasoap --trusted_application=${SOAP_USER} --trusted_application_password=${SOAP_PASSWORD} --proxy_vars=USER_EMAIL=${LISTMASTER} --service=del --service_parameters="${NL_LIST},${MAIL_SECOURS}"
echo -e "${NC}"
echo ""
echo -e "${RED} suppression de ${REP_SEARCH_DESTROY} dans le serveur de mail"
@@ -347,19 +358,21 @@ searchDestroy() {
echo -e "${RED} suppression de ${REP_SEARCH_DESTROY} dans le ldap"
echo -e "${NC}"
echo ""
MAIL_SECOURS=$(ldapsearch -H ldap://${LDAP_IP} \
-x -D "cn=${LDAP_ADMIN_USERNAME},${ldap_root}" \
-w "${LDAP_ADMIN_PASSWORD}" \
-b "${ldap_root}" "(&(objectclass=inetOrgPerson)(cn=*${REP_SEARCH_DESTROY}*))" | grep ^mailDeSecours | sed -e 's/^mailDeSecours: //')
ldapdelete -H ldap://${LDAP_IP} -D "cn=${LDAP_ADMIN_USERNAME},${ldap_root}" -x -w "${LDAP_ADMIN_PASSWORD}" "cn=${REP_SEARCH_DESTROY},ou=users,${ldap_root}"
if [ "$?" -eq "0" ]
then
printKazMsg "Suppresion ok"
cd ${DOCK_VOL}/postfix_mailData/_data/${DOMAINE_EN_COURS}
USER_DEL_MAIL=$(echo ${REP_SEARCH_DESTROY} | sed -e 's/@.*//')
echo "Suppression des données ${USER_DEL_MAIL}"
rm -rf ${USER_DEL_MAIL} 2>/dev/null
sleep 2
else
printKazError "Erreur de suppression"
fi
printKazMsg "Envoi d'un message dans mattermost pour la suppression du compte"
docker exec -ti mattermostServ bin/mmctl post create kaz:Creation-Comptes --message "Le compte ${REP_SEARCH_DESTROY} est supprimé" >/dev/null 2>&1
docker exec -ti mattermostServ bin/mmctl post create kaz:Creation-Comptes --message "Le compte ${REP_SEARCH_DESTROY} est supprimé , mail envoyé à ${MAIL_SECOURS}" >/dev/null 2>&1
MAIL_SUPPR="Le compte ${REP_SEARCH_DESTROY} est supprimé"
OLDIFS=${IFS}
IFS=''
@@ -1185,4 +1198,3 @@ esac
[ ! -e ${KAZ_CONF_DIR}/autorized-domains.txt ] && { echo "création de ${KAZ_CONF_DIR}/autorized-domains.txt" ; touch ${KAZ_CONF_DIR}/autorized-domains.txt;}
! grep $domain ${KAZ_CONF_DIR}/autorized-domains.txt && echo $domain >> ${KAZ_CONF_DIR}/autorized-domains.txt
Main

View File

@@ -34,6 +34,34 @@ class Ldap:
return result
def get_users(self):
"""
Renvoie tous les utilisateurs.
"""
# Créer une chaîne de filtre pour rechercher dans les champs "cn" et "mailAlias"
result = self.ldap_connection.search_s("ou=users,{}".format(self.ldap_root), ldap.SCOPE_ONELEVEL)
return result
def get_mail_forwarding(self, email):
"""
Renvoie le mail forwarding
"""
# Créer une chaîne de filtre pour rechercher dans les champs "cn" et "mailAlias"
filter_str = "(|(cn={})(mailAlias={}))".format(email, email)
result = self.ldap_connection.search_s("ou=mailForwardings,{}".format(self.ldap_root), ldap.SCOPE_SUBTREE, filter_str)
return result
def get_mail_forwardings(self):
"""
Renvoie tous les mail forwardings.
"""
# Créer une chaîne de filtre pour rechercher dans les champs "cn" et "mailAlias"
result = self.ldap_connection.search_s("ou=mailForwardings,{}".format(self.ldap_root), ldap.SCOPE_ONELEVEL)
return result
def delete_user(self, email):
"""
Supprimer un utilisateur du LDAP par son adresse e-mail
@@ -99,3 +127,17 @@ class Ldap:
return True
def update_user(self, email, field, value):
"""
Mettre à jour un champ.
"""
if not validate_email(email):
return False
# Construire le DN
dn = f"cn={email},ou=users,{self.ldap_root}"
mod_attrs = [(ldap.MOD_REPLACE, field, value)]
self.ldap_connection.modify_s(dn, mod_attrs)
return True

View File

@@ -2,8 +2,9 @@ import subprocess
from .config import getDockersConfig, getSecretConfig
mattermost_user = getSecretConfig("mattermostServ", "MM_ADMIN_USER")
mattermost_pass = getSecretConfig("mattermostServ", "MM_ADMIN_PASSWORD")
mattermost_user = getSecretConfig("mattermostAdmin", "mattermost_user")
mattermost_pass = getSecretConfig("mattermostAdmin", "mattermost_pass")
# mattermost_token = getSecretConfig("mattermostAdmin", "mattermost_token")
mattermost_url = f"https://{getDockersConfig('matterHost')}.{getDockersConfig('domain')}"
mmctl = "docker exec -i mattermostServ bin/mmctl"
@@ -23,6 +24,8 @@ class Mattermost:
def authenticate(self):
# Authentification sur MM
cmd = f"{mmctl} auth login {mattermost_url} --name local-server --username {mattermost_user} --password {mattermost_pass}"
# ou (si ça casse le token ?)
# cmd = f"{mmctl} auth login {mattermost_url} --name local-server --access-token {mattermost_token}"
subprocess.run(cmd, shell=True, stderr=subprocess.STDOUT, check=True)
@@ -131,4 +134,3 @@ class Mattermost:
cmd = f"{mmctl} team delete {equipe} --confirm"
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return output.decode()

12
bin/lib/misc.py Normal file
View File

@@ -0,0 +1,12 @@
import os
def get_disk_size(path):
    """Return the total size in bytes of all regular files under *path*.

    Walks the directory tree recursively. Symbolic links are skipped so
    data living outside the volume is not counted. Files that disappear
    between the directory listing and the stat call are ignored: this is
    used to measure live docker volumes (mail, nextcloud) that other
    processes modify while the scan runs, so a vanished file must not
    abort the whole measurement. A nonexistent *path* yields 0.
    """
    total_size = 0
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            file_path = os.path.join(dirpath, name)
            # skip if it is a symbolic link
            if os.path.islink(file_path):
                continue
            try:
                total_size += os.path.getsize(file_path)
            except OSError:
                # file removed (or became unreadable) mid-walk — best effort
                continue
    return total_size

24
bin/lib/ssh.py Normal file
View File

@@ -0,0 +1,24 @@
import paramiko
class Ssh:
    """Context-manager wrapper around a paramiko SSH connection.

    Usage:
        with Ssh("server.example.org") as ssh:
            out = ssh.check_output("ls /")

    The connection targets port 2201 and is opened in __enter__,
    closed in __exit__.
    """

    def __init__(self, server, username="root"):
        # The paramiko client is created lazily when entering the context.
        self.ssh_connection = None
        self.server = server
        self.username = username

    def __enter__(self):
        self.ssh_connection = paramiko.SSHClient()
        # Trust unknown host keys automatically — internal infrastructure,
        # hosts are not pre-registered in known_hosts.
        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_connection.connect(self.server, port=2201, username=self.username)
        return self

    def __exit__(self, tp, e, traceback):
        self.ssh_connection.close()

    def check_output(self, command):
        """Run *command* on the remote host and return its decoded stdout."""
        _stdin, stdout, _stderr = self.ssh_connection.exec_command(command)
        return stdout.read().decode()

    def check_return_code(self, command):
        """Run *command* on the remote host and return its exit status."""
        _stdin, stdout, _stderr = self.ssh_connection.exec_command(command)
        return stdout.channel.recv_exit_status()

View File

@@ -117,7 +117,7 @@ Version(){
Reload(){
# $1 ContainerName
if [ -f "${VOL_PREFIX}wikiData/_data/farms/init.sh" ]; then
${SIMU} docker exec -ti "${1}" /dokuwiki/data/farms/init.sh
${SIMU} docker exec -ti "${1}" /storage/data/farms/init.sh
${SIMU} pkill -KILL lighttpd
fi
}

View File

@@ -1,43 +0,0 @@
#!/bin/bash
CV1=/kaz-old/bin/container.sh
DV1=/kaz-old/dockers
EV1=/kaz-old/config
SV1=/kaz-old/secret
BV2=/kaz/bin
DV2=/kaz/dockers
EV2=/kaz/config
SV2=/kaz/secret
OV2=/kaz/config/orgaTmpl/orga-gen.sh
[ -x "${CV1}" ] || exit
[ -d "${BV2}" ] || exit
SIMU="echo SIMU"
${SIMU} "${CV1}" stop orga
${SIMU} "${CV1}" stop
${SIMU} rsync "${EV1}/dockers.env" "${EV2}/"
${SIMU} rsync "${SV1}/" "${SV2}/"
# XXX ? rsync /kaz/secret/allow_admin_ip /kaz-git/secret/allow_admin_ip
${SIMU} "${BV2}/container.sh" start cloud dokuwiki ethercalc etherpad framadate paheko gitea jirafeau mattermost postfix proxy roundcube web
${SIMU} rsync -aAHXh --info=progress2 "${DV1}/web/html/" "/var/lib/docker/volumes/web_html/_data/"
${SIMU} chown -R www-data: "/var/lib/docker/volumes/web_html/_data/"
${SIMU} cd "${DV1}"
cd "${DV1}"
for ORGA_DIR in *-orga; do
services=$(echo $([ -x "${ORGA_DIR}/tmpl-gen.sh" ] && "${ORGA_DIR}/tmpl-gen.sh" -l))
if [ -n "${services}" ]; then
ORGA="${ORGA_DIR%-orga}"
echo " * ${ORGA}: ${services}"
${SIMU} "${OV2}" "${ORGA}" $(for s in ${services}; do echo "+${s}"; done)
fi
done

View File

@@ -1,13 +0,0 @@
#!/bin/bash
OLDVERSION=$(docker-compose -v | sed -En 's/.*version ([a-z0-9\.]*).*/\1/p')
DOCKERCOMPOSE_VERSION="v2.17.3"
if [ "$OLDVERSION" = "$DOCKERCOMPOSE_VERSION" ]
then
echo -e "Docker Compose déjà en version $DOCKERCOMPOSE_VERSION"
exit
fi
curl -SL https://github.com/docker/compose/releases/download/$DOCKERCOMPOSE_VERSION/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose

View File

@@ -153,18 +153,19 @@ services:
#}}
#{{wiki
dokuwiki:
image: mprasil/dokuwiki
image: dokuwiki/dokuwiki
container_name: ${orga}-${dokuwikiServName}
#disk_quota: 10G
restart: ${restartPolicy}
labels:
- "traefik.enable=true"
- "traefik.http.routers.${orga}-${dokuwikiServName}.rule=Host(`${orga}-${dokuwikiHost}.${domain}`){{FOREIGN_DW}}"
- "traefik.http.services.${orga}-${dokuwikiServName}.loadbalancer.server.port=8080"
volumes:
- wikiData:/dokuwiki/data
- wikiConf:/dokuwiki/conf
- wikiPlugins:/dokuwiki/lib/plugins
- wikiLibtpl:/dokuwiki/lib/tpl
- wikiData:/storage/data
- wikiConf:/storage/conf
- wikiPlugins:/storage/lib/plugins
- wikiLibtpl:/storage/lib/tpl
- wikiLogs:/var/log
networks:
- orgaNet

View File

@@ -1,85 +0,0 @@
FROM --platform=${TARGETPLATFORM:-linux/amd64} crazymax/alpine-s6:3.12
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN printf "I am running on ${BUILDPLATFORM:-linux/amd64}, building for ${TARGETPLATFORM:-linux/amd64}\n$(uname -a)\n"
LABEL maintainer="CrazyMax"
########################################
# APT local cache
# work around because COPY failed if no source file
COPY .dummy .apt-mirror-confi[g] .proxy-confi[g] /
RUN cp /.proxy-config /etc/profile.d/proxy.sh 2> /dev/null || true
RUN if [ -f /.apt-mirror-config ] ; then . /.apt-mirror-config && sed -i \
-e "s%s\?://deb.debian.org%://${APT_MIRROR_DEBIAN}%g" \
-e "s%s\?://security.debian.org%://${APT_MIRROR_DEBIAN_SECURITY}%g" \
-e "s%s\?://archive.ubuntu.com%://${APT_MIRROR_UBUNTU}%g" \
-e "s%s\?://security.ubuntu.com%://${APT_MIRROR_UBUNTU_SECURITY}%g" \
/etc/apt/sources.list; fi
########################################
RUN apk --update --no-cache add \
curl \
imagemagick \
inotify-tools \
libgd \
nginx \
php7 \
php7-cli \
php7-ctype \
php7-curl \
php7-fpm \
php7-gd \
php7-imagick \
php7-json \
php7-ldap \
php7-mbstring \
php7-openssl \
php7-pdo \
php7-pdo_sqlite \
php7-session \
php7-simplexml \
php7-sqlite3 \
php7-xml \
php7-zip \
php7-zlib \
shadow \
su-exec \
tar \
tzdata \
&& rm -rf /tmp/* /var/cache/apk/* /var/www/*
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS="2" \
DOKUWIKI_VERSION="2020-07-29" \
DOKUWIKI_MD5="8867b6a5d71ecb5203402fe5e8fa18c9" \
TZ="UTC" \
PUID="1500" \
PGID="1500"
RUN apk --update --no-cache add -t build-dependencies \
gnupg \
wget \
&& cd /tmp \
&& wget -q "https://download.dokuwiki.org/src/dokuwiki/dokuwiki-$DOKUWIKI_VERSION.tgz" \
&& echo "$DOKUWIKI_MD5 /tmp/dokuwiki-$DOKUWIKI_VERSION.tgz" | md5sum -c - | grep OK \
&& tar -xzf "dokuwiki-$DOKUWIKI_VERSION.tgz" --strip 1 -C /var/www \
&& apk del build-dependencies \
&& rm -rf /root/.gnupg /tmp/* /var/cache/apk/*
COPY rootfs /
RUN rm -f /dokuwiki.tgz
COPY htaccess /dokuwiki/.htaccess
RUN chmod a+x /usr/local/bin/* \
&& addgroup -g ${PGID} dokuwiki \
&& adduser -D -H -u ${PUID} -G dokuwiki -s /bin/sh dokuwiki
EXPOSE 8000
WORKDIR /var/www
VOLUME [ "/data" ]
ENTRYPOINT [ "/init" ]
HEALTHCHECK --interval=10s --timeout=5s --start-period=20s \
CMD curl --fail http://127.0.0.1:12345/ping || exit 1

View File

@@ -1,7 +1,7 @@
services:
dokuwiki:
image: mprasil/dokuwiki
image: dokuwiki/dokuwiki
container_name: ${dokuwikiServName}
restart: ${restartPolicy}
# ports:
@@ -12,14 +12,15 @@ services:
external_links:
- ${smtpServName}:${smtpHost}.${domain}
volumes:
- "dokuwikiData:/dokuwiki/data"
- "dokuwikiConf:/dokuwiki/conf"
- "dokuwikiPlugins:/dokuwiki/lib/plugins"
- "dokuwikiLibtpl:/dokuwiki/lib/tpl"
- "dokuwikiData:/storage/data"
- "dokuwikiConf:/storage/conf"
- "dokuwikiPlugins:/storage/lib/plugins"
- "dokuwikiLibtpl:/storage/lib/tpl"
- "dokuwikiLogs:/var/log"
labels:
- "traefik.enable=true"
- "traefik.http.routers.${dokuwikiServName}.rule=Host(`${dokuwikiHost}.${domain}`)"
- "traefik.http.services.${dokuwikiServName}.loadbalancer.server.port=8080"
- "traefik.docker.network=dokuwikiNet"
volumes:

View File

@@ -68,6 +68,16 @@ services:
- /etc/timezone:/etc/timezone:ro
networks:
- ldapNet
# labels:
# - "traefik.enable=true"
# - "traefik.tcp.routers.${ldapServName}.rule=HostSNI(`ldap.${domain}`)"
# - "traefik.tcp.routers.${ldapServName}.entrypoints=ldapsecure"
# - "traefik.tcp.routers.${ldapServName}.tls=true"
# - "traefik.tcp.routers.${ldapServName}.tls.domains[0].main=ldap.${domain}"
# - "traefik.tcp.routers.${ldapServName}.tls.certResolver=letsencrypt"
# - "traefik.tcp.routers.${ldapServName}.middlewares=ldap-ip-allowlist@file"
# - "traefik.tcp.services.${ldapServName}.loadbalancer.server.port=389"
# - "traefik.docker.network=ldapNet"
volumes:
openldapData:

View File

@@ -4,3 +4,9 @@ docker-compose run --rm web bundle exec rails db:setup
Créer un compte admin :
tootctl accounts create adminkaz --email admin@kaz.bzh --confirmed --role Owner
tootctl accounts approve adminkaz
après un upgrade mastodon j'ai dû faire ça
docker-compose run --rm web bundle exec rails db:migrate
De la doc sur ldap :
https://gist.github.com/sigmaris/5db742083a3406c7c385315634640650

View File

@@ -1,6 +1,3 @@
# This file is designed for production server deployment, not local development work
# For a containerized local dev environment, see: https://github.com/mastodon/mastodon/blob/main/docs/DEVELOPMENT.md#docker
services:
db:
container_name: ${mastodonDBName}
@@ -13,8 +10,6 @@ services:
test: ['CMD', 'pg_isready', '-U', 'postgres']
volumes:
- postgres:/var/lib/postgresql/data
# environment:
# - 'POSTGRES_HOST_AUTH_METHOD=trust'
env_file:
- ../../secret/env-mastodonDB
@@ -61,16 +56,11 @@ services:
# - '127.0.0.1:9200:9200'
web:
# You can uncomment the following line if you want to not use the prebuilt image, for example if you have local code changes
# build: .
container_name: ${mastodonServName}
image: ghcr.io/mastodon/mastodon:v4.3.6
image: ghcr.io/mastodon/mastodon:v4.5.1
restart: ${restartPolicy}
environment:
- LOCAL_DOMAIN=${mastodonHost}.${domain}
- SMTP_SERVER=smtp.${domain}
- SMTP_LOGIN=admin@${domain}
- SMTP_FROM_ADDRESS=admin@${domain}
env_file:
- env-config
- ../../secret/env-mastodonServ
@@ -92,27 +82,20 @@ services:
- images:/mastodon/app/javascript/images
labels:
- "traefik.enable=true"
- "traefik.http.routers.koz.rule=Host(`${mastodonHost}.${domain}`)"
- "traefik.http.services.koz.loadbalancer.server.port=3000"
- "traefik.http.routers.mastodon.rule=Host(`${mastodonHost}.${domain}`)"
- "traefik.http.services.mastodon.loadbalancer.server.port=3000"
- "traefik.docker.network=mastodonNet"
streaming:
# You can uncomment the following lines if you want to not use the prebuilt image, for example if you have local code changes
# build:
# dockerfile: ./streaming/Dockerfile
# context: .
container_name: ${mastodonStreamingName}
image: ghcr.io/mastodon/mastodon-streaming:v4.3.6
image: ghcr.io/mastodon/mastodon-streaming:v4.5.1
restart: ${restartPolicy}
environment:
- LOCAL_DOMAIN=${mastodonHost}.${domain}
- SMTP_SERVER=smtp.${domain}
- SMTP_LOGIN=admin@${domain}
- SMTP_FROM_ADDRESS=admin@${domain}
env_file:
- env-config
- ../../secret/env-mastodonServ
- ../../secret/env-mastodonDB
command: node ./streaming/index.js
networks:
- mastodonNet
@@ -126,24 +109,20 @@ services:
- redis
labels:
- "traefik.enable=true"
- "traefik.http.routers.kozs.rule=(Host(`${mastodonHost}.${domain}`) && PathPrefix(`/api/v1/streaming`))"
- "traefik.http.services.kozs.loadbalancer.server.port=4000"
- "traefik.http.routers.mastodons.rule=(Host(`${mastodonHost}.${domain}`) && PathPrefix(`/api/v1/streaming`))"
- "traefik.http.services.mastodons.loadbalancer.server.port=4000"
- "traefik.docker.network=mastodonNet"
sidekiq:
# You can uncomment the following line if you want to not use the prebuilt image, for example if you have local code changes
# build: .
container_name: ${mastodonSidekiqName}
image: ghcr.io/mastodon/mastodon:v4.3.6
image: ghcr.io/mastodon/mastodon:v4.5.1
restart: ${restartPolicy}
environment:
- LOCAL_DOMAIN=${mastodonHost}.${domain}
- SMTP_SERVER=smtp.${domain}
- SMTP_LOGIN=admin@${domain}
- SMTP_FROM_ADDRESS=admin@${domain}
env_file:
- env-config
- ../../secret/env-mastodonServ
- ../../secret/env-mastodonDB
command: bundle exec sidekiq
depends_on:
- db

View File

@@ -67,7 +67,7 @@ ES_PASS=password
# Sending mail
# ------------
#SMTP_SERVER=
SMTP_PORT=587
#SMTP_PORT=587
#SMTP_LOGIN=
#SMTP_PASSWORD=
#SMTP_FROM_ADDRESS=

View File

@@ -1,7 +1,9 @@
# Mostlymatter from: https://framagit.org/framasoft/framateam/mostlymatter
# Don't forget to chmod +x
services:
app:
image: mattermost/mattermost-team-edition:10.12
image: mattermost/mattermost-team-edition:11.1
container_name: ${mattermostServName}
restart: ${restartPolicy}
volumes:
@@ -14,6 +16,7 @@ services:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
- /etc/environment:/etc/environment:ro
- ./mostlymatter-amd64-v11.1.0:/mattermost/bin/mattermost
env_file:
- ../../secret/env-${mattermostServName}
environment:

View File

@@ -1,4 +1,4 @@
FROM paheko/paheko:1.3.15
FROM paheko/paheko:1.3.16
ENV PAHEKO_DIR /var/www/paheko

View File

@@ -9,3 +9,10 @@ http:
ipallowlist:
sourceRange:
- "127.0.0.1"
tcp:
middlewares:
ldap-ip-allowlist:
ipAllowList:
sourceRange:
- "127.0.0.1"

View File

@@ -1,12 +1,13 @@
services:
reverse-proxy:
image: traefik:v3.5.1
image: traefik:v3.6.2
container_name: ${traefikServName}
restart: ${restartPolicy}
# Enables the web UI and tells Traefik to listen to docker
ports:
- ${MAIN_IP}:80:80
- ${MAIN_IP}:443:443
- ${MAIN_IP}:636:636
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./conf:/etc/traefik/
@@ -24,6 +25,7 @@ services:
- TRAEFIK_ENTRYPOINTS_websecure_HTTP_MIDDLEWARES=hsts@file,test-ipallowlist@file
- TRAEFIK_ENTRYPOINTS_websecure_TRANSPORT_RESPONDINGTIMEOUTS_READTIMEOUT=600
- TRAEFIK_ENTRYPOINTS_websecure_TRANSPORT_RESPONDINGTIMEOUTS_IDLETIMEOUT=600
- TRAEFIK_ENTRYPOINTS_ldapsecure_ADDRESS=:636
- TRAEFIK_CERTIFICATESRESOLVERS_letsencrypt_ACME_EMAIL=admin@${domain}
- TRAEFIK_CERTIFICATESRESOLVERS_letsencrypt_ACME_CASERVER=${acme_server}
- TRAEFIK_CERTIFICATESRESOLVERS_letsencrypt_ACME_STORAGE=/letsencrypt/acme.json