François
2 years ago
691 changed files with 49852 additions and 0 deletions
@ -0,0 +1,30 @@ |
|||
#!/usr/bin/env bash
# Bash completion for applyTemplate.sh: offers the script options first,
# then falls back to filename completion for the template/target arguments.

_applyTemplate_completions () {
    # Options understood by applyTemplate.sh.
    # (was "declare -a options": wrong case, the array used below is OPTIONS)
    declare -a OPTIONS
    OPTIONS=("-help" "-timestamp")
    COMPREPLY=()

    local CUR OPTIONS_COUNT=0
    CUR=${COMP_WORDS[COMP_CWORD]}

    # Count the options already typed and remove them from the candidates.
    for ITEM in ${COMP_WORDS[@]:1}
    do
        if [[ " ${OPTIONS[*]} " =~ " ${ITEM} " ]] ; then
            let "OPTIONS_COUNT++"
            OPTIONS=(${OPTIONS[@]/${ITEM}})
        else
            break
        fi
    done

    # Cursor is within the options (or right after them) and starts with "-":
    # complete with the remaining options.
    ((COMP_CWORD <= OPTIONS_COUNT+1)) && [[ "${CUR}" =~ ^- ]] && COMPREPLY=( $(compgen -W "${OPTIONS[*]}" -- "${CUR}" ) ) && return 0

    # Otherwise complete the two positional arguments (template, dst) with files.
    ((COMP_CWORD <= OPTIONS_COUNT+2)) && COMPREPLY=($(compgen -f -- "${CUR}")) && return 0

    return 0
}
complete -F _applyTemplate_completions applyTemplate.sh
@ -0,0 +1,214 @@ |
|||
# Common functions for KAZ

# ANSI terminal color codes. The ESC byte was lost in the scraped source
# ('[1m' instead of ESC[1m); restored here as a literal escape character
# via $'\033' so the codes work with plain echo as well as echo -e.
BOLD=$'\033[1m'
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[0;33m'
BLUE=$'\033[0;34m'
MAGENTA=$'\033[0;35m'
CYAN=$'\033[0;36m'
NC=$'\033[0m' # No Color
# Newline, used as a list separator by filterAvailableComposes.
NL=$'\n'
|||
|
|||
printKazMsg () {
    # $1: message — shown in bold cyan; backslash escapes in it are expanded
    # (same semantics as "echo -e").
    printf '%b\n' "${CYAN}${BOLD}$1${NC}"
}
|||
|
|||
printKazError () {
    # $1: error message — shown in bold red on stdout; backslash escapes
    # in it are expanded (same semantics as "echo -e").
    printf '%b\n' "${RED}${BOLD}$1${NC}"
}
|||
|
|||
setKazVars () {
    # Derive and export every KAZ_* / DOCK_* path from ${KAZ_ROOT}.
    # KAZ_ROOT must be set by the caller before this is invoked.
    if [ -z "${KAZ_ROOT}" ]; then
        printKazError "\n\n *** KAZ_ROOT not defined! ***\n"
        # was a bare "exit": it would exit with the status of the echo (0)
        # and mask the failure from the caller
        exit 1
    fi
    export KAZ_KEY_DIR="${KAZ_ROOT}/secret"
    export KAZ_BIN_DIR="${KAZ_ROOT}/bin"
    export KAZ_CONF_DIR="${KAZ_ROOT}/config"
    export KAZ_CONF_PROXY_DIR="${KAZ_CONF_DIR}/proxy"
    export KAZ_COMP_DIR="${KAZ_ROOT}/dockers"
    export KAZ_STATE_DIR="${KAZ_ROOT}/state"

    export KAZ_GIT_DIR="${KAZ_ROOT}/git"
    export KAZ_DNLD_DIR="${KAZ_ROOT}/download"
    export KAZ_DNLD_GAR_DIR="${KAZ_DNLD_DIR}/garradin"

    # shared helper script and docker environment file
    export APPLY_TMPL=${KAZ_BIN_DIR}/applyTemplate.sh
    export DOCKERS_ENV="${KAZ_CONF_DIR}/dockers.env"

    # docker storage layout on the host
    export DOCK_LIB="/var/lib/docker"
    export DOCK_VOL="${DOCK_LIB}/volumes"
    export DOCK_VOL_GAR_ORGA="${DOCK_LIB}/volumes/garradin_assoUsers/_data/"
}
|||
|
|||
function testValidIp () {
    # $1: candidate IPv4 address.
    # Returns 0 when $1 is a valid dotted-quad (each octet 0-255), 1 otherwise.
    local candidate=$1 octet
    local -a parts
    local result=1

    if [[ ${candidate} =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        IFS='.' read -r -a parts <<< "${candidate}"
        result=0
        for octet in "${parts[@]}"; do
            [[ ${octet} -le 255 ]] || result=1
        done
    fi
    return ${result}
}
|||
|
|||
getValInFile () {
    # $1: file name
    # $2: variable name
    # Print the value of the first "name=value" line found in the file
    # (everything after the first "="; leading blanks around the name and
    # the "=" are tolerated). Silent when the file is missing.
    # NOTE: $2 is interpolated into the grep/sed patterns, so it must not
    # contain regex metacharacters.
    # Fix: quote "$1" so file names with spaces work.
    grep "^\s*$2\s*=" "$1" 2>/dev/null | head -1 | sed "s%^\s*$2\s*=\(.*\)$%\1%"
}
|||
|
|||
getList () {
    # $1: list file.
    # Emit one entry per line: trailing "# comments" and leading blanks are
    # stripped, empty lines removed. An extra newline is appended first so
    # the last line is handled even without a terminating newline.
    { cat -- "$1"; echo; } | sed -e "s/\(.*\)[ \t]*#.*$/\1/" -e "s/^[ \t]*\(.*\)$/\1/" -e "/^$/d"
}
|||
|
|||
getGarradinPluginList () {
    # List downloaded Garradin plugin archives: every entry of the download
    # directory except the garradin core archives themselves.
    ls "${KAZ_DNLD_GAR_DIR}" | grep -v -e "garradin-"
}
|||
|
|||
getGarradinOrgaList () {
    # List the organisations known to Garradin (one entry per directory
    # inside the garradin users docker volume).
    ls -- "${DOCK_VOL_GAR_ORGA}"
}
|||
|
|||
getAvailableComposes () {
    # Names of the compose directories that are NOT per-organisation
    # instances (i.e. everything not matching "*-orga").
    ls "${KAZ_COMP_DIR}" | grep -v -- "^.*-orga$"
}
|||
|
|||
getAvailableOrgas () {
    # Names of the per-organisation compose directories ("*-orga").
    ls "${KAZ_COMP_DIR}" | grep -- "^.*-orga$"
}
|||
|
|||
getAvailableServices () {
    # Print, one per line, the services that can be enabled in an orga.
    printf '%s\n' garradin cloud collabora agora wiki wp
}
|||
|
|||
filterInList () {
    # $*: reference list (space-separated words)
    # stdin: candidate names, one per line
    # Print, sorted and de-duplicated, the candidates present in the list.
    local candidate
    while read candidate ; do
        case " $* " in
            *" ${candidate} "*) echo ${candidate} ;;
        esac
    done | sort -u
}
|||
|
|||
filterNotInList () {
    # $*: reference list (space-separated words)
    # stdin: candidate names, one per line
    # Print, sorted and de-duplicated, the candidates NOT present in the list.
    local candidate
    while read candidate ; do
        case " $* " in
            *" ${candidate} "*) ;;
            *) echo ${candidate} ;;
        esac
    done | sort -u
}
|||
|
|||
filterAvailableComposes () {
    # $*: candidate compose names (possibly abbreviated substrings).
    # Prints, sorted and de-duplicated, the candidates that match an
    # available compose or orga directory; a candidate that is not an
    # exact name is expanded to every compose containing it as substring.
    # With no argument, prints the whole available list.
    # Exits 1 (whole script) when a candidate matches nothing.
    local AVAILABLE_COMPOSES=$(getAvailableComposes;getAvailableOrgas)
    if [ $# -eq 0 ] ; then
        echo ${AVAILABLE_COMPOSES}
    fi
    local compose
    for compose in $*
    do
        # tolerate a trailing "/" left by directory completion
        compose=${compose%/}
        # exact-name check: AVAILABLE_COMPOSES is newline-separated,
        # so wrap both sides in NL before the substring match
        if [[ ! "${NL}${AVAILABLE_COMPOSES}${NL}" =~ "${NL}${compose}${NL}" ]]; then
            # not an exact name: try substring expansion
            local subst=""
            for item in ${AVAILABLE_COMPOSES}; do
                [[ "${item}" =~ "${compose}" ]] && echo ${item} && subst="${subst} ${item}"
            done
            if [ -z "${subst}" ] ; then
                echo "${RED}${BOLD}Unknown compose: ${compose} not in "${AVAILABLE_COMPOSES}"${NC}" >&2
                exit 1
            else
                echo "${BLUE}${BOLD}substitute compose: ${compose} => "${subst}"${NC}" >&2
            fi
        else
            echo "${compose}"
        fi
    done | sort -u
}
|||
|
|||
serviceOnInOrga () {
    # $1: orga name
    # $2: service name
    # $3: default value printed when the orga has no compose file
    # Prints "on" when the service name appears in the orga's
    # docker-compose.yml, "off" when it does not, or $3 if the file
    # is missing.
    local composeFile="${KAZ_COMP_DIR}/$1-orga/docker-compose.yml"
    if [[ -f "${composeFile}" ]]
    then
        if grep -q "$2" "${composeFile}" 2>/dev/null ; then
            echo on
        else
            echo off
        fi
    else
        echo "$3"
    fi
}
|||
|
|||
waitUrl () {
    # $1: URL to poll.
    # Block until the URL answers with an HTTP status whose first line
    # matches *[23]0[0-9]* (2xx/3xx families), polling every 5 seconds.
    # Prints a hint — and the curl command to reproduce the probe — while
    # the service is still unavailable.
    # NOTE(review): the glob only matches codes x00..x09 (e.g. 301 yes,
    # 310 no) — presumably intentional for the codes seen here; confirm.
    if [[ $(curl --connect-timeout 2 -s -D - "$1" -o /dev/null 2>/dev/null | head -n1) != *[23]0[0-9]* ]]; then
        printKazMsg "service not available ($1). Please wait..."
        echo curl --connect-timeout 2 -s -D - "$1" -o /dev/null \| head -n1
        while [[ $(curl --connect-timeout 2 -s -D - "$1" -o /dev/null 2>/dev/null | head -n1) != *[23]0[0-9]* ]]
        do
            sleep 5
        done
    fi
}
|||
|
|||
unzipInDir () {
    # $1: zip file
    # $2: destination directory
    # Extract $1 into $2/<archive basename>. The archive must contain a
    # single top-level entry ("a tree"). On any precondition failure an
    # error is printed and the function returns — with status 0, so
    # callers cannot distinguish failure (NOTE(review): consider return 1).

    if [ $# -ne 2 ]; then
        printKazError "unzipInDir: bad arg number"
        return
    fi
    if ! [[ $1 == *.zip ]]; then
        printKazError "unzipInDir: $1 is not a zip file"
        return
    fi
    if ! [[ -d $2 ]]; then
        printKazError "$2 is not destination dir"
        return
    fi

    # final directory: destination + archive name without ".zip"
    destName="$2/$(basename "${1%.zip}")"
    if [[ -d "${destName}" ]]; then
        printKazError "${destName} already exist"
        return
    fi

    # unpack into a unique temporary dir, cleaned up on shell exit
    # NOTE(review): this trap replaces any EXIT trap the caller had set
    tmpDir=$2/tmp-$$
    trap 'rm -rf "${tmpDir}"' EXIT
    unzip "$1" -d "${tmpDir}"
    srcDir=$(ls -1 "${tmpDir}")
    case $(wc -l <<< $srcDir) in
        0)
            printKazError "empty zip file : $1"
            rmdir "${tmpDir}"
            return
            ;;
        1)
            # single root entry: move it into place under its final name
            mv "${tmpDir}/${srcDir}" "${destName}"
            rmdir "${tmpDir}"
            ;;
        *)
            printKazError "zip file $1 is not a tree (${srcDir})"
            return
            ;;
    esac
}
@ -0,0 +1,66 @@ |
|||
#!/usr/bin/env bash
# Bash completion for container.sh:
#   first non-option word -> one of: status start stop save
#   following words       -> compose names, filtered by the chosen command.
# (fix: the shebang was missing the "!")

_container_completions () {
    # locate KAZ_ROOT relative to the completed script and load the helpers
    KAZ_ROOT=$(cd "$(dirname ${COMP_WORDS[0]})"/..; pwd)
    COMPREPLY=()
    . "${KAZ_ROOT}/bin/.commonFunctions.sh"
    setKazVars

    # count the option words (-x) typed before the cursor so the logical
    # argument position can be computed
    local cword=${COMP_CWORD} cur=${COMP_WORDS[COMP_CWORD]} card=${#COMP_WORDS[@]} i w skip=0
    for ((i=1 ; i<cword; i++)) ; do
        w="${COMP_WORDS[i]}"
        [[ "${w}" == -* ]] && ((skip++))
    done
    # cmd = first non-option word; names = the words already given after it
    local arg_pos w i cmd= names=
    ((arg_pos = cword - skip))
    for ((i=1 ; i<card; i++)) ; do
        w="${COMP_WORDS[i]}"
        if [ -z "${cmd}" ] ; then
            [[ "${w}" == -* ]] || cmd="${w}"
            continue
        fi
        names="${names} ${w}"
    done

    case "$cur" in
        -*)
            COMPREPLY=( $(compgen -W "-h -n" -- "${cur}" ) ) ;;
        *)
            local cmd_available="status start stop save"
            case "${arg_pos}" in
                1)
                    # $1 of container.sh: the command itself
                    COMPREPLY=($(compgen -W "${cmd_available}" -- "${cur}"))
                    ;;
                *)
                    # $2-* of container.sh: compose names
                    [[ " ${cmd_available} " =~ " ${cmd} " ]] || return 0
                    # pick the set of names matching the command
                    # (start -> currently disabled composes, stop -> enabled, ...)
                    local names_set="available"
                    case "${cmd}" in
                        status)
                            names_set="available"
                            ;;
                        start)
                            names_set="disable"
                            ;;
                        stop)
                            names_set="enable"
                            ;;
                        save)
                            names_set="validate"
                            ;;
                    esac
                    local available_args=$("${KAZ_ROOT}/bin/kazList.sh" "compose" "${names_set}")
                    # remove the targets already present on the command line
                    local proposal item
                    for item in ${available_args} ; do
                        [[ " ${names} " =~ " ${item} " ]] || proposal="${proposal} ${item}"
                    done
                    COMPREPLY=($(compgen -W "${proposal}" -- "${cur}"))
                    ;;
            esac
    esac
    return 0
}
complete -F _container_completions container.sh
@ -0,0 +1,19 @@ |
|||
#!/usr/bin/env bash
# Bash completion for dns.sh: completes the flags (-h, -n) and the single
# sub-command (list/add/del), which is proposed only once.
# (fix: the shebang was missing the "!")

_dns_completions () {
    local cur find
    COMPREPLY=()
    cur=${COMP_WORDS[COMP_CWORD]}
    case "$cur" in
        -*)
            COMPREPLY=( $(compgen -W "-h -n" -- "${cur}" ) ) ;;
        *)
            # propose a sub-command only when none has been typed yet
            find=""
            for arg in ${COMP_WORDS[@]} ; do
                [[ " list add del " =~ " ${arg} " ]] && find="arg"
            done
            [ -z "${find}" ] && COMPREPLY=($(compgen -W "list add del" -- "${cur}")) ;;
    esac
    return 0
}
complete -F _dns_completions dns.sh
@ -0,0 +1,53 @@ |
|||
#!/usr/bin/env bash
# Bash completion for foreign-domain.sh (list/add/del of external domains
# attached to KAZ services).

_foreign_domain_completions () {
    # locate KAZ_ROOT relative to the completed script and load the helpers
    KAZ_ROOT=$(cd "$(dirname ${COMP_WORDS[0]})"/..; pwd)
    . "${KAZ_ROOT}/bin/.commonFunctions.sh"
    setKazVars

    local cur find
    COMPREPLY=()
    cur=${COMP_WORDS[COMP_CWORD]}
    case "$cur" in
        -*)
            COMPREPLY=( $(compgen -W "-h -n" -- "${cur}" ) ) ;;
        *)
            # locate the sub-command (list/add/del) among the typed words
            cmdIdx=""
            for i in "${!COMP_WORDS[@]}"; do
                [[ " list add del " =~ " ${COMP_WORDS[${i}]} " ]] && cmdIdx="${i}" && break
            done
            if [ -z "${cmdIdx}" ] ;then
                COMPREPLY=($(compgen -W "list add del" -- "${cur}"))
            elif [ ! "list" == "${COMP_WORDS[${cmdIdx}]}" ] ; then
                # hosts able to carry a foreign domain, taken from dockers.env
                declare -a availableComposes
                . "${KAZ_CONF_DIR}/dockers.env"
                availableComposes=(${garHost} ${cloudHost} ${dokuwikiHost} ${wordpressHost} ${matterHost})

                if [ "add" == "${COMP_WORDS[${cmdIdx}]}" ] ; then
                    # "add <domain> <orga> <compose>": the distance from the
                    # sub-command decides what to propose
                    case $(expr "${#COMP_WORDS[@]}" - "${cmdIdx}") in
                        3)
                            # orga names from the config list, "-orga" stripped
                            declare -a availableOrga
                            availableOrga=($(sed -e "s/\(.*\)[ \t]*#.*$/\1/" -e "s/^[ \t]*\(.*\)-orga$/\1/" -e "/^$/d" "${KAZ_CONF_DIR}/container-orga.list"))
                            COMPREPLY=($(compgen -W "${availableOrga[*]}" -- "${cur}"))
                            ;;
                        4)
                            COMPREPLY=($(compgen -W "${availableComposes[*]}" -- "${cur}"))
                            ;;
                    esac
                elif [ "del" == "${COMP_WORDS[${cmdIdx}]}" ] ; then
                    case $(expr "${#COMP_WORDS[@]}" - "${cmdIdx}") in
                        2)
                            # collect server_name values from the existing proxy
                            # configs (${domain} presumably comes from the
                            # sourced dockers.env — TODO confirm)
                            declare -a availableDomaine
                            availableDomaine=($(for compose in ${availableComposes[@]} ; do
                                sed -e "s/[ \t]*\([^#]*\)#.*/\1/g" -e "/^$/d" -e "s/.*server_name[ \t]\([^ ;]*\).*/\1/" "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_name.${domain}"
                            done))
                            COMPREPLY=($(compgen -W "${availableDomaine[*]}" -- "${cur}"))
                            ;;
                    esac
                fi
            fi
            ;;
    esac
    return 0
}
complete -F _foreign_domain_completions foreign-domain.sh
@ -0,0 +1,51 @@ |
|||
#!/usr/bin/env bash
# Bash completion for kazDockerNet.sh: completes the command (list/add)
# and, for "add", the compose names whose network does not exist yet.

_kazDockerNet_completion () {
    # locate KAZ_ROOT relative to the completed script and load the helpers
    KAZ_ROOT=$(cd "$(dirname ${COMP_WORDS[0]})"/..; pwd)
    COMPREPLY=()
    . "${KAZ_ROOT}/bin/.commonFunctions.sh"
    setKazVars

    # count the option words (-x) typed before the cursor
    local cword=${COMP_CWORD} cur=${COMP_WORDS[COMP_CWORD]} card=${#COMP_WORDS[@]} i w skip=0
    for ((i=1 ; i<cword; i++)) ; do
        w="${COMP_WORDS[i]}"
        [[ "${w}" == -* ]] && ((skip++))
    done
    # cmd = first non-option word; names = the words already given after it
    local arg_pos w i cmd= names=
    ((arg_pos = cword - skip))
    for ((i=1 ; i<card; i++)) ; do
        w="${COMP_WORDS[i]}"
        if [ -z "${cmd}" ] ; then
            [[ "${w}" == -* ]] || cmd="${w}"
            continue
        fi
        names="${names} ${w}"
    done

    # NOTE(review): cword/cur are re-declared here, and the dispatch below
    # uses cword directly instead of the arg_pos computed above — so an
    # option before the command shifts the position; confirm intent.
    local cword=${COMP_CWORD} cur=${COMP_WORDS[COMP_CWORD]}
    case "$cur" in
        -*)
            COMPREPLY=( $(compgen -W "-h -n" -- "${cur}" ) )
            ;;
        *)
            local cmd_available="list add"
            case "${cword}" in
                1)
                    COMPREPLY=($(compgen -W "${cmd_available}" -- "${cur}"))
                    ;;
                *)
                    # only "add" takes further arguments
                    [[ "${cmd}" = "add" ]] || return 0
                    # propose available composes minus those already typed
                    # and minus the networks that already exist
                    local available_args=$("${KAZ_BIN_DIR}/kazList.sh" "compose" "available" 2>/dev/null)
                    local used=$("${KAZ_BIN_DIR}/kazDockerNet.sh" "list" | grep "name:" | sed -e "s%\bname:\s*%%" -e "s%\bbridge\b\s*%%" -e "s%Net\b%%g")
                    local proposal item
                    for item in ${available_args} ; do
                        [[ " ${names} " =~ " ${item} " ]] || [[ " ${used} " =~ " ${item} " ]] || proposal="${proposal} ${item}"
                    done
                    COMPREPLY=($(compgen -W "${proposal}" -- "${cur}"))
                    ;;
            esac
            ;;
    esac
    return 0
}
complete -F _kazDockerNet_completion kazDockerNet.sh
@ -0,0 +1,83 @@ |
|||
#!/usr/bin/env bash
# Bash completion for kazList.sh:
#   $1 = compose|service
#   $2 = available|validate|enable|disable|status
#   $3.. = compose/orga names (obtained by re-running kazList.sh itself).

_kazList_completions () {
    #KAZ_ROOT=$(cd "$(dirname ${COMP_WORDS[0]})"/..; pwd)
    COMPREPLY=()
    # count the option words (-x) typed before the cursor so the logical
    # argument position (arg_pos) can be computed
    local cword=${COMP_CWORD} cur=${COMP_WORDS[COMP_CWORD]} card=${#COMP_WORDS[@]} i w skip=0
    for ((i=1 ; i<cword; i++)) ; do
        w="${COMP_WORDS[i]}"
        [[ "${w}" == -* ]] && ((skip++))
    done
    # cmd = 1st non-option word, opt = 2nd, names = all following words
    local arg_pos w i cmd= opt= names=
    ((arg_pos = cword - skip))
    for ((i=1 ; i<card; i++)) ; do
        w="${COMP_WORDS[i]}"
        if [ -z "${cmd}" ] ; then
            [[ "${w}" == -* ]] || cmd="${w}"
            continue
        fi
        if [ -z "${opt}" ] ; then
            [[ "${w}" == -* ]] || opt="${w}"
            continue
        fi
        names="${names} ${w}"
    done
    # debug trace (left disabled on purpose)
    #(echo "A cword:${cword} / arg_pos:${arg_pos} / card:${card} / cur:${cur} / cmd:${cmd} / opt:${opt} / names:${names} " >> /dev/pts/1)

    case "${cur}" in
        -*)
            COMPREPLY=($(compgen -W "-h --help" -- "${cur}"))
            ;;
        *)
            local cmd_available="compose service"
            local opt_available="available validate enable disable status"
            case "${arg_pos}" in
                1)
                    # $1 of kazList.sh
                    COMPREPLY=($(compgen -W "${cmd_available}" -- "${cur}"))
                    ;;
                2)
                    # $2 of kazList.sh
                    COMPREPLY=($(compgen -W "${opt_available}" -- "${cur}"))
                    ;;
                *)
                    # $3-* of kazList.sh
                    [[ " ${cmd_available} " =~ " ${cmd} " ]] || return 0
                    # choose the set of names by re-invoking the completed
                    # script itself (COMP_WORDS[0])
                    local names_set="${opt}"
                    local available_args
                    case "${cmd}" in
                        service)
                            case "${names_set}" in
                                available|validate)
                                    # these take no name arguments
                                    return 0
                                    ;;
                                *)
                                    available_args=$("${COMP_WORDS[0]}" "compose" "enable" "orga" 2>/dev/null)
                                    ;;
                            esac
                            ;;
                        compose)
                            case "${names_set}" in
                                validate|enable|disable)
                                    ;;
                                *)
                                    # "status" and anything else fall back to
                                    # the full available list
                                    names_set="available"
                                    ;;
                            esac
                            available_args=$("${COMP_WORDS[0]}" "${cmd}" "${names_set}")
                            ;;
                    esac
                    # remove the targets already present on the command line
                    local proposal item
                    for item in ${available_args} ; do
                        [[ " ${names} " =~ " ${item} " ]] || proposal="${proposal} ${item}"
                    done
                    COMPREPLY=($(compgen -W "${proposal}" -- "${cur}"))
                    ;;
            esac
    esac
    return 0
}

complete -F _kazList_completions kazList.sh
@ -0,0 +1,63 @@ |
|||
#!/usr/bin/env bash
# Bash completion for orga-gen.sh:
#   "-service" disables a service, "+service" enables one; bare words are
#   orga names (only allowed when completing from the orgaTmpl template dir).

_orga_gen_completion () {
    # the script lives either in .../orgaTmpl (template) or inside a
    # specific "<name>-orga" directory; behavior depends on which
    KAZ_ROOT=$(cd "$(dirname ${COMP_WORDS[0]})"/../..; pwd)
    ORGA_DIR=$(cd "$(dirname ${COMP_WORDS[0]})"; basename $(pwd))
    COMPREPLY=()
    . "${KAZ_ROOT}/bin/.commonFunctions.sh"
    setKazVars
    # count the -xxx / +xxx words typed before the cursor
    local cword=${COMP_CWORD} cur=${COMP_WORDS[COMP_CWORD]} card=${#COMP_WORDS[@]} i w skip=0
    for ((i=1 ; i<cword; i++)) ; do
        w="${COMP_WORDS[i]}"
        [[ "${w}" == -* ]] && ((skip++))
        [[ "${w}" == +* ]] && ((skip++))
    done
    # split the words already typed into -opts, +opts and bare names
    local arg_pos w i addOpt= rmOpt= names=
    ((arg_pos = cword - skip))
    for ((i=1 ; i<card; i++)) ; do
        w="${COMP_WORDS[i]}"
        if [[ "${w}" == -* ]]; then
            rmOpt="${rmOpt} ${w}"
            continue
        fi
        if [[ "${w}" == '+'* ]]; then
            addOpt="${addOpt} ${w}"
            continue
        fi
        names="${names} ${w}"
    done
    local KAZ_LIST="${KAZ_BIN_DIR}/kazList.sh"
    case "$cur" in
        -*)
            # "-service" removes a service: propose the currently enabled ones
            local available_services item proposal="-h -l" listOpt="available"
            [ -n "${names}" ] && listOpt="enable ${names}"
            [[ "${ORGA_DIR}" = "orgaTmpl" ]] || listOpt="enable ${ORGA_DIR%-orga}"
            available_services=$("${KAZ_LIST}" service ${listOpt} 2>/dev/null | tr ' ' '\n' | sed "s/\(..*\)/-\1/")
            for item in ${available_services} ; do
                [[ " ${rmOpt} " =~ " ${item} " ]] || proposal="${proposal} ${item}"
            done
            COMPREPLY=( $(compgen -W "${proposal}" -- "${cur}" ) )
            ;;
        '+'*)
            # "+service" adds a service: propose the currently disabled ones
            local available_services item proposal= listOpt="available"
            [ -n "${names}" ] && listOpt="disable ${names}"
            [[ "${ORGA_DIR}" = "orgaTmpl" ]] || listOpt="disable ${ORGA_DIR%-orga}"
            available_services=$("${KAZ_LIST}" service ${listOpt} 2>/dev/null | tr ' ' '\n' | sed "s/\(..*\)/+\1/")
            for item in ${available_services} ; do
                [[ " ${addOpt} " =~ " ${item} " ]] || proposal="${proposal} ${item}"
            done
            COMPREPLY=( $(compgen -W "${proposal}" -- "${cur}" ) )
            ;;
        *)
            # bare orga names can only be given from the template directory
            [[ "${ORGA_DIR}" = "orgaTmpl" ]] || return 0;
            local available_orga=$("${KAZ_LIST}" "compose" "enable" "orga" 2>/dev/null | sed "s/-orga\b//g")
            local proposal= item
            for item in ${available_orga} ; do
                [[ " ${names} " =~ " ${item} " ]] || proposal="${proposal} ${item}"
            done
            COMPREPLY=($(compgen -W "${proposal}" -- "${cur}"))
            ;;
    esac
    return 0
}
complete -F _orga_gen_completion orga-gen.sh
@ -0,0 +1,20 @@ |
|||
#!/usr/bin/env bash
# Bash completion for updateCloud.sh: the first (and only) argument is the
# name of an enabled orga. (fix: the shebang was missing the "!")

_updateCloud_completion () {
    # locate KAZ_ROOT relative to the completed script and load the helpers
    KAZ_ROOT=$(cd "$(dirname ${COMP_WORDS[0]})"/..; pwd)
    COMPREPLY=()
    . "${KAZ_ROOT}/bin/.commonFunctions.sh"
    setKazVars
    local cword=${COMP_CWORD} cur=${COMP_WORDS[COMP_CWORD]}
    case "${cword}" in
        1)
            # orga name: enabled "*-orga" composes with the suffix stripped
            local available_orga=$("${KAZ_BIN_DIR}/kazList.sh" "compose" "enable" "orga" 2>/dev/null | sed "s/-orga\b//g")
            COMPREPLY=($(compgen -W "${available_orga}" -- "${cur}"))
            ;;
        *)
            # no further arguments
            ;;
    esac
    return 0
}
complete -F _updateCloud_completion updateCloud.sh
@ -0,0 +1,92 @@ |
|||
#!/bin/bash

# Update the configuration ${CONF} from the template ${TMPL}.
# Variables substituted: __DOMAIN__ and the other __XXX__ markers below.
# Blocks can be shown or hidden:
#   a block starts on a line containing {{XXX and ends on a line containing }}
#   XXX = on      -> always shown
#   XXX = off     -> always hidden
#   XXX = compose -> shown when the environment variable proxy_compose is "on"

KAZ_ROOT=$(cd "$(dirname $0)/.."; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

# load the host names and passwords used by the substitutions below
. "${DOCKERS_ENV}"
. "${KAZ_KEY_DIR}/SetAllPass.sh"
|||
usage () {
    # Print command-line help for applyTemplate.sh on stdout.
    local prog
    prog=$(basename "$0")
    echo "${prog}  [-h] [-help] [-timestamp] template dst"
    echo " -h"
    echo " -help Display this help."
    echo " -timestamp produce timestamp comment."
}
|||
|
|||
# When set, a generation banner is written at the top of the target file.
TIMESTAMP=""

case "$1" in
    '-h' | '-help' )
        usage
        shift
        exit;;
    '-time' | '-timestamp' )
        TIMESTAMP=YES
        shift;;
esac

# no more export in .env: re-export every proxy_* flag so the awk
# block-filter below can read them through ENVIRON[]
PROXY_VARS=$(set | grep "proxy_.*=")
for var in ${PROXY_VARS}
do
    export ${var}
done

(
    # $1 = template
    # $2 = target
    if [ "${TIMESTAMP}" == "YES" ]; then
        echo "# Generated by $(pwd)$(basename $0)"
        echo "# à partir du modèle $1"
        echo "#" $(date "+%x %X")
        echo
    fi

    # 1) sed: drop blank/comment lines and substitute the __XXX__ markers
    # 2) awk: keep or drop {{xxx ... }} blocks depending on proxy_xxx
    sed \
        -e "/^[ \t]*$/d"\
        -e "/^[ ]*#.*$/d"\
        -e "s|__CALC_HOST__|${calcHost}|g"\
        -e "s|__CLOUD_HOST__|${cloudHost}|g"\
        -e "s|__DATE_HOST__|${dateHost}|g"\
        -e "s|__DOKUWIKI_HOST__|${dokuwikiHost}|g"\
        -e "s|__DOMAIN__|${domain}|g"\
        -e "s|__FILE_HOST__|${fileHost}|g"\
        -e "s|__GAR_HOST__|${garHost}|g"\
        -e "s|__GIT_HOST__|${gitHost}|g"\
        -e "s|__GRAV_HOST__|${gravHost}|g"\
        -e "s|__HTTP_PROTO__|${httpProto}|g"\
        -e "s|__MATTER_HOST__|${matterHost}|g"\
        -e "s|__OFFICE_HOST__|${officeHost}|g"\
        -e "s|__PAD_HOST__|${padHost}|g"\
        -e "s|__SMTP_HOST__|${smtpHost}|g"\
        -e "s|__SYMPADB__|${sympaDBName}|g"\
        -e "s|__SYMPA_HOST__|${sympaHost}|g"\
        -e "s|__SYMPA_MYSQL_DATABASE__|${sympa_MYSQL_DATABASE}|g"\
        -e "s|__SYMPA_MYSQL_PASSWORD__|${sympa_MYSQL_PASSWORD}|g"\
        -e "s|__SYMPA_MYSQL_USER__|${sympa_MYSQL_USER}|g"\
        -e "s|__GARRADIN_API_USER__|${garradin_API_USER}|g"\
        -e "s|__GARRADIN_API_PASSWORD__|${garradin_API_PASSWORD}|g"\
        -e "s|__VIGILO_HOST__|${vigiloHost}|g"\
        -e "s|__WEBMAIL_HOST__|${webmailHost}|g"\
        -e "s|__WORDPRESS_HOST__|${wordpressHost}|g"\
        -e "s|__DOMAIN_SYMPA__|${domain_sympa}|g"\
        $1 | awk '
    BEGIN {cp=1}
    /}}/ {cp=1 ; next};
    /{{on/ {cp=1; next};
    /{{off/ {cp=0; next};
    match($0, /{{[a-zA-Z0-9_]+/) {cp=(ENVIRON["proxy_" substr($0,RSTART+2,RLENGTH)] == "on"); next};
    {if (cp) print $0};'
) > $2
@ -0,0 +1,302 @@ |
|||
#!/bin/bash

# If postfix is absent, first run:
#   docker network create postfix_mailNet

# Starts/stops a compose, saves a compose's database and updates the
# proxy configuration parameters.

KAZ_ROOT=$(cd "$(dirname $0)/.."; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd "${KAZ_BIN_DIR}"
PATH_SAUVE="/home/sauve/"
export SIMU=""

# compose inventories, read from the config lists; orgas are appended to
# the "need mail" set since they depend on the mail stack
declare -a availableComposesNoNeedMail availableMailComposes availableComposesNeedMail availableProxyComposes availableOrga
availableComposesNoNeedMail=($(getList "${KAZ_CONF_DIR}/container-withoutMail.list"))
availableMailComposes=($(getList "${KAZ_CONF_DIR}/container-mail.list"))
availableComposesNeedMail=($(getList "${KAZ_CONF_DIR}/container-withMail.list"))
availableProxyComposes=($(getList "${KAZ_CONF_DIR}/container-proxy.list"))
availableOrga=($(getList "${KAZ_CONF_DIR}/container-orga.list"))
availableComposesNeedMail+=( "${availableOrga[@]}" )

# every compose this script knows about (used for validation and help)
knownedComposes+=( ${availableMailComposes[@]} )
knownedComposes+=( ${availableProxyComposes[@]} )
knownedComposes+=( ${availableComposesNoNeedMail[@]} )
knownedComposes+=( ${availableComposesNeedMail[@]} )
|||
usage () {
    # Print the help text and abort with status 1.
    # ${knownedComposes[@]} is filled at load time from the config lists.
    printf '%s\n' \
        "Usage: $0 [-n] {status|start|stop|save} [compose]..." \
        " -n : simulation" \
        " status : docker-compose status (default all compose available)" \
        " start : start composes (default all compose validate)" \
        " stop : stop composes (default all compose enable)" \
        " save : save all known database" \
        " [compose] : in ${knownedComposes[@]}"
    exit 1
}
|
|||
doCompose () {
    # $1: docker-compose action, one of "up -d" / "down"
    # $2: compose directory name (under ${KAZ_COMP_DIR})
    echo "compose: $1 $2"
    ${SIMU} cd "${KAZ_COMP_DIR}/$2"
    # make sure the compose has its .env symlink to the shared config
    if [ ! -h .env ] ; then
        echo "create .env in $2"
        ${SIMU} ln -fs ../../config/dockers.env .env
    fi
    ${SIMU} docker-compose $1
}
|
|||
doComposes () {
    # $1: docker-compose action ("up -d" or "down")
    # $2...: compose directory names — doCompose is run on each in turn.
    cmd=$1
    shift
    while [ $# -gt 0 ] ; do
        doCompose "${cmd}" $1
        shift
    done
}
|
|||
updateProxy () {
    # $1: "on" | "off"
    # $2...: compose directory names
    # Record the proxy state of each compose in ${DOCKERS_ENV} as a
    # "proxy_<name>=on|off" flag, then regenerate the proxy configuration.
    cmd=$1
    shift
    echo "update proxy ${cmd}: $@"
    date=$(date "+%x %X")
    for compose in $@ ; do
        # "-" is not allowed in a shell variable name
        composeFlag=${compose//-/_}
        entry="proxy_${composeFlag}="
        newline="${entry}${cmd} # update by $(basename $0) at ${date}"
        if ! grep -q "proxy_${composeFlag}=" "${DOCKERS_ENV}" 2> /dev/null ; then
            # flag not present yet: append it
            if [[ -n "${SIMU}" ]] ; then
                echo "${newline} >> ${DOCKERS_ENV}"
            else
                echo "${newline}" >> "${DOCKERS_ENV}"
            fi
        else
            # flag already present: rewrite it in place
            ${SIMU} sed -i \
                -e "s|${entry}.*|${newline}|g" \
                "${DOCKERS_ENV}"
        fi
    done
    ${SIMU} ${KAZ_COMP_DIR}/proxy/proxy-gen.sh
}
|||
|
|||
saveDB () {
    # $1: DB container name   $2: DB user   $3: DB password
    # $4: database name       $5: backup file basename
    # Dump one MySQL database from inside its container and write a
    # gzipped SQL file to ${PATH_SAUVE}${5}.sql.gz.
    # Careful: no "-ti" on docker exec — it would not give the hand back.

    containerName=$1
    userName=$2
    userPass=$3
    dbName=$4
    backName=$5
    if [[ -n "${SIMU}" ]] ; then
        ${SIMU} "docker exec ${containerName} mysqldump --user=${userName} --password=${userPass} ${dbName} | gzip > $PATH_SAUVE${backName}.sql.gz"
    else
        docker exec ${containerName} mysqldump --user=${userName} --password=${userPass} ${dbName} | gzip > $PATH_SAUVE${backName}.sql.gz
    fi
}
|||
|
|||
# Composes actually selected on the command line; filled by the argument
# loop in main below (empty selection means "all available").
declare -a enableComposesNoNeedMail enableMailComposes enableComposesNeedMail enableProxyComposes

enableComposesNoNeedMail=()
enableMailComposes=()
enableComposesNeedMail=()
enableProxyComposes=()
|
|||
startComposes () {
    # Create the needed docker networks, then bring the selected composes
    # up in dependency order (no-mail, mail, with-mail, proxies last) and
    # register the started composes with the proxy.
    ./kazDockerNet.sh add ${enableComposesNoNeedMail[@]} ${enableProxyComposes[@]} ${enableMailComposes[@]} ${enableComposesNeedMail[@]}
    # garradin needs its compose file regenerated before starting
    [[ "${enableComposesNeedMail[@]}" =~ "garradin" ]] && ${SIMU} ${KAZ_COMP_DIR}/garradin/garradin-gen.sh
    doComposes "up -d" ${enableComposesNoNeedMail[@]}
    doComposes "up -d" ${enableMailComposes[@]}
    doComposes "up -d" ${enableComposesNeedMail[@]}
    updateProxy "on" ${enableComposesNoNeedMail[@]} ${enableComposesNeedMail[@]}
    doComposes "up -d" ${enableProxyComposes[@]}
    # regenerate the static web site when the proxy_web flag is not "on"
    # NOTE(review): "." in "^.s*" looks like it was meant to be "\s" — confirm
    if ! grep -q "^.s*proxy_web.s*=.s*on" "${DOCKERS_ENV}" 2> /dev/null ; then
        ${SIMU} ${KAZ_COMP_DIR}/web/web-gen.sh
    fi
    #
    # . "${DOCKERS_ENV}"
    # docker exec -it ${proxyServName} compose nginx restart
}
|||
|
|||
stopComposes () {
    # Unregister the selected composes from the proxy, then bring them
    # down in reverse dependency order (proxies first, no-mail last).
    updateProxy "off" ${enableComposesNoNeedMail[@]} ${enableComposesNeedMail[@]}
    doComposes "down" ${enableProxyComposes[@]}
    doComposes "down" ${enableComposesNeedMail[@]}
    doComposes "down" ${enableMailComposes[@]}
    doComposes "down" ${enableComposesNoNeedMail[@]}
    # regenerate the static web site when the proxy_web flag is not "on"
    # NOTE(review): "." in "^.s*" looks like it was meant to be "\s" — confirm
    if ! grep -q "^.s*proxy_web.s*=.s*on" "${DOCKERS_ENV}" 2> /dev/null ; then
        ${SIMU} ${KAZ_COMP_DIR}/web/web-gen.sh
    fi
}
|||
|
|||
statusComposes () {
    # Show the status of every selected compose via kazList.sh.
    ${KAZ_ROOT}/bin/kazList.sh compose status ${enableMailComposes[@]} ${enableProxyComposes[@]} ${enableComposesNoNeedMail[@]} ${enableComposesNeedMail[@]}
}
|||
|
|||
saveComposes () {
    # Dump the database of every selected compose into ${PATH_SAUVE}.
    # Composes whose data lives only in files (jirafeau, web, garradin,
    # dokuwiki, ...) need no dump: they are covered by the volume backup.
    . "${DOCKERS_ENV}"
    . "${KAZ_CONF_DIR}/password/SetAllPass.sh"

    savedComposes+=( ${enableMailComposes[@]} )
    savedComposes+=( ${enableProxyComposes[@]} )
    savedComposes+=( ${enableComposesNoNeedMail[@]} )
    savedComposes+=( ${enableComposesNeedMail[@]} )

    for compose in ${savedComposes[@]}
    do
        case "${compose}" in
            jirafeau)
                # nothing to do (plain files)
                ;;
            ethercalc)
                # not needed: backing up
                # /var/lib/docker/volumes/ethercalc_calcDB/_data/dump.rdb is enough
                ;;
            #grav)
                # ???
            #;;
            #postfix)
            sympa)
                echo "save sympa"
                saveDB ${sympaDBName} "${sympa_MYSQL_USER}" "${sympa_MYSQL_PASSWORD}" "${sympa_MYSQL_DATABASE}" sympa
                ;;
            web)
                # nothing to do (plain files)
                ;;
            etherpad)
                echo "save pad"
                saveDB ${etherpadDBName} "${etherpad_MYSQL_USER}" "${etherpad_MYSQL_PASSWORD}" "${etherpad_MYSQL_DATABASE}" etherpad
                ;;
            framadate)
                echo "save date"
                saveDB ${framadateDBName} "${framadate_MYSQL_USER}" "${framadate_MYSQL_PASSWORD}" "${framadate_MYSQL_DATABASE}" framadate
                ;;
            cloud)
                echo "save cloud"
                saveDB ${nextcloudDBName} "${nextcloud_MYSQL_USER}" "${nextcloud_MYSQL_PASSWORD}" "${nextcloud_MYSQL_DATABASE}" nextcloud
                ;;
            garradin)
                # nothing to do (plain files)
                ;;
            mattermost)
                echo "save mattermost"
                saveDB ${mattermostDBName} "${mattermost_MYSQL_USER}" "${mattermost_MYSQL_PASSWORD}" "${mattermost_MYSQL_DATABASE}" mattermost
                ;;
            dokuwiki)
                # nothing to do (plain files)
                ;;
            *-orga)
                # per-organisation compose: dump each service's DB present
                # in its docker-compose.yml
                ORGA=${compose%-orga}
                echo "save ${ORGA}"
                if grep -q "nextcloud" "./${compose}/docker-compose.yml" 2> /dev/null ; then
                    echo " => cloud"
                    saveDB "${ORGA}-DB" "${nextcloud_MYSQL_USER}" "${nextcloud_MYSQL_PASSWORD}" "${nextcloud_MYSQL_DATABASE}" "${ORGA}-cloud"
                fi
                if grep -q "mattermost" "./${compose}/docker-compose.yml" 2> /dev/null ; then
                    echo " => mattermost"
                    saveDB "${ORGA}-DB" "${mattermost_MYSQL_USER}" "${mattermost_MYSQL_PASSWORD}" "${mattermost_MYSQL_DATABASE}" "${ORGA}-mattermost"
                fi
                if grep -q "wordpress" "./${compose}/docker-compose.yml" 2> /dev/null ; then
                    echo " => wordpress"
                    saveDB "${ORGA}-DB" "${wp_MYSQL_USER}" "${wp_MYSQL_PASSWORD}" "${wp_MYSQL_DATABASE}" "${ORGA}-wordpress"
                fi
                ;;
        esac
    done
}
|||
|
|||
# ---- argument parsing --------------------------------------------------

if [ "$#" -eq 0 ] ; then
    usage
fi

if [ "$1" == "-h" ] ; then
    usage
    shift
fi

# -n: simulation mode — effective commands are echoed instead of run
if [ "$1" == "-n" ] ; then
    export SIMU=echo
    shift
fi

DCK_CMD=""
SAVE_CMD=""
case "$1" in
    start)
        DCK_CMD="startComposes"
        shift
        ;;

    stop)
        DCK_CMD="stopComposes"
        shift
        ;;

    save)
        SAVE_CMD="saveComposes"
        shift
        ;;

    status)
        DCK_CMD="statusComposes"
        shift
        ;;
    *)
        usage
        ;;
esac

# no compose given on the command line: act on every known compose
if [ $# -eq 0 ] ; then
    enableComposesNoNeedMail=("${availableComposesNoNeedMail[@]}")
    enableMailComposes=("${availableMailComposes[@]}")
    enableComposesNeedMail=("${availableComposesNeedMail[@]}")
    enableProxyComposes=("${availableProxyComposes[@]}")
else
    # the proxies are always (re)started on an explicit start
    if [ "${DCK_CMD}" = "startComposes" ] ; then
        enableProxyComposes=("${availableProxyComposes[@]}")
    fi
fi

# validate each requested compose; unknown names are expanded by
# substring match against the known composes
for compose in $*
do
    # tolerate a trailing "/" left by directory completion
    compose=${compose%/}
    if [[ ! " ${knownedComposes[@]} " =~ " ${compose} " ]]; then
        declare -a subst
        subst=()
        for item in "${knownedComposes[@]}"; do
            [[ "${item}" =~ "${compose}" ]] && subst+=(${item})
        done
        if [ "${subst}" = "" ] ; then
            echo
            echo "Unknown compose: ${compose} not in ${knownedComposes[@]}"
            echo
            exit 1
        else
            echo "substitute compose: ${compose} => ${subst[@]}"
        fi
    fi
    for item in "${availableMailComposes[@]}"; do
        [[ "${item}" =~ "${compose}" ]] && enableMailComposes+=("${item}")
    done
    # NOTE(review): "=" (replace) here while the other loops append ("+=")
    # — presumably there is a single proxy at a time; confirm.
    for item in "${availableProxyComposes[@]}"; do
        [[ "${item}" =~ "${compose}" ]] && enableProxyComposes=("${item}")
    done
    for item in "${availableComposesNoNeedMail[@]}"; do
        [[ "${item}" =~ "${compose}" ]] && enableComposesNoNeedMail+=("${item}")
    done
    for item in "${availableComposesNeedMail[@]}"; do
        [[ "${item}" =~ "${compose}" ]] && enableComposesNeedMail+=("${item}")
    done
done

# run the selected action; exit 1 when nothing was run successfully
[[ ! -z "${DCK_CMD}" ]] && "${DCK_CMD}" && exit 0

[[ ! -z "${SAVE_CMD}" ]] && "${SAVE_CMD}" && exit 0

exit 1
@ -0,0 +1,104 @@ |
|||
#!/bin/bash

# Build an "emptySecret" copy of the secret/ tree in which every password
# and private value is replaced by a placeholder, so the tree can be
# published/distributed without leaking credentials.

cd $(dirname $0)/..

mkdir -p emptySecret
# Mirror secret/ into emptySecret/ (preserve hard links, ACLs, xattrs).
rsync -aHAX --info=progress2 --delete secret/ emptySecret/

cd emptySecret/

. ../config/dockers.env
. ./SetAllPass.sh

# For debugging: uncomment so commands are printed instead of executed.
# SIMU=echo
|||
cleanEnvDB(){
    # Scrub the MySQL credentials in a docker env file, replacing every
    # value with a neutral placeholder (file is rewritten in place).
    # $1 = prefix          (unused here, kept for symmetry with cleanEnv)
    # $2 = envName         (path of the env file to clean)
    # $3 = containerName of DB (unused)
    local envFile="$2"
    local -a rules=(
        -e "s/MYSQL_ROOT_PASSWORD=.*/MYSQL_ROOT_PASSWORD=--root_password--/g"
        -e "s/MYSQL_DATABASE=.*/MYSQL_DATABASE=--database_name--/g"
        -e "s/MYSQL_USER=.*/MYSQL_USER=--user_name--/g"
        -e "s/MYSQL_PASSWORD=.*/MYSQL_PASSWORD=--user_password--/g"
    )
    # ${SIMU} may be set to "echo" for a dry run.
    ${SIMU} sed -i "${rules[@]}" "${envFile}"
}
|||
|
|||
cleanEnv(){
    # Replace the value of every VAR=... line of an env file with the
    # placeholder --clean_val--, keeping the variable names intact.
    # $1 = prefix
    # $2 = envName (file rewritten in place)
    local names
    # Collect the variable names: lines starting with an identifier and '='.
    names=$(grep "^[a-zA-Z_]*=" $2 | sed "s/^\([^=]*\)=.*/\1/g")
    for varName in ${names}
    do
        srcName="$1_${varName}"
        srcVal="--clean_val--"
        # ${SIMU} may be set to "echo" for a dry run.
        ${SIMU} sed -i -e "s~^[ ]*${varName}=.*$~${varName}=${srcVal}~" "$2"
    done
}
|||
|
|||
cleanPasswd(){
    # Blank every NAME="value" assignment in ./SetAllPass.sh (including
    # commented-out ones). Values whose second character is '{' — i.e.
    # "${...}" references — are deliberately left untouched.
    local target=./SetAllPass.sh
    local expr='s/^\([# ]*[^#= ]*\)=".[^{][^"]*"/\1="--clean_val--"/g'
    # ${SIMU} may be set to "echo" for a dry run.
    ${SIMU} sed -i -e "${expr}" "${target}"
}
|||
|
|||
####################
# main

# Interactive confirmation, currently disabled:
# read -r -p "Do you want to remove all password? [Y/n] " input

# case $input in
#     [yY][eE][sS]|[yY])
#         echo "Remove all password"
#         ;;
#     [nN][oO]|[nN])
#         echo "Abort"
#         ;;
#     *)
#         echo "Invalid input..."
#         exit 1
#         ;;
# esac

# Blank the quoted values inside SetAllPass.sh itself.
cleanPasswd

# Scrub the MySQL credentials of each service database env file.
# (The *DBName / *ServName variables come from ../config/dockers.env.)
cleanEnvDB "etherpad" "./env-${etherpadDBName}" "${etherpadDBName}"
cleanEnvDB "framadate" "./env-${framadateDBName}" "${framadateDBName}"
cleanEnvDB "git" "./env-${gitDBName}" "${gitDBName}"
cleanEnvDB "mattermost" "./env-${mattermostDBName}" "${mattermostDBName}"
cleanEnvDB "nextcloud" "./env-${nextcloudDBName}" "${nextcloudDBName}"
cleanEnvDB "roundcube" "./env-${roundcubeDBName}" "${roundcubeDBName}"
cleanEnvDB "sso" "./env-${ssoDBName}" "${ssoDBName}"
cleanEnvDB "sympa" "./env-${sympaDBName}" "${sympaDBName}"
cleanEnvDB "vigilo" "./env-${vigiloDBName}" "${vigiloDBName}"
cleanEnvDB "wp" "./env-${wordpressDBName}" "${wordpressDBName}"

# Scrub every remaining variable of each service env file.
cleanEnv "etherpad" "./env-${etherpadServName}"
cleanEnv "gandi" "./env-gandi"
cleanEnv "jirafeau" "./env-${jirafeauServName}"
cleanEnv "mattermost" "./env-${mattermostServName}"
cleanEnv "nextcloud" "./env-${nextcloudServName}"
cleanEnv "office" "./env-${officeServName}"
cleanEnv "roundcube" "./env-${roundcubeServName}"
cleanEnv "sso" "./env-${ssoServName}"
cleanEnv "vigilo" "./env-${vigiloServName}"
cleanEnv "wp" "./env-${wordpressServName}"

# Reset the admin-IP allow list to the local/test defaults.
cat > allow_admin_ip <<EOF
# ip for admin access only

# local test
allow 127.0.0.0/8;
allow 192.168.0.0/16;

EOF

# Tighten permissions: strip all group/other access, keep directories
# traversable for the owner (+X only adds execute where relevant).
chmod -R go= .
chmod -R +X .
@ -0,0 +1,16 @@ |
|||
#!/bin/bash

# Build a distributable archive of the kaz tree in which the real secret/
# directory is replaced by the scrubbed emptySecret/ copy produced by
# createEmptyPasswd.sh.

cd $(dirname $0) || exit 1

./setOwner.sh
./createEmptyPasswd.sh

cd ../.. || exit 1

# Bug fix: use %m (month), not %M (minute of the hour), so the archive name
# carries the intended YYYYMMDD date stamp.
FILE_NAME="/tmp/$(date +'%Y%m%d')-KAZ.tar.bz2"

# --transform renames emptySecret/ back to secret/ inside the archive, so
# the extracted tree has the expected layout.
tar -cjf "${FILE_NAME}" --transform s/emptySecret/secret/ \
    ./kaz/emptySecret/ ./kaz/bin ./kaz/config ./kaz/dockers

ls -l "${FILE_NAME}"
@ -0,0 +1,679 @@ |
|||
#!/bin/bash

#when: 30/03/2021
#what: create the users in the KAZ system, the KazWorld — to become a kaznaute!
#who : fab

# How it works:
#  - check that the requests file exists, create it if missing
#  - fill in the variables, then validate them
#  - create one password usable by every service (login: the kaz email)
#  - for each KAZ service (NC / WP / DOKUWIKI):
#      * check that the sub-domain exists, create it otherwise
#      * create the user and make it admin if required
#      * if it already exists, roll back (including on the other services)
#  - for garradin, check that the sub-domain exists, create it otherwise
#  - for mattermost, create the user and the team if needed, on the base agora
#  - once everything is ok, create the email account
#  - build the welcome email detailing the created services (url/user)
#  - subscribe the user to the infos@listes.kaz.bzh list
#  - notify contact@kaz.bzh and post in agora/creation_compte

#TODO: use the sql query against garradin to generate createUser.txt
#      automatically and switch the garradin ACTION field from "à créer"
#      to "aucune" on the member record

###################################################################################################################################
####### DANGER: 16/12/21: work in progress!
###################################################################################################################################

# search this script for the remaining TODOs


# Pull in all shared variables and passwords.
# The parent directory of this script (/kaz in production) is the root.
KAZ_ROOT=$(cd "$(dirname $0)"/..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd $(dirname $0)/..
. "${DOCKERS_ENV}"
. "${KAZ_KEY_DIR}/SetAllPass.sh"

# Calling-script name; the log, source and command files below derive from it.
PRG=$(basename $0)
RACINE=${PRG%.sh}

CREATE_ORGA_CMD="/kaz/config/orgaTmpl/orga-gen.sh"

# NOTE(review): the leading ESC byte of these ANSI colour sequences appears
# to have been lost (compare with .commonFunctions.sh) — TODO confirm.
GREEN='[0;32m'
RED='[0;31m'
NC='[0m' # No Color

# Source file describing the users to create.
FILE="$(pwd)/tmp/${RACINE}.txt"
# Log file.
LOG="$(pwd)/log/${RACINE}.log"
# XXX race if two admins run this at the same time
CMD="$(pwd)/tmp/${RACINE}_cmds_to_run.sh"

URL_WEBMAIL=$(echo $webmailHost).$(echo $domain)
URL_LISTE=$(echo $sympaHost).$(echo $domain)
URL_AGORA=$(echo $matterHost).$(echo $domain)
URL_GARRADIN=kaz-$(echo $garHost).$(echo $domain)

# Newsletter list every new user is subscribed to.
NL_LIST=infos@listes.kaz.bzh

# Counters quoted in the welcome email.
# (mail + base cloud + agora: max=3, min=2)
NB_SERVICES_BASE=0
# max: 5, min: 0
NB_SERVICES_DEDIES=0
# Extra notes appended to the welcome email for orgas.
MESSAGE_MAIL_ORGA_1=""
MESSAGE_MAIL_ORGA_2=""
|||
usage () {
    # Print the command-line help on stdout.
    cat <<EOF
${PRG} [-h] [-s] [-e]
 version 1.0
 Create users in kaz world using ${FILE} as source file. All logs in ${LOG}
 -h Display this help.
 -s Simulate. none user created but you can see the result in ${CMD}
 -e Execute commands. user will be created !!!
EOF
}
|||
|
|||
# Command-line parsing: exactly one option is expected.
case "$1" in
    '-h' | '-help' )
        usage
        shift
        exit;;
    -s)
        # Simulation: commands are only written to ${CMD}, not executed.
        SIMULATION=YES
        shift;;
    -e)
        # Execute: the generated command file is run at the end of the script.
        SIMULATION=NO
        shift;;
    *)
        echo "${RED}"
        echo "unknown parameter"
        echo "${NC}"
        usage
        shift
        exit;;
esac
|||
|
|||
|
|||
################################################################################################################ |
|||
############### on créé le fichier createUser.txt graĉe aux infos de garradin ################################## |
|||
################################################################################################################ |
|||
|
|||
# résultat de la req select sur garradin |
|||
TMP_SELECT_GARRADIN=$(mktemp /tmp/${RACINE}.XXXXXXXXX.TMP_SELECT_GARRADIN.json) |
|||
|
|||
#curl -s ${httpProto}://${garradin_API_USER}:${garradin_API_PASSWORD}@${URL_GARRADIN}/api/sql -d "SELECT nom,adresse,code_postal,ville,email_kaz,email FROM membres where email_kaz='${CHOIX_MAIL}' LIMIT 1;" >/tmp/$CHOIX_MAIL-garradin.json |
|||
|
|||
#echo ${httpProto}://${garradin_API_USER}:${garradin_API_PASSWORD}@${URL_GARRADIN}/api/sql -d "SELECT * FROM membres where action_auto='"A créer"';" |
|||
echo ${httpProto}://${garradin_API_USER}:${garradin_API_PASSWORD}@${URL_GARRADIN}/api/sql -d "SELECT * FROM membres;" |
|||
|
|||
curl -s ${httpProto}://${garradin_API_USER}:${garradin_API_PASSWORD}@${URL_GARRADIN}/api/sql -d "SELECT * FROM membres;" > $TMP_SELECT_GARRADIN |
|||
|
|||
exit; |
|||
|
|||
################################################################################################################
################################################################################################################
################################################################################################################

# If the requests file is missing or empty, write a commented template and
# stop so the admin can fill it in.
if [ ! -s "${FILE}" ];then
    echo "${RED}"
    echo "ERREUR: le fichier ${FILE} n'existait pas"
    echo "Il vient d'être créé. Vous pouvez le compéter."
    echo "${NC}"
    cat > "${FILE}" <<EOF
# -- fichier de création des comptes KAZ
# --
# -- 1 ligne par compte
# -- champs séparés par ";". les espaces en début et en fin sont enlevés
# -- laisser vide si pas de donnée
# -- pas d'espace dans les variables
# --
# -- ORGA: nom de l'organisation (max 15 car), vide sinon
# -- ADMIN_ORGA: O/N indique si le user est admin de l'orga (va le créer comme admin du NC de l'orga et admin de l'équipe agora)
# -- NC_ORGA: O/N indique si l'orga a demandé un NC
# -- GARRADIN_ORGA: O/N indique si l'orga a demandé un garradin
# -- WP_ORGA: O/N indique si l'orga a demandé un wp
# -- AGORA_ORGA: O/N indique si l'orga a demandé un mattermost
# -- WIKI_ORGA: O/N indique si l'orga a demandé un wiki
# -- NC_BASE: O/N indique si le user doit être inscrit dans le NC de base
# -- GROUPE_NC_BASE: soit null soit le groupe dans le NC de base
# -- EQUIPE_AGORA: soit null soit equipe agora (max 15 car)
# -- QUOTA=(1/10/20/...) en GB
# --
# NOM ; PRENOM ; EMAIL_SOUHAITE ; EMAIL_SECOURS ; ORGA ; ADMIN_ORGA ; NC_ORGA ; GARRADIN_ORGA ; WP_ORGA ; AGORA_ORGA ; WIKI_ORGA ; NC_BASE ; GROUPE_NC_BASE ; EQUIPE_AGORA ; QUOTA

#exemple pour un compte découverte:
#loufoque ; le_mec; loufoque.le-mec@kaz.bzh ; gregomondo@kaz.bzh; ; N; N; N; N; N; N;N;;; 1

#exemple pour un compte asso de l'orga gogol avec le service dédié NC uniquement + une équipe dans l'agora
#loufoque ; le_mec; loufoque.le-mec@kaz.bzh ; gregomondo@kaz.bzh; gogol ; O; O; N; N; N; N;N;;gogol_team; 10

EOF
    exit
fi
|||
# Strip comment and blank lines from the requests file.
ALL_LINES=$(sed -e "/^[ \t]*#.*$/d" -e "/^[ \t]*$/d" ${FILE})
if [ -z "${ALL_LINES}" ];then
    echo "${RED}"
    echo "ERREUR: le fichier ${FILE} est vide"
    echo "${NC}"
    usage
    exit
fi

# Existing KAZ emails and aliases.
TFILE_EMAIL=$(mktemp /tmp/${RACINE}.XXXXXXXXX.TFILE_EMAIL)
# NextCloud user-lookup response.
TEMP_USER_NC=$(mktemp /tmp/${RACINE}.XXXXXXXXX.TEMP_USER_NC)
# NextCloud group-lookup response.
TEMP_GROUP_NC=$(mktemp /tmp/${RACINE}.XXXXXXXXX.TEMP_GROUP_NC)
# Wordpress user-lookup response.
TEMP_USER_WP=$(mktemp /tmp/${RACINE}.XXXXXXXXX.TEMP_USER_WP)

# Remove the temp files on exit and on the usual signals.
trap "rm -f ${TFILE_EMAIL} ${TEMP_USER_NC} ${TEMP_GROUP_NC} ${TEMP_USER_WP}" 0 1 2 3 15

# (Re)create the generated-commands script, executable.
echo "#!/bin/bash" > ${CMD} && chmod +x ${CMD}


# Log the input file (the list of users to create).
(
    echo
    echo "$(date '+%Y-%m-%d %H:%M:%S') : ${PRG} - sauvegarde des utilisateurs à créer"
) | tee ${LOG}
cat ${FILE} >> ${LOG}

# Record the already-existing KAZ emails and aliases.
(
    ${DOCK_DIR}/postfix/setup.sh email list
    ${DOCK_DIR}/postfix/setup.sh alias list | awk '{print $1}'
) > ${TFILE_EMAIL}


# Log in to the agora so all following mmctl commands are authenticated.
echo "docker exec -ti mattermostServ bin/mmctl auth login ${httpProto}://${URL_AGORA} --name local-server --username ${mattermost_user} --password ${mattermost_pass}" | tee -a ${CMD}
|||
|
|||
# Process each request line.
# NOTE(review): this while-loop runs in a pipeline subshell, so 'exit 1'
# inside it aborts only the loop (not the script) and variables assigned
# here are not visible after 'done' — TODO confirm intended.
echo "${ALL_LINES}" | while read ligne
do

    # | xargs trims leading/trailing whitespace of each ';'-separated field
    NOM=$(awk -F ";" '{print $1}' <<< ${ligne} | xargs)
    PRENOM=$(awk -F ";" '{print $2}' <<< ${ligne} | xargs)

    declare -A tab_email
    tab_email[EMAIL_SOUHAITE]=$(awk -F ";" '{print $3}' <<< ${ligne} | xargs)
    tab_email[EMAIL_SECOURS]=$(awk -F ";" '{print $4}' <<< ${ligne} | xargs)

    # Organisation name, forced to lower case.
    ORGA=$(awk -F ";" '{print $5}' <<< ${ligne} | xargs)
    ORGA=${ORGA,,}

    # O/N flags for each requested service.
    declare -A service
    service[ADMIN_ORGA]=$(awk -F ";" '{print $6}' <<< ${ligne} | xargs)
    service[NC_ORGA]=$(awk -F ";" '{print $7}' <<< ${ligne} | xargs)
    service[GARRADIN_ORGA]=$(awk -F ";" '{print $8}' <<< ${ligne} | xargs)
    service[WP_ORGA]=$(awk -F ";" '{print $9}' <<< ${ligne} | xargs)
    service[AGORA_ORGA]=$(awk -F ";" '{print $10}' <<< ${ligne} | xargs)
    service[WIKI_ORGA]=$(awk -F ";" '{print $11}' <<< ${ligne} | xargs)
    service[NC_BASE]=$(awk -F ";" '{print $12}' <<< ${ligne} | xargs)

    GROUPE_NC_BASE=$(awk -F ";" '{print $13}' <<< ${ligne} | xargs)
    GROUPE_NC_BASE=${GROUPE_NC_BASE,,}
    EQUIPE_AGORA=$(awk -F ";" '{print $14}' <<< ${ligne} | xargs)
    EQUIPE_AGORA=${EQUIPE_AGORA,,}
    QUOTA=$(awk -F ";" '{print $15}' <<< ${ligne} | xargs)

    # Kaz login: "firstname.lastname", lower-cased, accents stripped.
    IDENT_KAZ=$(unaccent utf8 ${PRENOM,,}.${NOM,,})
    EMAIL_SOUHAITE=${tab_email[EMAIL_SOUHAITE]}
    EMAIL_SECOURS=${tab_email[EMAIL_SECOURS]}

    (
        echo
        echo "*****************************traitement de ${ligne}"
    ) | tee -a $LOG

    #****************************************************************************************
    #*************************** FIELD VALIDATION *******************************************
    #****************************************************************************************

    # Email validation regex (used by validator below).
    regex="^(([A-Za-z0-9]+((\.|\-|\_|\+)?[A-Za-z0-9]?)*[A-Za-z0-9]+)|[A-Za-z0-9]+)@(([A-Za-z0-9]+)+((\.|\-|\_)?([A-Za-z0-9]+)+)*)+\.([A-Za-z]{2,})+$"
|||
function validator {
    # Check that $1 matches the email regex (global ${regex}); on failure,
    # log an error and abort the current (sub)shell with status 1.
    [[ $1 =~ ${regex} ]] && return 0
    #printf "* %-48s \e[1;31m[fail]\e[m\n" "${1}"
    {
        echo "${RED}"
        echo "ERREUR: le paramètre $1 n'est pas un email valide - on stoppe tout - aucun utilisateur de créé"
        echo "${NC}"
    } | tee -a ${LOG}
    exit 1
}
|||
|
|||
    # Validate both email fields; validator exits on failure.
    for k in "${!tab_email[@]}"; do
        validator "${tab_email[$k]}"
    done

    # Every service flag must be exactly O or N.
    for k in "${!service[@]}"; do
        if [ "${service[$k]}" != "O" -a "${service[$k]}" != "N" ]; then
            (
                echo "${RED}"
                echo "$k:${service[$k]}"
                echo "ERREUR: le paramètre $k accepte O ou N - on stoppe tout - aucun utilisateur de créé"
                echo "${NC}"
            ) | tee -a ${LOG}
            exit 1
        fi
    done

    # Length limits for ORGA and EQUIPE_AGORA.
    TAILLE_MAX="15"
    if [ ${#ORGA} -gt ${TAILLE_MAX} ]; then
        (
            echo "${RED}"
            echo "ERREUR: le paramètre ORGA est trop grand: ${ORGA} , taille max: ${TAILLE_MAX} - on stoppe tout - aucun utilisateur de créé"
            echo "${NC}"
        ) | tee -a ${LOG}
        exit 1
    fi

    if [ ${#EQUIPE_AGORA} -gt ${TAILLE_MAX} ]; then
        (
            echo "${RED}"
            echo "ERREUR: le paramètre EQUIPE_AGORA est trop grand: ${EQUIPE_AGORA} , taille max: ${TAILLE_MAX} - on stoppe tout - aucun utilisateur de créé"
            echo "${NC}"
        ) | tee -a ${LOG}
        exit 1
    fi

    # QUOTA must be an integer.
    # NOTE(review): unlike the other checks this branch neither echoes ${NC}
    # nor exits, so a bad quota is reported but processing continues — the
    # error text says "on stoppe tout"; TODO confirm a missing 'exit 1'.
    if [[ ${QUOTA} =~ ^[[:digit:]]+$ ]];then
        :
    else
        (
            echo "${RED}"
            echo "ERREUR: QUOTA n'est pas numérique: ${QUOTA} - on stoppe tout - aucun utilisateur de créé"
        ) | tee -a ${LOG}

    fi

    # Does the email already exist (account or alias)?
    nb_ligne=$(grep "^${EMAIL_SOUHAITE}$" ${TFILE_EMAIL} | wc -l)
    if [ ${nb_ligne} != 0 ];then
        (
            echo "${RED}"
            echo "ERREUR: ${EMAIL_SOUHAITE} existe déjà - on stoppe tout - aucun utilisateur de créé"
            echo "${NC}"
        ) | tee -a ${LOG}
        exit 1
    fi

    #************************************************************************************
    #*************************** END FIELD VALIDATION ***********************************
    #************************************************************************************

    echo ${EMAIL_SOUHAITE} "n'existe pas, on continue" | tee -a $LOG

    #************************************************************************************
    # Generate one password accepted by postfix/nc/mattermost.
    #************************************************************************************
    PASSWORD=_`apg -n 1 -m 10 -M NCL -d`-
|||
|
|||
    #************************************************************************************
    #*************************** NEXTCLOUD *********************************************
    #************************************************************************************


    # Pick the Nextcloud URL: the orga's dedicated cloud when requested,
    # the shared base cloud otherwise.
    if [ "${ORGA}" != "" -a "${service[NC_ORGA]}" == "O" ]; then
        URL_NC=$(echo $ORGA)"-"$(echo $cloudHost).$(echo $domain)

        # If the orga's cloud container is not up, create it.
        # NOTE(review): '{$cloudHost}' is literal here — likely a typo for
        # '${cloudHost}', so this grep may never match; TODO confirm.
        nb=$(docker ps | grep ${ORGA}-{$cloudHost} | wc -l)
        if [ "${nb}" == "0" ];then
            echo ${CREATE_ORGA_CMD} +cloud +collabora ${ORGA} | tee -a ${CMD}
            # install the initial plugins
            #TODO
        fi

        NB_SERVICES_DEDIES=$((NB_SERVICES_DEDIES+1))
        MESSAGE_MAIL_ORGA_1="${MESSAGE_MAIL_ORGA_1}
* un bureau virtuel pour stocker des fichiers/calendriers/contacts et partager avec vos utilisateurs : ${URL_NC}"

    else
        URL_NC=$(echo $cloudHost).$(echo $domain)
        NB_SERVICES_BASE=$((NB_SERVICES_BASE+1))
    fi

    # Does the user already exist on this Nextcloud?
    curl -o $TEMP_USER_NC -X GET -H 'OCS-APIRequest:true' "${httpProto}://admin:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/users?search=${IDENT_KAZ}"
    nb_user_nc_orga=$(grep "<element>${IDENT_KAZ}</element>" ${TEMP_USER_NC} | wc -l)
    if [ "${nb_user_nc_orga}" != "0" ];then
        (
            # echo "${DOCK_DIR}/postfix/setup.sh email del ${EMAIL_SOUHAITE}"
            echo "${RED}"
            echo "ERREUR: ${IDENT_KAZ} existe déjà sur ${URL_NC} - on stoppe tout - aucun utilisateur de créé"
            echo "${NC}"
        ) | tee -a ${LOG}
        exit 1
    fi

    # Create the user on Nextcloud (the command is appended to ${CMD}).
    echo "curl -X POST -H 'OCS-APIRequest:true' ${httpProto}://admin:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/users \
-d userid='${IDENT_KAZ}' \
-d displayName='${PRENOM} ${NOM}' \
-d password='${PASSWORD}' \
-d email='${EMAIL_SOUHAITE}' \
-d quota='${QUOTA}GB' \
-d language='fr' \
" | tee -a ${CMD}

    # If the user administers their orga, add them to the NC admin group.
    if [ "${service[ADMIN_ORGA]}" == "O" -a "${ORGA}" != "" -a "${service[NC_ORGA]}" == "O" ]; then
        echo "curl -X POST -H 'OCS-APIRequest:true' ${httpProto}://${nextcloud_NEXTCLOUD_ADMIN_USER}:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/users/${IDENT_KAZ}/groups -d groupid='admin'" | tee -a ${CMD}
    fi

    # Should the user join a specific group on the base Nextcloud?
    if [ "${GROUPE_NC_BASE}" != "" -a "${service[NC_BASE]}" == "O" ]; then
        # Does the group already exist?
        curl -o ${TEMP_GROUP_NC} -X GET -H 'OCS-APIRequest:true' "${httpProto}://admin:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/groups?search=${GROUPE_NC_BASE}"
        nb=$(grep "<element>$GROUPE_NC_BASE</element>" ${TEMP_GROUP_NC} | wc -l)
        if [ "${nb}" == "0" ];then
            echo "curl -X POST -H 'OCS-APIRequest:true' ${httpProto}://admin:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/groups -d groupid=${GROUPE_NC_BASE}" | tee -a ${CMD}
        fi
        # Then attach the user to the group.
        echo "curl -X POST -H 'OCS-APIRequest:true' ${httpProto}://admin:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/users/${IDENT_KAZ}/groups -d groupid=${GROUPE_NC_BASE}" | tee -a ${CMD}
    fi
|||
|
|||
|
|||
    #************************************************************************************
    #*************************** WORDPRESS **********************************************
    #************************************************************************************

    #TODO: for API usage see https://www.hostinger.com/tutorials/wordpress-rest-api

    # NOTE(review): this 'if' is never closed — its matching 'fi' at the end
    # of this section is commented out, so the script only parses because of
    # the early 'exit;' above. Restore the 'fi' before re-enabling the script.
    if [ "${ORGA}" != "" -a "${service[WP_ORGA]}" == "O" ]; then

        URL_WP_ORGA=$(echo $ORGA)"-"$(echo $wordpressHost).$(echo $domain)

        # If the orga's wordpress container is not up, create it.
        nb=$(docker ps | grep ${ORGA}-${wordpressHost} | wc -l)
        if [ "${nb}" == "0" ];then
            echo ${CREATE_ORGA_CMD} +wp ${ORGA} | tee -a ${CMD}
        fi

        NB_SERVICES_DEDIES=$((NB_SERVICES_DEDIES+1))
        MESSAGE_MAIL_ORGA_1="${MESSAGE_MAIL_ORGA_1}
* un site web de type wordpress : ${URL_WP_ORGA}/wp-admin/"

        #TODO : check whether the user already exists (disabled draft below)
# #le user existe t-il déjà sur le wp ?
# curl -o $TEMP_USER_WP -X GET "${httpProto}://${wp_WORDPRESS_ADMIN_USER}:${wp_WORDPRESS_ADMIN_PASSWORD}@${URL_WP_ORGA}/ocs/v1.php/cloud/users?search=${IDENT_KAZ}"
# nb_user_wp_orga=$(grep "<element>${IDENT_KAZ}</element>" ${TEMP_USER_WP} | wc -l)
# if [ "${nb_user_wp_orga}" != "0" ];then
# (
# #echo "${DOCK_DIR}/postfix/setup.sh email del ${EMAIL_SOUHAITE}"
# echo "${RED}"
# echo "ERREUR: ${IDENT_KAZ} existe déjà sur ${URL_WP_ORGA} - on stoppe tout - aucun utilisateur de créé"
# echo "${NC}"
# ) | tee -a ${LOG}
#
# # ROLLBACK - on vire le user de NC
# if [ "${nb_user_nc_orga}" != "0" ];then
# (
# echo "${RED}"
# echo "ERREUR: ${IDENT_KAZ} existe déjà sur ${URL_NC} - on stoppe tout - aucun utilisateur de créé"
# echo "${NC}"
# ) | tee -a ${LOG}
#
# #on supprime l'utilisateur sur NC.
# echo "curl -X DELETE -H 'OCS-APIRequest:true' ${httpProto}://admin:${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}@${URL_NC}/ocs/v1.php/cloud/users \
# -d userid='${IDENT_KAZ}' \
# " | tee -a ${CMD}
# fi
#
# exit 1
# fi

        #TODO : create the user and make it admin when required (disabled)
# if [ "${service[ADMIN_ORGA]}" == "O" ]; then
# :
# else
# :
# fi
# fi
|||
|
|||
    #************************************************************************************
    #*************************** GARRADIN  *********************************************
    #************************************************************************************

    if [ "${ORGA}" != "" -a "${service[GARRADIN_ORGA]}" == "O" ]; then

        URL_GARRADIN_ORGA=$(echo $ORGA)"-"$(echo $garHost).$(echo $domain)

        # No container-existence check for garradin: always (re)generate it.
        echo ${CREATE_ORGA_CMD} +garradin ${ORGA} | tee -a ${CMD}

        NB_SERVICES_DEDIES=$((NB_SERVICES_DEDIES+1))
        MESSAGE_MAIL_ORGA_1="${MESSAGE_MAIL_ORGA_1}
* un service de gestion adhérents/clients : ${URL_GARRADIN_ORGA}"

        # Admins must finish the garradin installation on the site itself.
        if [ "${service[ADMIN_ORGA]}" == "O" ]; then
            MESSAGE_MAIL_ORGA_1="${MESSAGE_MAIL_ORGA_1}
(l'installation est à terminer en vous rendant sur le site)"
        fi
    fi
|||
|
|||
    #************************************************************************************
    #*************************** DOKUWIKI ***********************************************
    #************************************************************************************

    if [ "${ORGA}" != "" -a "${service[WIKI_ORGA]}" == "O" ]; then

        URL_WIKI_ORGA=$(echo $ORGA)"-"$(echo $dokuwikiHost).$(echo $domain)

        # If the orga's wiki container is not up, create it.
        nb=$(docker ps | grep ${ORGA}-${dokuwikiHost} | wc -l)
        if [ "${nb}" == "0" ];then
            echo ${CREATE_ORGA_CMD} +wiki ${ORGA} | tee -a ${CMD}
        fi

        NB_SERVICES_DEDIES=$((NB_SERVICES_DEDIES+1))
        MESSAGE_MAIL_ORGA_1="${MESSAGE_MAIL_ORGA_1}
* un wiki dédié pour votre documentation : ${URL_WIKI_ORGA}"


        # User creation is not automated yet; leave a reminder in ${CMD}.
        #TODO: see https://www.dokuwiki.org/devel:xmlrpc:clients
        echo "# ******************************************************************************" | tee -a ${CMD}
        echo "# **************************** ATTENTION ***************************************" | tee -a ${CMD}
        echo "# ******************************************************************************" | tee -a ${CMD}
        echo "# DOKUWIKI: TODO: créer le user et le mettre admin." | tee -a ${CMD}
        echo "# ******************************************************************************" | tee -a ${CMD}

        # Disabled draft of the existence check / user creation:
# #le user existe t-il déjà sur le wp ?
# curl -o $TEMP_USER_WP -X GET "${httpProto}://${wp_WORDPRESS_ADMIN_USER}:${wp_WORDPRESS_ADMIN_PASSWORD}@${URL_WP_ORGA}/ocs/v1.php/cloud/users?search=${IDENT_KAZ}"
# nb=$(grep "<element>${IDENT_KAZ}</element>" ${TEMP_USER_WP} | wc -l)
# if [ "${nb}" != "0" ];then
# (
# #echo "${DOCK_DIR}/postfix/setup.sh email del ${EMAIL_SOUHAITE}"
# echo "${RED}"
# echo "ERREUR: ${IDENT_KAZ} existe déjà sur ${URL_WP_ORGA} - on stoppe tout - aucun utilisateur de créé"
# echo "${NC}"
# ) | tee -a ${LOG}
# exit 1
# fi

# #créer le user et le mettre admin si nécessaitre
# if [ "${service[ADMIN_ORGA]}" == "O" ]; then
# :
# else
# :
# fi

    fi
|||
|
|||
    #************************************************************************************
    #*************************** MATTERMOST *********************************************
    #************************************************************************************

    # Dedicated mattermost containers are not handled by this script.
    if [ "${ORGA}" != "" -a "${service[AGORA_ORGA]}" == "O" ]; then
        echo "# ******************************************************************************" | tee -a ${CMD}
        echo "# **************************** ATTENTION ***************************************" | tee -a ${CMD}
        echo "# ******************************************************************************" | tee -a ${CMD}
        echo "# Mattermost dédié: on ne fait rien." | tee -a ${CMD}
        echo "# ******************************************************************************" | tee -a ${CMD}
    fi

    # Create the mattermost account on the base agora.
    echo "docker exec -ti mattermostServ bin/mmctl user create --email ${EMAIL_SOUHAITE} --username ${IDENT_KAZ} --password ${PASSWORD}" | tee -a ${CMD}

    NB_SERVICES_BASE=$((NB_SERVICES_BASE+1))

    if [ "${EQUIPE_AGORA}" != "" -a "${EQUIPE_AGORA}" != "kaz" ]; then
        # Does the team already exist?
        nb=$(docker exec mattermostServ bin/mmctl team list | grep -w ${EQUIPE_AGORA} | wc -l)
        if [ "${nb}" == "0" ];then # no: create it with the user as team admin
            echo "docker exec -ti mattermostServ bin/mmctl team create --name ${EQUIPE_AGORA} --display_name ${EQUIPE_AGORA} --email ${EMAIL_SOUHAITE}" | tee -a ${CMD}
        fi
        # Then add the user to the team.
        echo "docker exec -ti mattermostServ bin/mmctl team users add ${EQUIPE_AGORA} ${EMAIL_SOUHAITE}" | tee -a ${CMD}
    fi

    # Always add the user to the KAZ team and its two public channels.
    echo "docker exec -ti mattermostServ bin/mmctl team users add kaz ${EMAIL_SOUHAITE}" | tee -a ${CMD}
    echo "docker exec -ti mattermostServ bin/mmctl channel users add kaz:une-question--un-soucis ${EMAIL_SOUHAITE}" | tee -a ${CMD}
    echo "docker exec -ti mattermostServ bin/mmctl channel users add kaz:cafe-du-commerce--ouvert-2424h ${EMAIL_SOUHAITE}" | tee -a ${CMD}

    #************************************************************************************
    # Create the new mailbox, with its quota in GB.
    #************************************************************************************
    # Everything went fine: create the email account.
    echo "${DOCK_DIR}/postfix/setup.sh email add ${EMAIL_SOUHAITE} ${PASSWORD}" | tee -a ${CMD}
    echo "${DOCK_DIR}/postfix/setup.sh quota set ${EMAIL_SOUHAITE} ${QUOTA}G" | tee -a ${CMD}

    NB_SERVICES_BASE=$((NB_SERVICES_BASE+1))

    #************************************************************************************
    #*************************** NEWSLETTER SUBSCRIPTION ********************************
    #************************************************************************************

    #TODO: use the list on dev as well

    # Subscribe the user to the infos@listes.kaz.bzh sympa list (prod only).
    if [[ "${domain}" =~ ^dev.*$ ]]; then
        echo "# DEV, pas d'inscription à sympa"| tee -a ${CMD}
    else
        echo "# PROD, inscription à sympa"| tee -a ${CMD}
        echo "docker exec -ti sympaServ /usr/lib/sympa/bin/sympa_soap_client.pl --soap_url=${httpProto}://${URL_LISTE}/sympasoap --user_email=${sympa_user} --user_password=${sympa_pass} --service=add --service_parameters=\"${NL_LIST},${EMAIL_SOUHAITE}\"" | tee -a $CMD
    fi

    # Orga admins may create their own mailing lists.
    if [ "${service[ADMIN_ORGA]}" == "O" ]; then
        MESSAGE_MAIL_ORGA_2="${MESSAGE_MAIL_ORGA_2}Comme administrateur de votre organisation, vous pouvez créer des listes de diffusion en vous rendant sur ${URL_LISTE}"
    fi
|||
|
|||
|
|||
#************************************************************************************ |
|||
#*************************** MAIL INSCRIPTION *************************************** |
|||
#************************************************************************************ |
|||
|
|||
if [ "${NB_SERVICES_DEDIES}" != "0" ];then |
|||
MESSAGE_MAIL_ORGA_1=" |
|||
dont ${NB_SERVICES_DEDIES} service(s) dédié(s) pour votre organisation: |
|||
${MESSAGE_MAIL_ORGA_1}" |
|||
fi |
|||
|
|||
#on envoie le mail de bienvenu |
|||
MAIL_KAZ="Bonjour ${PRENOM}, |
|||
|
|||
Bienvenue chez KAZ! |
|||
|
|||
Vous disposez de $((${NB_SERVICES_BASE} + ${NB_SERVICES_DEDIES})) services kaz avec authentification: |
|||
|
|||
* une messagerie classique |
|||
* une messagerie instantanée pour discuter au sein d'équipes |
|||
${MESSAGE_MAIL_ORGA_1} |
|||
|
|||
Votre email et identifiant pour tous ces services: ${EMAIL_SOUHAITE} |
|||
Le mot de passe: ${PASSWORD} |
|||
|
|||
Nous vous déconseillons de changer votre mot de passe dans l'immédiat (nous sommes en train d'y travailler!). |
|||
|
|||
Vous pouvez accéder à votre messagerie : |
|||
* soit depuis votre webmail: ${URL_WEBMAIL} |
|||
* soit depuis votre bureau virtuel: ${URL_NC} |
|||
* soit depuis un client de messagerie comme thunderbird |
|||
|
|||
Tout est expliqué ici https://wiki.kaz.bzh/messagerie/start et là https://wiki.kaz.bzh/nextcloud/start |
|||
|
|||
Votre quota est de ${QUOTA}GB. Si vous souhaitez plus de place pour vos fichiers ou la messagerie, faites-nous signe! |
|||
|
|||
Pour accéder à la messagerie instantanée et communiquer avec les membres de votre équipe ou ceux de kaz: |
|||
https://agora.kaz.bzh/login |
|||
|
|||
${MESSAGE_MAIL_ORGA_2} |
|||
|
|||
Enfin, vous disposez de tous les autres services KAZ où l'authentification n'est pas nécessaire: https://kaz.bzh |
|||
|
|||
En cas de soucis, n'hésitez pas poser à vos questions sur le canal 'Une question ? un soucis' de l'agora dispo ici: https://agora.kaz.bzh/kaz/ |
|||
|
|||
À bientôt ;) |
|||
|
|||
La collégiale de KAZ. " |
|||
|
|||
echo "docker exec -i mailServ mailx -a 'Content-Type: text/plain; charset=\"UTF-8\"' -r contact@kaz.bzh -s \"KAZ: confirmation d'inscription\" ${EMAIL_SOUHAITE} ${EMAIL_SECOURS} << EOF |
|||
${MAIL_KAZ} |
|||
EOF" | tee -a ${CMD} |
|||
|
|||
#on envoie le mail de confirmation d'inscription à contact |
|||
MAIL_KAZ="*****POST AUTOMATIQUE****** |
|||
Hello, |
|||
${NOM} ${PRENOM} vient d'être inscrit avec l'email ${EMAIL_SOUHAITE} |
|||
quota: ${QUOTA}GB |
|||
|
|||
NC_BASE: ${service[NC_BASE]} |
|||
groupe NC base: ${GROUPE_NC_BASE} |
|||
équipe agora base: ${EQUIPE_AGORA} |
|||
email de secours: ${EMAIL_SECOURS} |
|||
|
|||
ORGA: ${ORGA} |
|||
ADMIN_ORGA: ${service[ADMIN_ORGA]} |
|||
NC_ORGA: ${service[NC_ORGA]} |
|||
GARRADIN_ORGA: ${service[GARRADIN_ORGA]} |
|||
WP_ORGA: ${service[WP_ORGA]} |
|||
AGORA_ORGA: ${service[AGORA_ORGA]} |
|||
WIKI_ORGA: ${service[WIKI_ORGA]} |
|||
|
|||
bisou!" |
|||
echo "docker exec -i mailServ mailx -a 'Content-Type: text/plain; charset=\"UTF-8\"' -r contact@kaz.bzh -s \"KAZ: confirmation d'inscription\" ${EMAIL_CONTACT} << EOF |
|||
${MAIL_KAZ} |
|||
EOF" | tee -a ${CMD} |
|||
|
|||
echo " #on envoie la confirmation d'inscription sur l'agora " | tee -a ${CMD} |
|||
echo "docker exec -ti mattermostServ bin/mmctl post create kaz:Creation-Comptes --message \"${MAIL_KAZ}\"" | tee -a ${CMD} |
|||
|
|||
#fin des inscriptions |
|||
done |
|||
|
|||
echo "# ******************************************************************************" | tee -a ${CMD} |
|||
echo "# **************************** TODO ********************************************" | tee -a ${CMD} |
|||
echo "# ******************************************************************************" | tee -a ${CMD} |
|||
echo "# Garradin KAZ: pensez à inscrire ${EMAIL_SOUHAITE} en compta en allant sur https://kaz-garradin.kaz.bzh/admin/ si ce n'est pas déjà fait et à lui mettre action='aucune' sur sa fiche membre" | tee -a ${CMD} |
|||
|
|||
# des commandes à lancer ? |
|||
if [ "${SIMULATION}" == "NO" ];then |
|||
echo "on exécute" |
|||
${CMD} |
|||
else |
|||
echo "Aucune commande n'a été lancée: Possibilité de le faire à la main. cf ${CMD}" |
|||
fi |
|||
|
|||
#END |
@ -0,0 +1,7 @@ |
|||
#!/bin/bash

# Run the Nextcloud background-job scheduler (cron.php) inside every
# running Nextcloud container: the main instance first, then every
# other running container whose name contains "cloud" (one per orga).

docker exec -u www-data nextcloudServ php cron.php

# BUGFIX: use --format to obtain container names reliably. The previous
# `docker ps | grep -i cloud | awk '{print $12}'` guessed the NAMES
# column position, which shifts with the content of the PORTS column.
for cloud in $(docker ps --format '{{.Names}}' | grep -i cloud)
do
    # The main instance was already handled above; don't run it twice.
    [ "${cloud}" = "nextcloudServ" ] && continue
    docker exec -u www-data "${cloud}" php cron.php
done
@ -0,0 +1,219 @@ |
|||
#!/bin/bash |
|||
|
|||
# list/ajout/supprime/ un sous-domaine kaz.bzh |
|||
|
|||
KAZ_ROOT=$(cd "$(dirname $0)"/..; pwd) |
|||
. "${KAZ_ROOT}/bin/.commonFunctions.sh" |
|||
setKazVars |
|||
|
|||
|
|||
export PRG="$0" |
|||
cd $(dirname $0)/.. |
|||
export IP="127.75.65.90" |
|||
export ETC_HOSTS="/etc/hosts" |
|||
|
|||
. "${DOCKERS_ENV}" |
|||
# no more export in .env |
|||
export $(set | grep "domain=") |
|||
|
|||
declare -a forbidenName |
|||
forbidenName=(dev ${calcHost} calc ${cloudHost} bureau ${dateHost} date ${dokuwikiHost} dokuwiki ${fileHost} file ${garHost} ${gitHost} ${gravHost} ${matterHost} ${officeHost} collabra ${padHost} ${sympaHost} listes ${webmailHost} ${wordpressHost} www ${vigiloHost} form) |
|||
|
|||
export FORCE="NO" |
|||
export CMD="" |
|||
export SIMU="" |
|||
|
|||
usage(){ |
|||
echo "Usage: ${PRG} list [sub-domain...]" |
|||
echo " ${PRG} [-n] [-f] {add/del} sub-domain..." |
|||
echo " -h help" |
|||
echo " -n simulation" |
|||
echo " -f force protected domain" |
|||
exit 1 |
|||
} |
|||
|
|||
for ARG in $@ |
|||
do |
|||
case "${ARG}" in |
|||
'-h' | '-help' ) |
|||
usage |
|||
;; |
|||
'-f' ) |
|||
shift |
|||
export FORCE="YES" |
|||
;; |
|||
'-n' ) |
|||
shift |
|||
export SIMU="echo" |
|||
;; |
|||
'list'|'add'|'del' ) |
|||
shift |
|||
CMD="${ARG}" |
|||
break |
|||
;; |
|||
* ) |
|||
usage |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [ -z "${CMD}" ]; then |
|||
usage |
|||
fi |
|||
|
|||
. "${KAZ_KEY_DIR}/env-gandi" |
|||
|
|||
if [[ -z "${GANDI_KEY}" ]] ; then |
|||
echo |
|||
echo "no GANDI_KEY set in ${KAZ_KEY_DIR}/env-gandi" |
|||
usage |
|||
fi |
|||
|
|||
|
|||
waitNet () {
    # Block until the Gandi LiveDNS API answers with an HTTP 200 status,
    # probing every 5 seconds while it is unavailable (e.g. 503).
    #
    # NOTE(review): when the API was initially unavailable, the function
    # calls `exit` (terminating the whole script) once the API comes
    # back, instead of returning to the caller — presumably to force a
    # clean re-run. Confirm this is intentional.

    # Debug helper kept for reference (fab):
    #TOTO="curl -H \"authorization: Apikey ${GANDI_KEY}\" --connect-timeout 2 -s -D - \"${GANDI_API}\""
    #echo $TOTO

    # head -n1 of the dumped response headers is the HTTP status line;
    # the unquoted *200* right-hand side is a glob pattern match.
    if [[ $(curl -H "authorization: Apikey ${GANDI_KEY}" --connect-timeout 2 -s -D - "${GANDI_API}" -o /dev/null 2>/dev/null | head -n1) != *200* ]]; then
        echo "DNS not available. Please wait..."
        while [[ $(curl -H "authorization: Apikey ${GANDI_KEY}" --connect-timeout 2 -s -D - "${GANDI_API}" -o /dev/null 2>/dev/null | head -n1) != *200* ]]
        do
            sleep 5
        done
        exit
    fi
}
|||
|
|||
list(){
    # List the DNS records of the zone, optionally filtered by the
    # sub-domain names given as arguments.
    waitNet
    # Clean the scratch file on any exit path (trap is script-wide).
    trap 'rm -f "${TMPFILE}"' EXIT
    TMPFILE="$(mktemp)" || exit 1
    if [[ -n "${SIMU}" ]] ; then
        ${SIMU} curl -X GET "${GANDI_API}/records" -H "authorization: Apikey ${GANDI_KEY}"
    else
        # Flatten the JSON record list to "name:value" lines, drop
        # technical records (leading _ or @), keep only entries ending
        # in "dev" or pointing into kaz.bzh.
        # NOTE(review): this hand-rolled sed parse assumes the exact
        # field order of the LiveDNS JSON response — fragile; jq would
        # be more robust.
        curl -X GET "${GANDI_API}/records" -H "authorization: Apikey ${GANDI_KEY}" 2>/dev/null | sed "s/,{/\n/g" | sed 's/.*rrset_name":"\([^"]*\)".*rrset_values":\["\([^"]*\)".*/\1:\2/g'| grep -v '^[_@]'| grep -e 'dev$' -e 'kaz.bzh\.*$' > ${TMPFILE}
    fi
    if [ $# -lt 1 ]; then
        # No filter: dump everything.
        cat ${TMPFILE}
    else
        for ARG in $@
        do
            cat ${TMPFILE} | grep "${ARG}.*:"
        done
    fi
}
|||
|
|||
saveDns () {
    # Take a snapshot of the DNS zone before any modification, unless we
    # are operating on the purely local development domain.
    #
    # Arguments: the sub-domain names about to be added/removed. Names
    # must be given WITHOUT a .local/.dev/.bzh suffix (old style); any
    # suffixed name aborts with the usage message.
    local suffix
    for ARG in $@ ; do
        for suffix in local dev bzh ; do
            # BUGFIX: the dot must be escaped — the old pattern
            # ".local$" also matched unsuffixed names such as "mylocal".
            if [[ "${ARG}" =~ \.${suffix}$ ]] ; then
                echo "${PRG}: old fasion style (remove .${suffix} at the end)"
                usage;
            fi
        done
    done
    # No remote zone to snapshot for a local install.
    if [[ "${domain}" = "kaz.local" ]]; then
        return
    fi
    waitNet
    # POST /snapshots creates a restorable copy of the current zone.
    ${SIMU} curl -X POST "${GANDI_API}/snapshots" -H "authorization: Apikey ${GANDI_KEY}" 2>/dev/null
}
|||
|
|||
badName(){
    # Return 0 ("bad") when the candidate name is empty, or when it is
    # one of the protected names and the -f flag was not given.
    # Return 1 ("acceptable") otherwise.
    local candidate="$1" protected
    if [[ -z "${candidate}" ]]; then
        return 0
    fi
    if [[ "${FORCE}" == "NO" ]]; then
        for protected in "${forbidenName[@]}"; do
            if [[ "${protected}" == "${candidate}" ]]; then
                return 0
            fi
        done
    fi
    return 1
}
|||
|
|||
add(){
    # Add one CNAME (or /etc/hosts entry) per sub-domain given.
    # Protected names are skipped unless -f was used; the zone is
    # snapshotted first via saveDns.
    if [ $# -lt 1 ]; then
        exit
    fi
    saveDns $@
    declare -a ADDED
    for ARG in $@
    do
        if badName "${ARG}" ; then
            echo "can't manage '${ARG}'. Use -f option"
            continue
        fi
        case "${domain}" in
            kaz.local )
                # Entry already present on the 127.75.65.90 line.
                # NOTE(review): `break` aborts the whole loop, so any
                # remaining arguments are silently ignored — `continue`
                # may have been intended. TODO confirm.
                if grep -q --perl-regex "^127.75.65.90.*[ \t]${ARG}.${domain}" "${ETC_HOSTS}" 2> /dev/null ; then
                    break
                fi
                if grep -q --perl-regex "^127.75.65.90[ \t]" "${ETC_HOSTS}" 2> /dev/null ; then
                    # A 127.75.65.90 line exists: prepend the new host
                    # name to the first such line only (0,/…/ address).
                    ${SIMU} sudo sed -i -e "0,/^127.75.65.90[ \t]/s/^\(127.75.65.90[ \t]\)/\1${ARG}.${domain} /g" "${ETC_HOSTS}"
                else
                    # No line yet: append one at the end of the file.
                    ${SIMU} sudo sed -i -e "$ a 127.75.65.90\t${ARG}.${domain}" "${ETC_HOSTS}" 2> /dev/null
                fi
                ;;
            dev.kaz.bzh )
                # CNAME <name>.dev -> dev in the kaz.bzh zone.
                ${SIMU} curl -X POST "${GANDI_API}/records" -H "authorization: Apikey ${GANDI_KEY}" -H 'content-type: application/json' -d '{"rrset_type":"CNAME", "rrset_name":"'${ARG}.dev'", "rrset_values":["dev"]}'
                echo
                ;;
            kaz.bzh)
                # CNAME <name> -> kaz.bzh. (absolute target).
                ${SIMU} curl -X POST "${GANDI_API}/records" -H "authorization: Apikey ${GANDI_KEY}" -H 'content-type: application/json' -d '{"rrset_type":"CNAME", "rrset_name":"'${ARG}'", "rrset_values":["kaz.bzh."]}'
                echo
                ;;
            *)
                echo "domain environnement not set!"
                usage
                ;;
        esac
        ADDED+=("${ARG}")
    done
    echo "Domains added to ${domain}: ${ADDED[@]}"
}
|||
|
|||
del(){
    # Remove one CNAME (or /etc/hosts entry) per sub-domain given.
    # Protected names are skipped unless -f was used; the zone is
    # snapshotted first via saveDns.
    if [ $# -lt 1 ]; then
        exit
    fi
    saveDns $@
    declare -a REMOVED
    for ARG in $@
    do
        if badName "${ARG}" ; then
            echo "can't manage '${ARG}'. Use -f option"
            continue
        fi
        case "${domain}" in
            kaz.local )
                # BUGFIX: was "if !grep …" (no space), which tries to
                # execute a command literally named "!grep" instead of
                # negating grep's exit status.
                if ! grep -q --perl-regex "^127.75.65.90.*[ \t]${ARG}.${domain}" "${ETC_HOSTS}" 2> /dev/null ; then
                    break
                fi
                # Drop a dedicated line for this host, or strip the
                # name from a shared 127.75.65.90 line.
                ${SIMU} sudo sed -i -e "/^127.75.65.90[ \t]*${ARG}.${domain}[ \t]*$/d" \
                     -e "s|^\(127.75.65.90.*\)[ \t]${ARG}.${domain}|\1|g" "${ETC_HOSTS}"
                ;;
            dev.kaz.bzh )
                ${SIMU} curl -X DELETE "${GANDI_API}/records/${ARG}.dev" -H "authorization: Apikey ${GANDI_KEY}"
                echo
                ;;
            kaz.bzh )
                ${SIMU} curl -X DELETE "${GANDI_API}/records/${ARG}" -H "authorization: Apikey ${GANDI_KEY}"
                echo
                ;;
            *)
                echo "domain environnement not set!"
                usage
                ;;
        esac
        REMOVED+=("${ARG}")
    done
    echo "Domains removed from ${domain}: ${REMOVED[@]}"
}
|||
|
|||
#echo "CMD: ${CMD} $*" |
|||
${CMD} $* |
@ -0,0 +1,165 @@ |
|||
#!/bin/bash |
|||
|
|||
# list/ajout/supprime/ les domaines extérieurs à kaz.bzh |
|||
|
|||
KAZ_ROOT=$(cd "$(dirname $0)"/..; pwd) |
|||
. "${KAZ_ROOT}/bin/.commonFunctions.sh" |
|||
setKazVars |
|||
|
|||
export PRG="$0" |
|||
cd $(dirname $0) |
|||
|
|||
. "${DOCKERS_ENV}" |
|||
|
|||
declare -a availableComposes availableOrga |
|||
availableComposes=(${garHost} ${cloudHost} ${dokuwikiHost} ${wordpressHost} ${matterHost}) |
|||
availableOrga=($(sed -e "s/\(.*\)[ \t]*#.*$/\1/" -e "s/^[ \t]*\(.*\)-orga$/\1/" -e "/^$/d" "${KAZ_CONF_DIR}/container-orga.list")) |
|||
|
|||
# no more export in .env |
|||
export $(set | grep "domain=") |
|||
|
|||
export CMD="" |
|||
export SIMU="" |
|||
export CHANGE="" |
|||
|
|||
usage(){ |
|||
echo "Usage: ${PRG} list [sub-domain...]" |
|||
echo " ${PRG} [-n] {add/del} friend-domain orga compose" |
|||
echo " ${PRG} -l" |
|||
echo " -l short list" |
|||
echo " -h help" |
|||
echo " -n simulation" |
|||
exit 1 |
|||
} |
|||
|
|||
for ARG in $@ |
|||
do |
|||
case "${ARG}" in |
|||
'-h' | '-help' ) |
|||
usage |
|||
;; |
|||
'-n' ) |
|||
shift |
|||
export SIMU="echo" |
|||
;; |
|||
'-l') |
|||
for compose in ${availableComposes[@]} ; do |
|||
sed -e "s/[ \t]*\([^#]*\)#.*/\1/g" -e "/^$/d" -e "s/.*server_name[ \t]\([^ ;]*\).*/\1/" "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_name.${domain}" |
|||
done |
|||
exit |
|||
;; |
|||
'list'|'add'|'del' ) |
|||
shift |
|||
CMD="${ARG}" |
|||
break |
|||
;; |
|||
* ) |
|||
usage |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [ -z "${CMD}" ]; then |
|||
echo "Commande missing" |
|||
usage |
|||
fi |
|||
|
|||
######################################## |
|||
# Validation helpers: each returns 0 when its argument is INVALID
# ("bad") and 1 when it is acceptable.

# A domain is valid when non-empty and made only of letters, digits,
# dots and dashes.
badDomaine () {
    if [[ -n "$1" ]] && [[ "$1" =~ ^[-.a-zA-Z0-9]*$ ]]; then
        return 1
    fi
    return 0
}
# An orga is valid when it appears in the availableOrga list.
badOrga () {
    if [[ -n "$1" ]] && [[ " ${availableOrga[*]} " =~ " $1 " ]]; then
        return 1
    fi
    return 0
}
# A compose is valid when it appears in the availableComposes list.
badCompose () {
    if [[ -n "$1" ]] && [[ " ${availableComposes[*]} " =~ " $1 " ]]; then
        return 1
    fi
    return 0
}
|||
|
|||
######################################## |
|||
listServ () { |
|||
for compose in ${availableComposes[@]} ; do |
|||
sed -e "s/[ \t]*\([^#]*\)#.*/\1/g" -e "/^$/d" -e "s/.*server_name[ \t]\([^ ;]*\).*/\1 : ${compose}/" "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_name.${domain}" |
|||
done |
|||
} |
|||
|
|||
listOrgaServ () { |
|||
for compose in ${availableComposes[@]} ; do |
|||
sed -e "s/[ \t]*\([^#]*\)#.*/\1/g" -e "/^$/d" -e "s/\([^ ]*\)[ \t]*\([^ \t;]*\).*/\1 => \2 : ${compose}/" "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_map.${domain}" |
|||
done |
|||
} |
|||
|
|||
######################################## |
|||
list () {
    # Display the known external-domain mappings.
    # Without arguments: every orga mapping and every server_name entry.
    # With arguments: only the lines matching each given domain.
    previousOrga=$(listOrgaServ)
    previousServ=$(listServ)
    if [ $# -lt 1 ]; then
        [ -n "${previousOrga}" ] && echo "${previousOrga}"
        [ -n "${previousServ}" ] && echo "${previousServ}"
        return
    fi
    for ARG in $@
    do
        # Orga mappings look like "domain => orga : compose".
        orga=$(echo "${previousOrga}" | grep "${ARG}.* =>")
        # BUGFIX: server entries produced by listServ look like
        # "domain : compose" (no "=>"), so they must be filtered on
        # " :" — the old " =>" pattern never matched and the filtered
        # server list always came back empty.
        serv=$(echo "${previousServ}" | grep "${ARG}.* :")
        [ -n "${orga}" ] && echo "${orga}"
        [ -n "${serv}" ] && echo "${serv}"
    done
}
|||
|
|||
######################################## |
|||
add () {
    # Register a friend domain: route $1 to orga $2 through compose $3
    # by appending to the proxy map and server_name config files.
    # Sets CHANGE so the caller regenerates and reloads the proxy.
    [ $# -ne 3 ] && usage
    badDomaine $1 && echo "bad domaine: ${RED}$1${NC}" && usage
    badOrga $2 && echo "bad orga: ${RED}$2${NC} not in ${GREEN}${availableOrga[@]}${NC}" && usage
    badCompose $3 && echo "bad compose: ${RED}$3${NC} not in ${GREEN}${availableComposes[@]}${NC}" && usage
    # XXX check compose exist in orga ?
    previousOrga=$(listOrgaServ | grep $1)
    # Exact mapping already present: nothing to do.
    [[ " ${previousOrga}" =~ " $1 => $2 : $3" ]] && echo "done" && return
    # NOTE(review): when the domain exists with ANOTHER orga/compose,
    # this prints "deleted … before" and returns WITHOUT deleting —
    # the message reads like an instruction to the operator ("delete …
    # before [retrying]"). TODO confirm intent.
    [[ " ${previousOrga}" =~ " $1 " ]] && echo "deleted ${RED}$(echo "${previousOrga}" | grep -e "$1")${NC} before" && return
    if [[ -n "${SIMU}" ]] ; then
        # Simulation: show what would be appended and where.
        echo "$1 $2; => ${KAZ_CONF_PROXY_DIR}/$3_kaz_map.${domain}"
        echo "server_name $1; => ${KAZ_CONF_PROXY_DIR}/$3_kaz_name.${domain}"
    else
        echo "$1 $2;" >> "${KAZ_CONF_PROXY_DIR}/$3_kaz_map.${domain}"
        echo "server_name $1;" >> "${KAZ_CONF_PROXY_DIR}/$3_kaz_name.${domain}"
    fi
    echo "${PRG}: $1 added"
    CHANGE="add"
}
|||
|
|||
######################################## |
|||
del () {
    # Unregister a friend domain: remove its line from every compose's
    # proxy map and server_name files. Sets CHANGE so the caller
    # regenerates and reloads the proxy.
    [ $# -ne 1 ] && usage
    badDomaine $1 && echo "bad domaine: ${RED}$1${NC}" && usage
    previous=$(listOrgaServ | grep -e "$1")
    [[ ! "${previous}" =~ ^$1 ]] && echo "$1 not found in ${previous}" && return
    # XXX if done OK
    for compose in ${availableComposes[@]} ; do
        if grep -q -e "^[ \t]*$1[ \t]" "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_map.${domain}" ; then
            ${SIMU} sed -e "/^[ \t]*$1[ \t]/d" -i "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_map.${domain}"
        fi
        # BUGFIX: the pattern must include the domain ($1); the previous
        # pattern "server_name[ \t]" matched EVERY server_name line and
        # wiped all other domains from the file.
        if grep -q -e "^[ \t]*server_name[ \t][ \t]*$1[ \t]*;" "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_name.${domain}" ; then
            ${SIMU} sed -e "/^[ \t]*server_name[ \t][ \t]*$1[ \t]*;/d" -i "${KAZ_CONF_PROXY_DIR}/${compose}_kaz_name.${domain}"
        fi
    done
    echo "${PRG}: $1 deleted"
    CHANGE="del"
}
|||
|
|||
######################################## |
|||
${CMD} $@ |
|||
|
|||
# When a mapping was added or removed, regenerate the proxy
# configuration and reload it so the change takes effect.
if [ -n "${CHANGE}" ] ; then
    echo "Reload proxy conf"
    # BUGFIX: the variable exported by setKazVars is KAZ_CONF_PROXY_DIR;
    # "KAZ_KAZ_CONF_PROXY_DIR" is never defined, which made these
    # commands resolve to /proxy-gen.sh and /reload.sh.
    ${SIMU} "${KAZ_CONF_PROXY_DIR}/proxy-gen.sh"
    ${SIMU} "${KAZ_CONF_PROXY_DIR}/reload.sh"
fi
|||
|
|||
######################################## |
@ -0,0 +1,57 @@ |
|||
#!/bin/bash
# 23/04/2021
# Update the metrics-collection file for later import into the database.
# did

KAZ_ROOT=$(cd $(dirname $0)/..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

FIC_COLLECTE=${KAZ_STATE_DIR}/collecte.csv
FIC_ACTIVITE_MAILBOX=${KAZ_STATE_DIR}/activites_mailbox.csv

# Jirafeau: number of hosted files.
echo "$(date +%Y-%m-%d-%H-%M-%S);" \
     "depot-count;" \
     "$(find ${DOCK_VOL}/jirafeau_fileData/_data/files/ -name \*count| wc -l)" >> "${FIC_COLLECTE}"
# Jirafeau: total size (KiB).
# BUGFIX: was >> "{$FIC_COLLECTE}", which wrote to a file literally
# named "{…}" instead of the collection file.
echo "$(date +%Y-%m-%d-%H-%M-%S);" \
     "depot-size;" \
     "$(du -ks ${DOCK_VOL}/jirafeau_fileData/_data/files/ | awk -F " " '{print $1}')" >> "${FIC_COLLECTE}"

# Disk usage of the system partition on the server.
echo "$(date +%Y-%m-%d-%H-%M-%S);" \
     "disk-system-size-used;" \
     "$(df | grep sda | awk -F " " '{print $3}')" >> "${FIC_COLLECTE}"

# Number of kaz mailboxes.
echo "$(date +%Y-%m-%d-%H-%M-%S);" \
     "mailboxes;" \
     "$(cat ${KAZ_COMP_DIR}/postfix/config/postfix-accounts.cf | wc -l)" >> "${FIC_COLLECTE}"

# Number of kaz mail aliases.
echo "$(date +%Y-%m-%d-%H-%M-%S);" \
     "mail_alias;" \
     "$(cat ${KAZ_COMP_DIR}/postfix/config/postfix-virtual.cf | wc -l)" >> "${FIC_COLLECTE}"

# Stats of both postfix instances (mail + sympa), mailed to root.
EXP=$(/usr/bin/hostname -s)

STATS1=$(cat ${DOCK_VOL}/sympa_sympaLog/_data/mail.log | /usr/sbin/pflogsumm)
docker exec -i mailServ mailx -r $EXP -s "stats Sympa" root <<DEB_MESS
$STATS1
DEB_MESS

STATS2=$(cat ${DOCK_VOL}/postfix_mailLog/_data/mail.log | /usr/sbin/pflogsumm)
docker exec -i mailServ mailx -r $EXP -s "stats Postfix" root <<DEB_MESS
$STATS2
DEB_MESS

# Last-activity timestamps of each mailbox (date;time;size;name).
# BUGFIX: the old version truncated (>) the output file inside a
# `for line in $(ls …)` loop and only worked by accident because a
# prior IFS='' disabled word-splitting; it also left IFS modified for
# the rest of the script. A single pipeline does the same job safely.
ls -lt --time-style=long-iso "${DOCK_VOL}/postfix_mailData/_data/kaz.bzh/" \
    | awk '{print $6";"$7";"$8";"$9}' > "${FIC_ACTIVITE_MAILBOX}"

# Track possible memory leaks (daily docker + process snapshots).
docker stats --no-stream --format "table {{.Name}}\t{{.Container}}\t{{.MemUsage}}" | sort -k 3 -h > "${KAZ_STATE_DIR}/metro/$(date +"%Y%m%d")_docker_kaz.log"
ps aux --sort -rss > "${KAZ_STATE_DIR}/metro/$(date +"%Y%m%d")_ps_kaz.log"
|||
|
@ -0,0 +1,218 @@ |
|||
#!/bin/bash |
|||
|
|||
KAZ_ROOT=$(cd "$(dirname $0)/.."; pwd) |
|||
. "${KAZ_ROOT}/bin/.commonFunctions.sh" |
|||
setKazVars |
|||
|
|||
cd "${KAZ_ROOT}" |
|||
|
|||
MY_MAIN_IP=$(ip a | grep "inet " | head -2 | tail -1 | sed "s%.*inet *\([0-9.]*\)/.*%\1%") |
|||
MY_SECOND_IP=$(ip a | grep "inet " | head -3 | tail -1 | sed "s%.*inet *\([0-9.]*\)/.*%\1%") |
|||
|
|||
DOMAIN="kaz.local" |
|||
DOMAIN_SYMPA="kaz.local" |
|||
HTTP_PROTO="https" |
|||
MAIN_IP="${MY_MAIN_IP}" |
|||
SYMPA_IP="127.0.0.2" |
|||
RESTART_POLICY="no" |
|||
JIRAFEAU_DIR="/var/jirafeauData/$(apg -n 1 -m 16 -M NCL)/" |
|||
|
|||
DOCKERS_TMPL_ENV="${KAZ_CONF_DIR}/dockers.tmpl.env" |
|||
|
|||
RESET_ENV="true" |
|||
if [ -f "${DOCKERS_ENV}" ]; then |
|||
DOMAIN=$(getValInFile "${DOCKERS_ENV}" "domain") |
|||
DOMAIN_SYMPA=$(getValInFile "${DOCKERS_ENV}" "domain_sympa") |
|||
HTTP_PROTO=$(getValInFile "${DOCKERS_ENV}" "httpProto") |
|||
MAIN_IP=$(getValInFile "${DOCKERS_ENV}" "MAIN_IP") |
|||
SYMPA_IP=$(getValInFile "${DOCKERS_ENV}" "SYMPA_IP") |
|||
RESTART_POLICY=$(getValInFile "${DOCKERS_ENV}" "restartPolicy") |
|||
JIRAFEAU_DIR=$(getValInFile "${DOCKERS_ENV}" "jirafeauDir") |
|||
while : ; do |
|||
read -p "Change '${DOCKERS_ENV}'? " resetEnv |
|||
case "${resetEnv}" in |
|||
[yYoO]* ) |
|||
break |
|||
;; |
|||
""|[Nn]* ) |
|||
RESET_ENV="" |
|||
break |
|||
;; |
|||
* ) |
|||
echo "Please answer yes no." |
|||
;; |
|||
esac |
|||
done |
|||
fi |
|||
|
|||
[ -n "${RESET_ENV}" ] && { |
|||
echo "Reset '${DOCKERS_ENV}'" |
|||
read -p " * domain (kaz.bzh / dev.kaz.bzh / kaz.local)? [${YELLOW}${DOMAIN}${NC}] " domain |
|||
case "${domain}" in |
|||
"" ) |
|||
DOMAIN="${DOMAIN}" |
|||
;; |
|||
* ) |
|||
DOMAIN="${domain}" |
|||
;; |
|||
esac |
|||
|
|||
read -p " * lists domain (kaz.bzh / kaz2.ovh / kaz.local)? [${YELLOW}${DOMAIN_SYMPA}${NC}] " domain |
|||
case "${domain}" in |
|||
"" ) |
|||
DOMAIN_SYMPA="${DOMAIN_SYMPA}" |
|||
;; |
|||
* ) |
|||
DOMAIN_SYMPA="${domain}" |
|||
;; |
|||
esac |
|||
|
|||
# Ask for the HTTP scheme used to expose the services.
while : ; do
    read -p " * protocol (https / http)? [${YELLOW}${HTTP_PROTO}${NC}] " proto
    case "${proto}" in
        "" )
            # Empty answer keeps the current/default value.
            break
            ;;
        "https"|"http" )
            HTTP_PROTO="${proto}"
            break
            ;;
        * )
            # BUGFIX: the message was a copy/paste leftover from an
            # editor prompt ("Please answer joe, emacs, vim or no.").
            echo "Please answer https or http."
            ;;
    esac
done
|||
|
|||
while : ; do |
|||
read -p " * main IP (ip)? [${YELLOW}${MAIN_IP}${NC}] " ip |
|||
case "${ip}" in |
|||
"" ) |
|||
MAIN_IP="${MAIN_IP}" |
|||
break |
|||
;; |
|||
* ) |
|||
if testValidIp "${ip}" ; then |
|||
MAIN_IP="${ip}" |
|||
break |
|||
else |
|||
echo "Please answer x.x.x.x format." |
|||
fi |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
while : ; do |
|||
read -p " * lists IP (ip)? [${YELLOW}${SYMPA_IP}${NC}] " ip |
|||
case "${ip}" in |
|||
"" ) |
|||
SYMPA_IP="${SYMPA_IP}" |
|||
break |
|||
;; |
|||
* ) |
|||
if testValidIp "${ip}" ; then |
|||
SYMPA_IP="${ip}" |
|||
break |
|||
else |
|||
echo "Please answer x.x.x.x format." |
|||
fi |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
while : ; do |
|||
read -p " * restart policy (always / unless-stopped / no)? [${YELLOW}${RESTART_POLICY}${NC}] " policy |
|||
case "${policy}" in |
|||
"" ) |
|||
RESTART_POLICY="${RESTART_POLICY}" |
|||
break |
|||
;; |
|||
"always"|"unless-stopped"|"no") |
|||
RESTART_POLICY="${policy}" |
|||
break |
|||
;; |
|||
* ) echo "Please answer always, unless-stopped or no." |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
while : ; do |
|||
read -p " * Jirafeau dir? [${YELLOW}${JIRAFEAU_DIR}${NC}] " jirafeauDir |
|||
case "${jirafeauDir}" in |
|||
"" ) |
|||
JIRAFEAU_DIR="${JIRAFEAU_DIR}" |
|||
break |
|||
;; |
|||
* ) |
|||
if [[ "${jirafeauDir}" =~ ^/var/jirafeauData/[0-9A-Za-z]{1,16}/$ ]]; then |
|||
JIRAFEAU_DIR="${jirafeauDir}" |
|||
break |
|||
else |
|||
echo "Please give dir name (/var/jirafeauData/[0-9A-Za-z]{1,3}/)." |
|||
fi |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
[ -f "${DOCKERS_ENV}" ] || cp "${DOCKERS_TMPL_ENV}" "${DOCKERS_ENV}" |
|||
|
|||
sed -i "${DOCKERS_ENV}" \ |
|||
-e "s%^\s*domain\s*=.*$%domain=${DOMAIN}%" \ |
|||
-e "s%^\s*domain_sympa\s*=.*$%domain_sympa=${DOMAIN_SYMPA}%" \ |
|||
-e "s%^\s*httpProto\s*=.*$%httpProto=${HTTP_PROTO}%" \ |
|||
-e "s%^\s*MAIN_IP\s*=.*$%MAIN_IP=${MAIN_IP}%" \ |
|||
-e "s%^\s*SYMPA_IP\s*=.*$%SYMPA_IP=${SYMPA_IP}%" \ |
|||
-e "s%^\s*restartPolicy\s*=.*$%restartPolicy=${RESTART_POLICY}%" \ |
|||
-e "s%^\s*jirafeauDir\s*=.*$%jirafeauDir=${JIRAFEAU_DIR}%" |
|||
} |
|||
|
|||
if [ ! -f "${KAZ_CONF_DIR}/container-mail.list" ]; then |
|||
cat > "${KAZ_CONF_DIR}/container-mail.list" <<EOF |
|||
# e-mail server composer |
|||
postfix |
|||
#sympa |
|||
EOF |
|||
fi |
|||
|
|||
if [ ! -f "${KAZ_CONF_DIR}/container-orga.list" ]; then |
|||
cat > "${KAZ_CONF_DIR}/container-orga.list" <<EOF |
|||
# orga composer |
|||
EOF |
|||
fi |
|||
|
|||
if [ ! -f "${KAZ_CONF_DIR}/container-proxy.list" ]; then |
|||
cat > "${KAZ_CONF_DIR}/container-proxy.list" <<EOF |
|||
proxy |
|||
EOF |
|||
fi |
|||
|
|||
if [ ! -f "${KAZ_CONF_DIR}/container-withMail.list" ]; then |
|||
cat > "${KAZ_CONF_DIR}/container-withMail.list" <<EOF |
|||
web |
|||
etherpad |
|||
roundcube |
|||
framadate |
|||
garradin |
|||
dokuwiki |
|||
gitea |
|||
mattermost |
|||
cloud |
|||
#keycloak |
|||
EOF |
|||
fi |
|||
|
|||
if [ ! -f "${KAZ_CONF_DIR}/container-withoutMail.list" ]; then |
|||
cat > "${KAZ_CONF_DIR}/container-withoutMail.list" <<EOF |
|||
jirafeau |
|||
ethercalc |
|||
collabora |
|||
#vigilo |
|||
#grav |
|||
EOF |
|||
fi |
|||
|
|||
if [ ! -d "${KAZ_ROOT}/secret" ]; then |
|||
rsync -a "${KAZ_ROOT}/secret.tmpl/" "${KAZ_ROOT}/secret/" |
|||
. "${KAZ_ROOT}/secret/SetAllPass.sh" |
|||
"${KAZ_BIN_DIR}/secretGen.sh" |
|||
"${KAZ_BIN_DIR}/updateDockerPassword.sh" |
|||
fi |
@ -0,0 +1,83 @@ |
|||
#!/bin/bash |
|||
|
|||
# set -e |
|||
# on pourra inclure le fichier dockers.env pour |
|||
# gérer l' environnement DEV, PROD ou LOCAL |
|||
|
|||
KAZ_ROOT=$(cd "$(dirname $0)/.."; pwd) |
|||
. "${KAZ_ROOT}/bin/.commonFunctions.sh" |
|||
setKazVars |
|||
export VAGRANT_SRC_DIR=/vagrant/files |
|||
cd "${KAZ_ROOT}" |
|||
|
|||
if [ ! -f "${KAZ_ROOT}/config/dockers.env" ]; then |
|||
printKazError "dockers.env not found" |
|||
exit 1 |
|||
fi |
|||
for type in mail orga proxy withMail withoutMail ; do |
|||
if [ ! -f "${KAZ_ROOT}/config/container-${type}.list" ]; then |
|||
printKazError "container-${type}.list not found" |
|||
exit 1 |
|||
fi |
|||
done |
|||
|
|||
mkdir -p "${KAZ_ROOT}/log/" |
|||
export DebugLog="${KAZ_ROOT}/log/log-install-$(date +%y-%m-%d-%T)-" |
|||
( |
|||
echo "########## ********** Start Vagrant $(date +%D-%T)" |
|||
|
|||
# dockers à démarrer (manque : sympa, wordpress, orga) |
|||
DOCKERS_LIST="web jirafeau ethercalc etherpad postfix roundcube proxy framadate garradin dokuwiki gitea mattermost cloud collabora" |
|||
# test 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 |
|||
# pour ne tester qu'un sous-ensemble de service |
|||
if [ $# -ne 0 ]; then |
|||
DOCKERS_LIST="$*" |
|||
fi |
|||
|
|||
DOCKERS_LIST=$(echo "$(filterAvailableComposes ${DOCKERS_LIST})") |
|||
|
|||
# pour tous les récupération de dépôt GIT |
|||
mkdir -p /kaz/git /kaz/download |
|||
|
|||
# Pre-download sources from the Vagrant share (jirafeau...).
for DOCKER in ${DOCKERS_LIST}; do
    # BUGFIX: the existence test checked a misspelled file name
    # ("dowshenload.sh"), so download.sh was never executed.
    if [ -f "${VAGRANT_SRC_DIR}/kaz/dockers/${DOCKER}/download.sh" ]; then
        cd "${VAGRANT_SRC_DIR}/kaz/dockers/${DOCKER}"
        ./download.sh
    fi
done
|||
|
|||
# on construit les dockers qui contiennent un script de création (etherpad, framadate, jirafeau...) |
|||
for DOCKER in ${DOCKERS_LIST}; do |
|||
if [ -f "/kaz/dockers/${DOCKER}/build.sh" ]; then |
|||
cd "/kaz/dockers/${DOCKER}" |
|||
./build.sh |
|||
fi |
|||
done |
|||
|
|||
[ -d "${VAGRANT_SRC_DIR}/kaz/download" ] && |
|||
rsync -a /kaz/download/ "${VAGRANT_SRC_DIR}/kaz/download/" |
|||
[ -d "${VAGRANT_SRC_DIR}/kaz/git" ] && |
|||
rsync -a /kaz/git/ "${VAGRANT_SRC_DIR}/kaz/git/" |
|||
|
|||
# on démare les containers de la liste uniquement (en une fois par cohérence de proxy) |
|||
/kaz/bin/container.sh stop ${DOCKERS_LIST} |
|||
/kaz/bin/container.sh start ${DOCKERS_LIST} |
|||
|
|||
if [[ "${DOCKERS_LIST}" =~ "etherpad" ]]; then |
|||
# pb avec la lanbteur de démarrage du pad :-( |
|||
sleep 5 |
|||
/kaz/bin/container.sh start etherpad proxy |
|||
fi |
|||
|
|||
|
|||
# on construit les dockers qui contiennent un script de création (etherpad, framadate, jirafeau...) |
|||
for DOCKER in ${DOCKERS_LIST}; do |
|||
if [ -f "/kaz/dockers/${DOCKER}/first.sh" ]; then |
|||
cd "/kaz/dockers/${DOCKER}" |
|||
./first.sh |
|||
fi |
|||
done |
|||
|
|||
echo "########## ********** End install $(date +%D-%T)" |
|||
) > >(tee ${DebugLog}stdout.log) 2> >(tee ${DebugLog}stderr.log >&2) |
@ -0,0 +1,104 @@ |
|||
#!/bin/bash |
|||
|
|||
# faire un completion avec les composant dispo |
|||
|
|||
PRG=$(basename $0) |
|||
|
|||
usage () { |
|||
echo "Usage: ${PRG} [-n] [-h] list|add [netName]..." |
|||
echo " -n : simulation" |
|||
echo " -h|--help : help" |
|||
exit 1 |
|||
} |
|||
|
|||
allNetName="" |
|||
export CMD="" |
|||
for ARG in $@; do |
|||
case "${ARG}" in |
|||
'-h' | '-help' ) |
|||
usage |
|||
;; |
|||
'-n' ) |
|||
shift |
|||
export SIMU="echo" |
|||
;; |
|||
-*) |
|||
usage |
|||
;; |
|||
list|add) |
|||
CMD="${ARG}" |
|||
shift; |
|||
;; |
|||
*) |
|||
allNetName="${allNetName} ${ARG}" |
|||
shift |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [ -z "${CMD}" ] ; then |
|||
usage |
|||
fi |
|||
|
|||
# running composes |
|||
export allBridgeName="$(docker network list | grep bridge | awk '{print $2}')" |
|||
# running network |
|||
export allBridgeNet=$(for net in ${allBridgeName} ; do docker inspect ${net} | grep Subnet | sed 's#.*"\([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*/[0-9]*\)".*# \1#'; done) |
|||
|
|||
minB=0 |
|||
minC=0 |
|||
minD=0 |
|||
|
|||
getNet() {
    # Create a docker bridge network "<name>Net" on the first free
    # 10.b.c.d/28 subnet, scanning from the (minB,minC,minD) cursor so
    # successive calls do not re-scan already-allocated ranges.
    netName="$1Net"

    # NOTE(review): substring match — a requested name that is a
    # substring of an existing bridge name would be wrongly reported
    # as already created. TODO confirm acceptable.
    if [[ "${allBridgeName}" =~ "${netName}" ]]; then
        echo "${netName} already created"
        return
    fi
    # echo "start 10.${minB}.${minC}.$((${minD}*16))"

    # "find" is set after a successful creation; the NEXT iteration
    # stores the cursor position and returns. eval is needed because
    # brace expansion happens before variable expansion.
    find=""
    for b in $(eval echo {${minB}..255}); do
        for c in $(eval echo {${minC}..255}); do
            for d in $(eval echo {${minD}..15}); do
                if [ ! -z "${find}" ]; then
                    minB=${b}
                    minC=${c}
                    minD=${d}
                    return
                fi
                # candidate subnet to try
                subnet="10.${b}.${c}.$((d*16))"
                if [[ "${allBridgeNet}" =~ " ${subnet}/" ]];
                then
                    # already used by an existing bridge
                    # XXX check netmask
                    continue
                fi
                # the winner is...
                echo "${netName} => ${subnet}/28"
                ${SIMU} docker network create --subnet "${subnet}/28" "${netName}"
                find="ok"
            done
            minD=0
        done
        minC=0
    done
}
|||
|
|||
list () {
    # Print the names of the existing docker bridges, then the subnets
    # they occupy (both gathered at script startup).
    echo "name: " ${allBridgeName}
    echo "net: " ${allBridgeNet}
}
|||
|
|||
add () {
    # Create (or report as existing) one bridge network per name
    # accumulated from the command line; abort with usage when none
    # was given.
    [ -n "${allNetName}" ] || usage
    local wanted
    for wanted in ${allNetName}; do
        getNet "${wanted}"
    done
}
|||
|
|||
"${CMD}" |
@ -0,0 +1,152 @@ |
|||
#!/bin/bash |
|||
|
|||
PRG=$(basename $0) |
|||
SIMU="" |
|||
KAZ_ROOT=$(cd "$(dirname $0)"/..; pwd) |
|||
. "${KAZ_ROOT}/bin/.commonFunctions.sh" |
|||
setKazVars |
|||
. "${DOCKERS_ENV}" |
|||
|
|||
cd "$(dirname $0)" |
|||
|
|||
ALL_STATUS=$(docker ps -a --format "{{.ID}} {{.Names}} {{.Status}}") |
|||
|
|||
usage () { |
|||
echo "${RED}${BOLD}" \ |
|||
"Usage: $0 [-h] {compose|orga|service} {available|validate|enable|disable|status} [names]...${NL}" \ |
|||
" -h help${NL}" \ |
|||
" compose {available|validate|enable|disable} : list docker-compose name${NL}" \ |
|||
" compose status : status of docker-compose (default available)${NL}" \ |
|||
" service {available|validate} : list services name${NL}" \ |
|||
" service {enable|disable} : list services in orga${NL}" \ |
|||
" service status : status of services in orga${NL}" \ |
|||
" [compose] in${NL}" \ |
|||
" ${CYAN}$(${KAZ_ROOT}/bin/${PRG} compose available)${NL}" \ |
|||
"${NC}" |
|||
exit 1 |
|||
} |
|||
|
|||
# ======================================== |
|||
compose_available () {
    # Every compose name passed on the command line is considered
    # available: echo them back unchanged (whitespace collapsed).
    local names="$*"
    echo ${names}
}
|||
|
|||
getComposeEnableByProxy () { |
|||
onList=$( |
|||
for type in ${KAZ_CONF_DIR}/container-*.list ; do |
|||
getList "${type}" |
|||
done) |
|||
local compose |
|||
for compose in ${onList} ; do |
|||
composeFlag="proxy_${compose//-/_}" |
|||
[[ "${!composeFlag}" == "on" ]] && echo ${compose} |
|||
done |
|||
} |
|||
|
|||
compose_validate () { |
|||
echo $( |
|||
for type in ${KAZ_CONF_DIR}/container-*.list ; do |
|||
getList "${type}" |
|||
done | filterInList $*) |
|||
} |
|||
|
|||
compose_enable () { |
|||
echo $(getComposeEnableByProxy | filterInList $*) |
|||
} |
|||
|
|||
compose_disable () { |
|||
echo $(getAvailableComposes | filterNotInList $(getComposeEnableByProxy) | filterInList $*) |
|||
} |
|||
|
|||
compose_status () {
    # For each compose given, print the up/down status of each of its
    # services, colorized from the `docker ps -a` snapshot taken at
    # script startup (ALL_STATUS: "id name status...").
    for compose in $*; do
        cd "${KAZ_COMP_DIR}/${compose}"
        echo "${compose}:"
        for service in $(docker-compose ps --services 2>/dev/null); do
            # Short (12-char) container id of the service, if any.
            id=$(docker-compose ps -q "${service}" | cut -c 1-12)
            if [ -z "${id}" ]; then
                echo " - ${RED}${BOLD}[Down]${NC} ${service}"
            else
                # Extract the human-readable status column for this id.
                status=$(grep "^${id}\b" <<< "${ALL_STATUS}" | sed "s/.*${id}\s\s*\S*\s\s*\(\S*.*\)/\1/")
                # Green when docker reports "Up", red otherwise.
                COLOR=$([[ "${status}" =~ Up ]] && echo "${GREEN}" || echo "${RED}")
                echo " - ${COLOR}${BOLD}[${status}]${NC} ${service}"
            fi
        done
    done
}
|||
|
|||
# ======================================== |
|||
service_available () { |
|||
echo $(getAvailableServices) |
|||
} |
|||
|
|||
service_validate () { |
|||
echo $(getAvailableServices) |
|||
} |
|||
|
|||
getServiceInOrga () {
    # List the services enabled in each "<name>-orga" compose given,
    # by inspecting its docker-compose.yml (and marker files).
    for orga in $*; do
        # Only orga composes are considered.
        [[ "${orga}" = *"-orga" ]] || continue
        local ORGA_DIR="${KAZ_COMP_DIR}/${orga}"
        ORGA_COMPOSE="${ORGA_DIR}/docker-compose.yml"
        # XXX check name =~ *-orga
        [[ -f "${ORGA_COMPOSE}" ]] || continue
        for service in $($0 service available); do
            case "${service}" in
                garradin)
                    # Garradin is flagged by a marker file, not by a
                    # compose service entry.
                    [ -f "${ORGA_DIR}/useGarradin" ] && echo "${service}"
                    ;;
                wiki)
                    # Service name differs from the compose key.
                    grep -q "dokuwiki:" "${ORGA_COMPOSE}" 2>/dev/null && echo "${service}"
                    ;;
                wp)
                    grep -q "wordpress:" "${ORGA_COMPOSE}" 2>/dev/null && echo "${service}"
                    ;;
                *)
                    # Default: service name matches the compose key.
                    grep -q "${service}:" "${ORGA_COMPOSE}" 2>/dev/null && echo "${service}"
            esac
        done
    done
}
|||
|
|||
service_enable () { |
|||
echo $(getServiceInOrga $*) |
|||
} |
|||
|
|||
service_disable () { |
|||
echo $(getAvailableServices | filterNotInList $(getServiceInOrga $*)) |
|||
} |
|||
|
|||
service_status () { |
|||
# ps per enable |
|||
echo "*** TODO ***" |
|||
} |
|||
|
|||
# ======================================== |
|||
|
|||
KAZ_CMD="" |
|||
case "$1" in |
|||
'-h' | '-help' ) |
|||
usage |
|||
;; |
|||
compose|service) |
|||
KAZ_CMD="$1" |
|||
shift |
|||
;; |
|||
*) |
|||
usage |
|||
;; |
|||
esac |
|||
|
|||
KAZ_OPT="" |
|||
case "$1" in |
|||
available|validate|enable|disable|status) |
|||
KAZ_OPT="$1" |
|||
shift |
|||
;; |
|||
*) |
|||
usage |
|||
;; |
|||
esac |
|||
|
|||
${KAZ_CMD}_${KAZ_OPT} $(filterAvailableComposes $*) |
@ -0,0 +1,44 @@ |
|||
#!/bin/bash
# Migrate a v1 KAZ installation (/kaz-old) to the v2 layout (/kaz):
# stop the old containers, copy the environment and secrets, restart
# the base services, migrate the web content, then regenerate each
# orga from its old template services.
#
# NOTE(review): SIMU is hard-wired to "echo SIMU" below, so every
# privileged command is only PRINTED — remove it to actually migrate.

# v1 locations
CV1=/kaz-old/bin/container.sh
DV1=/kaz-old/dockers
EV1=/kaz-old/config
SV1=/kaz-old/secret

# v2 locations
BV2=/kaz/bin
DV2=/kaz/dockers
EV2=/kaz/config
SV2=/kaz/secret
OV2=/kaz/config/orgaTmpl/orga-gen.sh

# Abort silently when either installation is missing.
[ -x "${CV1}" ] || exit
[ -d "${BV2}" ] || exit

SIMU="echo SIMU"

# Stop the old world.
${SIMU} "${CV1}" stop orga
${SIMU} "${CV1}" stop

# Carry over environment and passwords, then re-derive docker secrets.
${SIMU} rsync "${EV1}/dockers.env" "${EV2}/"
${SIMU} rsync "${SV1}/SetAllPass.sh" "${SV2}/"
${SIMU} "${BV2}/updateDockerPassword.sh"

# XXX ? rsync /kaz/secret/allow_admin_ip /kaz-git/secret/allow_admin_ip

${SIMU} "${BV2}/container.sh" start cloud dokuwiki ethercalc etherpad framadate garradin gitea jirafeau mattermost postfix proxy roundcube web

# Migrate static web content into the v2 docker volume.
${SIMU} rsync -aAHXh --info=progress2 "${DV1}/web/html/" "/var/lib/docker/volumes/web_html/_data/"
${SIMU} chown -R www-data: "/var/lib/docker/volumes/web_html/_data/"

# Echo the cd for the trace, then actually change directory.
${SIMU} cd "${DV1}"
cd "${DV1}"
# Regenerate every orga: ask its old template which services it used
# ("tmpl-gen.sh -l") and feed them to the v2 generator as "+service".
for ORGA_DIR in *-orga; do
    services=$(echo $([ -x "${ORGA_DIR}/tmpl-gen.sh" ] && "${ORGA_DIR}/tmpl-gen.sh" -l))
    if [ -n "${services}" ]; then
        ORGA="${ORGA_DIR%-orga}"

        echo " * ${ORGA}: ${services}"
        ${SIMU} "${OV2}" "${ORGA}" $(for s in ${services}; do echo "+${s}"; done)
    fi
done
|||
|
@ -0,0 +1,336 @@ |
|||
#!/bin/bash

# Kaznaute account management tool: delete an account, show its details, or
# reset its password across postfix, Mattermost, Nextcloud and Garradin.

HOSTNAME=$(hostname)
if [ "$HOSTNAME" = "kazdev" ]
then
    KAZ_ROOT=$(cd $(dirname $0)/..; pwd)
    . "${KAZ_ROOT}/bin/.commonFunctions.sh"
    setKazVars
    SETUP_MAIL="/kaz/dockers/postfix/setup.sh"
else
    . /dockersConfig/dockers.env
    . /dockersConfig/password/SetAllPass.sh
    SETUP_MAIL="/dockers/postfix/setup.sh"
    RED='\033[0;31m'
    GREEN='\033[0;32m'
    NEUTRE='\e[0;m'
    BLINK='\033[0;5m'
fi
URL_GARRADIN="$httpProto://${API_USER}:${API_PASSWORD}@kaz-garradin.${domain}"

# Load every variable and password.
# NOTE(review): on the non-kazdev branch DOCKERS_ENV and KAZ_KEY_DIR are
# assumed to be defined by /dockersConfig/dockers.env — confirm.
. "${DOCKERS_ENV}"
. "${KAZ_KEY_DIR}/SetAllPass.sh"

VERSION="Version de DEV Ne pas mettre en prod elle supprime vraiment les comptes"
PRG=$(basename $0)
# Script name without its extension, used for log and temp file names.
RACINE=${PRG%%.*}

LOG=$RACINE".log"
URL_NC=${cloudHost}.${domain}
URL_AGORA=${matterHost}.${domain}

# Clean leftovers of previous runs BEFORE creating the new temp files
# (bug fix: the original created them first and immediately deleted them
# with the very same globs).
rm -rf /tmp/$RACINE*
rm -rf /tmp/*.json

TFILE_EMAILS=$(mktemp /tmp/$RACINE.XXXXXXXXX.TFILE_EMAILS)
TFILE_MAILS_TROUVE=$(mktemp /tmp/$RACINE.XXXXXXXXX.TFILE_MAILS_TROUVE)
# NOTE(review): this template lacks the dot before the name, unlike the others.
TFILE_MAILS_MATTERMOST=$(mktemp /tmp/$RACINE.XXXXXXXXTFILE_MAILS_MATTERMOST)
|||
|
|||
############################################ Fonctions ####################################################### |
|||
|
|||
Search_Email() {
    # Interactively look an address up in the mail server.
    # Sets CHOIX_MAIL to the selected address (global).
    CHOIX_MAIL=""
    # Bug fix: the original removed "$TFILE_MAILS", an undefined variable;
    # the intent was to reset both working files.
    rm -rf "$TFILE_MAILS_TROUVE" "$TFILE_EMAILS"
    read -p "Mail ou caractere contenu dans le mail (r ou R pour retour ) ? : " RMAIL
    # Quoted comparisons: an empty answer no longer breaks the test.
    if [ "$RMAIL" = "r" ] || [ "$RMAIL" = "R" ] || [ "$RMAIL" = "q" ] || [ "$RMAIL" = "Q" ]
    then
        Main
    fi
    ${SETUP_MAIL} email list | grep -i -- "$RMAIL" > "$TFILE_EMAILS"
    COMPTEUR_LIGNE=0
    # Number each candidate so the user can pick one.
    while read LIGNE
    do
        COMPTEUR_LIGNE=$((COMPTEUR_LIGNE + 1))
        printf "%d - %s\n" "$COMPTEUR_LIGNE" "$LIGNE" >> "$TFILE_MAILS_TROUVE"
    done < "$TFILE_EMAILS"
    if [ "$COMPTEUR_LIGNE" -gt 0 ]
    then
        cat "$TFILE_MAILS_TROUVE"
        read -p "Choisir le numéro correspondant ( R ou r ou 0 pour retour ): " NB_LIGNE_MAIL
        # Restart on explicit return, non-numeric input (r/R/empty…)
        # or an out-of-range number; the original crashed on non-numbers.
        if ! [[ "$NB_LIGNE_MAIL" =~ ^[0-9]+$ ]] || [ "$NB_LIGNE_MAIL" = "0" ] || [ "$NB_LIGNE_MAIL" -gt "$COMPTEUR_LIGNE" ]
        then
            Search_Email
        fi
        CHOIX_MAIL=$(grep "^${NB_LIGNE_MAIL}\b" "${TFILE_MAILS_TROUVE}" | awk '{print $3}' | tr -d '[:space:]')
    else
        Search_Email
    fi
}
|||
|
|||
|
|||
Search_Mattermost() {
    # $1 = email address to look up; $2 = "DETAILS" for a verbose report.
    # Prints the matching Mattermost username (or detailed info), empty if none.
    docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings auth login $httpProto://$URL_AGORA --name local-server --username $mattermost_user --password $mattermost_pass >/dev/null 2>&1
    docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings config set ServiceSettings.EnableAPIUserDeletion "true" >/dev/null 2>&1
    # Dump the full user list, then extract the username on the matching line.
    docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings user list --all >${TFILE_MAILS_MATTERMOST} 2>/dev/null
    REP_SEARCH_MATTERMOST=$(grep -- "$1" "${TFILE_MAILS_MATTERMOST}" | awk '{print $2}' | tr -d '[:space:]')
    # Bug fix: the test was unquoted and errored out when several lines matched.
    if [ ! -z "${REP_SEARCH_MATTERMOST}" ]
    then
        if [ "$2" = "DETAILS" ]
        then
            touch /tmp/$1-mattermost.json
            docker exec -ti ${mattermostServName} bin/mmctl --suppress-warnings --format json user search $1 >/tmp/$1-mattermost.json 2>/dev/null
            echo -e "${RED} Compte : ${GREEN} " ;jq .username /tmp/$1-mattermost.json
            echo -e "${RED} Mail : ${GREEN} " ; jq .email /tmp/$1-mattermost.json
            echo -e "${RED} Role : ${GREEN} " ; jq .roles /tmp/$1-mattermost.json
        else
            echo "${REP_SEARCH_MATTERMOST}"
        fi
    else
        echo ""
    fi
}
|||
|
|||
|
|||
Recherche_par_fic() {
    # Placeholder: file-driven bulk search/update menu (not implemented yet).
    echo "Menu de recherche et modif par fichier"
    Main
}
|||
|
|||
Info_Email() {
    # Show everything known about one account: Mattermost, Nextcloud, Garradin.
    Search_Email
    clear
    echo "------------------------------------------------"
    echo -e "${RED} ADRESSE DE MESSAGERIE : $GREEN $CHOIX_MAIL"
    echo -e "$NEUTRE ------------------------------------------------"
    echo -e " ${RED} DETAILS DU COMPTE DANS MATTERMOST ${GREEN}"
    Search_Mattermost $CHOIX_MAIL DETAILS
    echo -e " ------------------------------------------------"
    echo -e "${RED} DETAILS DU COMPTE DANS NEXTCLOUD PRINCIPAL ${GREEN}"
    echo -e ""
    TEMP_USER_NC=$(mktemp /tmp/$RACINE.XXXXXXXXX.TEMP_USER_NC)
    curl -s -o $TEMP_USER_NC -X GET -H 'OCS-APIRequest:true' $httpProto://admin:$nextcloud_NEXTCLOUD_ADMIN_PASSWORD@$URL_NC/ocs/v1.php/cloud/users?search=$CHOIX_MAIL
    # Strip the XML tags around the <element> entries of the OCS answer.
    grep -i "element" $TEMP_USER_NC | sed -e s/[\<\>\/]//g -e s/element//g
    echo -e "${NEUTRE} ------------------------------------------------"
    echo -e " ${RED} DETAILS DU COMPTE DANS GARRADIN"
    echo -e "${GREEN}"
    curl -s ${URL_GARRADIN}/api/sql -d "SELECT nom,adresse,code_postal,ville,email_kaz,email FROM membres where email_kaz='${CHOIX_MAIL}' LIMIT 1;" >/tmp/$CHOIX_MAIL-garradin.json
    # Unwrap {"results":[ ... ]} so jq can read the single record.
    sed -i -e 's/^[\{]"results"\:\[//g' -e 's/\]\}$//g' /tmp/$CHOIX_MAIL-garradin.json
    for field in nom adresse code_postal ville; do
        jq .$field /tmp/$CHOIX_MAIL-garradin.json
    done
    echo "Mail de secours : "; jq .email /tmp/$CHOIX_MAIL-garradin.json
    echo -e ""
    echo -e "${NEUTRE}------------------------------------------------"
    echo -e ""
}
|||
|
|||
Search_destroy() {
    # Interactively pick an account and delete it from Mattermost, Nextcloud
    # and the mail server.  THERE IS NO UNDO.
    clear
    # TODO: figure out why REP_SEARCH_DESTROY=$(Search_Email) does not work
    # and breaks Search_Email (subshell: CHOIX_MAIL would be lost).
    Search_Email
    REP_SEARCH_DESTROY=$CHOIX_MAIL
    echo "CHOIX=$REP_SEARCH_DESTROY"
    echo "--------------------------------- SUPPRESION ----------------------------------------"
    while :
    do
        echo -e "${BLINK} TOUT RETOUR EN ARRIERE EST IMPOSSIBLE ${NEUTRE}"
        read -p "ON CONTINUE ? [ o / n ]: " SEARCH_DESTROY_INPUT
        if [ "$SEARCH_DESTROY_INPUT" = "n" ] || [ "$SEARCH_DESTROY_INPUT" = "N" ]
        then
            Search_destroy
        fi
        if [ "$SEARCH_DESTROY_INPUT" = "o" ] || [ "$SEARCH_DESTROY_INPUT" = "O" ]
        then
            # --- Mattermost ---
            REP_SEARCH=$(Search_Mattermost $REP_SEARCH_DESTROY)
            echo -e "${GREEN} réponse de mattermost : ${REP_SEARCH_DESTROY} ${NEUTRE}"
            # Quoted test (bug fix): empty or multi-word answers no longer error.
            if [ ! -z "${REP_SEARCH}" ]
            then
                echo -e "${RED} suppression de ${REP_SEARCH_DESTROY} dans mattermost ${NEUTRE}"
                docker exec -ti ${mattermostServName} bin/mmctl user delete $REP_SEARCH_DESTROY --confirm >/dev/null 2>&1
                if [ "$?" -eq "0" ]
                then
                    echo -e "${RED}Suppresion ok"
                else
                    echo -e "Erreur de suppression"
                fi
                echo " ----------------- "
            else
                echo "Rien a supprimer dans mattermost"
            fi
            echo -e "${NEUTRE}"
            # --- Nextcloud ---
            echo -e "Recherche de ${GREEN} ${REP_SEARCH_DESTROY} ${NEUTRE} dans nextcloud"
            USER_NEXTCLOUD_SUPPR=$(curl -s -X GET -H 'OCS-APIRequest:true' $httpProto://admin:$nextcloud_NEXTCLOUD_ADMIN_PASSWORD@$URL_NC/ocs/v1.php/cloud/users?search=${REP_SEARCH_DESTROY} | grep element | sed -s 's/[ \<\>\/]//g' | sed 's/element//g')
            if [ ! -z "${USER_NEXTCLOUD_SUPPR}" ]
            then
                echo -e "${GREEN} le user trouvé est : ${USER_NEXTCLOUD_SUPPR}"
                echo -e "${RED} Suppresion de ${USER_NEXTCLOUD_SUPPR}"
                curl -H 'OCS-APIREQUEST: true' -X DELETE $httpProto://admin:$nextcloud_NEXTCLOUD_ADMIN_PASSWORD@$URL_NC/ocs/v1.php/cloud/users/${USER_NEXTCLOUD_SUPPR} >/dev/null 2>&1
                if [ "$?" -eq "0" ]
                then
                    echo -e "${RED}Suppresion ok"
                else
                    echo -e "Erreur de suppression"
                fi
            else
                # Bug fix: was ${GREEEN} (undefined variable, GREEN misspelled).
                echo -e "${GREEN} rien à supprimer dans Nextcloud"
            fi
            echo -e "${NEUTRE}"
            echo ""
            # --- Mail server ---
            echo -e "${RED} suppression de ${REP_SEARCH_DESTROY} dans le serveur de mail"
            ${SETUP_MAIL} email del -y ${REP_SEARCH_DESTROY} >/dev/null 2>&1
            if [ "$?" -eq "0" ]
            then
                echo -e "${RED}Suppresion ok"
            else
                echo -e "Erreur de suppression"
            fi
            echo -e "${NEUTRE}"
            echo ""
            echo -e "${NEUTRE}"
            read -p " ---------------------- Appuyer sur une touche pour continuer -------------------------"
            Search_destroy
        fi
    done
}
|||
|
|||
Gest_Password() {
    # Reset an account's password everywhere (Mattermost, Nextcloud, mail)
    # with a freshly generated random password.
    Search_Email
    # Build a password accepted by postfix/Nextcloud/Mattermost.
    PASSWORD=_`apg -n 1 -m 10 -M NCL -d`-
    COMPTE_A_MODIFIER=${CHOIX_MAIL}
    MAIL_SECOURS=$(curl -s ${URL_GARRADIN}/api/sql -d "SELECT email FROM membres where email_kaz='${COMPTE_A_MODIFIER}' LIMIT 1 ;" | sed -e 's/results//g' -e 's/email//g' -e 's/[\"\:\[\{\}]//g' -e 's/\]//g')
    if [ "$MAIL_SECOURS" = "" ]
    then
        ADRESSE_SEC="NON"
    else
        ADRESSE_SEC="OUI"
    fi
    ##############################################################################
    # Check whether this mail is one of an orga's contact addresses: if so the
    # password should rather be changed in the orga's own cloud.
    rm -rf /tmp/FICMAILORGA*
    EMAIL_ORGA_TROUVE=0
    for EMAIL_CHOIX in responsable_email email email_kaz email2 email3 email4 email5
    do
        FICMAILORGA=$(mktemp /tmp/FICMAILORGAXXXXXXXXX.json)
        curl -s ${URL_GARRADIN}/api/sql -d "SELECT * FROM membres where $EMAIL_CHOIX='$CHOIX_MAIL' LIMIT 1;" > $FICMAILORGA
        sed -i -e 's/^[\{]"results"\:\[//g' -e 's/\]\}$//g' $FICMAILORGA
        # Bug fix: '[ $(jq …) ]' broke when jq printed several words; pipe
        # into grep -q instead (same truth value).
        if jq .$EMAIL_CHOIX $FICMAILORGA | grep -qi @
        then
            echo -e "${RED} $EMAIL_CHOIX=${GREEN}$(jq .$EMAIL_CHOIX $FICMAILORGA)${NEUTRE}"
            if [ $EMAIL_CHOIX != "email_kaz" ]
            then
                EMAIL_ORGA_TROUVE=$(expr $EMAIL_ORGA_TROUVE + 1 )
            fi
        fi
    done
    if [ $EMAIL_ORGA_TROUVE -ge 1 ]
    then
        echo -e "${RED} L' adresse est dans une orga, IL EST PREFERABLE DE REPONDRE NON !!! ${NEUTRE}"
    fi
    ##########################################################################
    read -p "ON CONTINUE ? [ o / n ]: " SEARCH_RESET_INPUT
    if [ "$SEARCH_RESET_INPUT" = "n" ] || [ "$SEARCH_RESET_INPUT" = "N" ]
    then
        clear
        Gest_Password
    fi
    if [ "$SEARCH_RESET_INPUT" = "o" ] || [ "$SEARCH_RESET_INPUT" = "O" ]
    then
        USER_NEXTCLOUD_MODIF=$(curl -s -X GET -H 'OCS-APIRequest:true' $httpProto://admin:$nextcloud_NEXTCLOUD_ADMIN_PASSWORD@$URL_NC/ocs/v1.php/cloud/users?search=${COMPTE_A_MODIFIER} | grep element | sed -e 's/[ \<\>\/]//g' -e 's/element//g')
        echo -e "$GREEN Compte à modifier = $RED ${COMPTE_A_MODIFIER} ${NEUTRE}"
        echo -e "$GREEN Mail de secours = $RED ${MAIL_SECOURS} ${NEUTRE}"
        echo -e "$GREEN Compte $RED $(Search_Mattermost $COMPTE_A_MODIFIER) ${NEUTRE}"
        echo -e "$GREEN Compte Nextcloud $RED ${USER_NEXTCLOUD_MODIF} ${NEUTRE}"
        echo -e "$GREEN Le mot de passe sera = $RED ${PASSWORD} ${NEUTRE}"
        # Bug fix: container name was hard-coded as "mattermostServ" here,
        # while every other call uses ${mattermostServName}.
        docker exec -ti ${mattermostServName} bin/mmctl user change-password $(Search_Mattermost $COMPTE_A_MODIFIER) -p $PASSWORD
        curl -H 'OCS-APIREQUEST: true' -X PUT $httpProto://admin:$nextcloud_NEXTCLOUD_ADMIN_PASSWORD@$URL_NC/ocs/v1.php/cloud/users/${USER_NEXTCLOUD_MODIF} -d key=password -d value=${PASSWORD}
        ${SETUP_MAIL} email update ${COMPTE_A_MODIFIER} ${PASSWORD}
        if [ $ADRESSE_SEC == "OUI" ]
        then
            echo -e "Mail a expédier a cette adresse : ${GREEN} $MAIL_SECOURS ${NEUTRE}"
        fi
        if [ $ADRESSE_SEC == "NON" ]
        then
            echo -e "${RED} Pas d adresse de secours ${NEUTRE}"
        fi
    fi

}
|||
############################################################################################################## |
|||
usage () {
    # Short usage summary for this account-management script.
    echo $PRG "[-h] [-v]"
    printf '%s\n' \
        " script de gestion des kaznaute" \
        " -h aide sur ce script" \
        " -v version du script."
}
|||
##############################################################################
# Dependency check: jq is required (prints its path, exits when missing).
# Idiom fix: 'command -v' replaces the 'type -P | grep' pipeline.
command -v jq || { echo "erreur jq n' est pas installé ( apt install jq ? ) " ; exit;}
|||
################ Main loop ############################################ |
|||
Main() {
    # Top-level interactive menu loop.
    echo -e "${NEUTRE}"
    clear
    while true
    do
        echo "--------------------------------------------"
        echo $VERSION
        echo "--------------------------------------------"
        echo " 1 - Suppression d' un compte ?"
        echo " 2 - Information sur un compte"
        echo " 3 - Changement de mot de passe d' un compte"
        #echo " 3 - Utilisation d' un fichier de destruction massive"
        #echo " h - Aide"
        echo " q - Quit"
        read -p "Choice ? : " CHOICE
        case "$CHOICE" in
            '1' )
                Search_destroy
                ;;
            '2' )
                Info_Email
                ;;
            '3' )
                Gest_Password
                ;;
            'h'| "H" )
                clear
                echo "--------------------------------------------"
                usage
                echo "--------------------------------------------"
                ;;
            'q'| "Q" )
                exit;;
            * )
                # Bug fix: the pattern was quoted ('*'), so it only matched a
                # literal "*" and the default branch was unreachable.
                clear;;
        esac
    done
}
|||
|
|||
|
|||
##################################################################################################################################

# Option handling (usage / version), then the main interactive loop.

case "$1" in
    '-h' )
        usage
        exit
        ;;
    '-v' )
        echo "$PRG version $VERSION"
        exit
        ;;
esac

Main
@ -0,0 +1,83 @@ |
|||
#!/bin/bash

# Remove temporary/backup files by moving them into a trash directory.

POUBELLE="${HOME}/tmp/POUBELLE"
mkdir -p "${POUBELLE}"

usage () {
    echo `basename "$0"` " [-] [-h] [-help] [-clean] [-wipe] [-n] [directory ...]"
    echo " remove temporaries files"
    echo " - Treat the following arguments as filenames \`-\' so that"
    echo " you can specify filenames starting with a minus."
    echo " -h"
    echo " -help Display this help."
    echo " -n Simulate the remove (juste print files)."
    echo " directories are the roots where the purge had to be done. If no"
    echo " roots are given, the root is the home directory."
}

DETRUIT=""
ANT_OPT=""   # NOTE(review): set for -n but never used below — confirm intent
ANT_CMD=""
case "$1" in
    '-' )
        shift;;
    '-n' )
        DETRUIT="echo"
        ANT_OPT="-p"
        shift;;
    '-clean' )
        ANT_CMD="clean"
        shift;;
    '-wipe' )
        ANT_CMD="wipe"
        shift;;
    '-h' | '-help' )
        usage
        shift
        exit;;
esac

DIRS=$*
if test "$#" -le 1
then
    DIRS="$*"
    if test -z "$1" -o -d "$1"
    then
        # Bug fix: 'cd $1' was unquoted and broke on paths with spaces;
        # an empty argument still means "go home", as the unquoted form did.
        cd "${1:-$HOME}" || exit
        DIRS=.
    fi
fi

# -clean / -wipe mode: delegate to ant/make instead of deleting files.
if test "${ANT_CMD}" != ""
then
    find $DIRS -type f -name build.xml -execdir ant -f {} "${ANT_CMD}" \;
    find $DIRS -type f -name Makefile\* -execdir make -f {} "${ANT_CMD}" \;
    exit
fi

# Old .xvpics thumbnail directories are deleted outright.
find $DIRS -type d -name .xvpics -exec $DETRUIT rm -r {} \; -prune

# Everything else that looks like a backup/junk file is moved to the trash.
find $DIRS '(' \
    -type d -name POUBELLE -prune \
    -o \
    -type f '(' \
    -name core -o -name '*.BAK' -o -name '*.bak' -o -name '*.CKP' \
    -o -name '.*.BAK' -o -name '.*.bak' -o -name '.*.CKP' \
    -o -name '.*.back' -o -name '*.back' \
    -o -name '*.backup' -o -name '*.backup ' \
    -o -name '.*.backup' -o -name '.*.backup ' \
    -o -name .make.state \
    -o -name 'untitled*' -o -name 'Sansnom' \
    -o -name '.emacs_*' -o -name '.wi_*' \
    -o -name 'ws_ftp.log' -o -name 'hs_err*.log' \
    -o -name '#*' -o -name '*~' -o -name '.*~' -o -name junk \
    -o -name '.~lock.*#' \
    -o -name '*%' -o -name '.*%' \
    ')'\
    -print -exec $DETRUIT mv -f '{}' "${POUBELLE}" \; \
    ')'

# -o -name '*.ps' -o -name '.*.ps' \
# -o -name '*.i' -o -name '*.ixx' \
# -o -name '.*.sav' -o -name '*.sav' \
|||
|
@ -0,0 +1,143 @@ |
|||
#!/bin/bash

# Interactive two-way diff/sync between a local KAZ tree and a remote one.
# Idea: rsync into a scratch directory first, then upload only the differences.
#
# initialisation:
#   cd /MonRepDeTest
#   mkdir -p kazdev kazprod
#   rsync -rlptDEHAX --delete --info=progress2 root@kazdev:/kaz/ ./kazdev/
#   rsync -rlptDEHAX --delete --info=progress2 root@kazprod:/kaz/ ./kazprod/
#
# example:
#   cd /MonRepDeTest/kazdev/
#   ./dockers/rdiff.sh /MonRepDeTest/kazprod/ root@kazprod
#   cd /MonRepDeTest/kazprod/
#   ./dockers/rdiff.sh /MonRepDeTest/kazdev/ root@kazdev

export REF_DIRS="bin config dockers secret state"
export SIMU=""
export GREEN='[0;32m'
export RED='[0;31m'
export NC='[0m' # No Color

usage () {
    echo "Usage: $0 [-n] [-h] /kazProdOuDev/ root@kazDevOuProd"
    echo " -h help"
    echo " -n simulation"
    exit 1
}

# Bug fix: "$@" quoted (was: for ARG in $@).
for ARG in "$@"
do
    case "${ARG}" in
        '-h' | '-help' )
            usage
            ;;
        '-n' )
            shift
            export SIMU="echo"
            ;;
    esac
done

if [[ $# -ne 2 ]]; then
    echo "Illegal number of parameters"
    usage
fi

if [ ! -d "$1" ]; then
    echo "Bad target"
    usage
fi

export SRC_DIR="$(cd $(dirname $0)/..; pwd)/"
export TARGET_DIR="$(cd $1 ; pwd)/"
# Spelling fix: was REMORE_USER (internal variable only).
export REMOTE_USER=$2
declare -a SKIP_FILE
export SKIP_FILE=$(grep -v -e ^# -e ^$ "${SRC_DIR}/config/skip-file.txt")

cd "${SRC_DIR}"

# Pull the remote reference dirs into the local target copy.
${SIMU} rsync -rlptDEHAX --no-o --delete --info=progress2 $(for i in ${REF_DIRS} ; do echo "${REMOTE_USER}:/$i" ; done) "${TARGET_DIR}"

badName(){
    # Return 0 (skip) when $1 is empty or matches an entry of the skip list.
    [[ -z "$1" ]] && return 0
    for item in ${SKIP_FILE[@]}; do
        [[ "$1/" =~ "${item}" ]] && return 0
    done
    return 1
}

# Directories present here but absent from the target copy.
CHANGED_DIRS=$(find ${REF_DIRS} -type d ! -exec /bin/test -d "${TARGET_DIR}{}" \; -print -prune)
for file in ${CHANGED_DIRS[@]}; do
    if badName "${file}" ; then
        echo SKIP ${file}
        continue
    fi
    echo "New dir ${file}"
    while true; do
        read -p "Synchronize ${GREEN}${file}/${NC} to ${GREEN}${REMOTE_USER}:/${file}/${NC}? [y/n]: " yn
        case $yn in
            [Yy]*)
                ${SIMU} rsync -rlptDEHAX --info=progress2 "${file}/" "${TARGET_DIR}${file}/"
                ${SIMU} rsync -rlptDEHAX --info=progress2 "${file}/" "${REMOTE_USER}:/${file}/"
                break
                ;;
            [Nn]*)
                break
                ;;
        esac
    done
done

# Files present here but absent from the target copy.
NEW_FILES=$(find ${REF_DIRS} '(' -type d ! -exec /bin/test -d "${TARGET_DIR}/{}" \; -prune ')' -o '(' -type f ! -exec /bin/test -f "${TARGET_DIR}/{}" \; -print ')')
for file in ${NEW_FILES[@]}; do
    if badName "${file}" ; then
        echo SKIP ${file}
        continue
    fi
    echo "New file ${file}"
    while true; do
        read -p "Synchronize ${GREEN}${file}${NC} to ${GREEN}${REMOTE_USER}:/${file}${NC}? [y/n]: " yn
        case $yn in
            [Yy]*)
                ${SIMU} rsync -rlptDEHAX --info=progress2 "${file}" "${TARGET_DIR}${file}"
                ${SIMU} rsync -rlptDEHAX --info=progress2 "${file}" "${REMOTE_USER}:/${file}"
                break
                ;;
            [Nn]*)
                break
                ;;
        esac
    done
done

trap 'rm -f "${TMPFILE}"' EXIT
export TMPFILE="$(mktemp)" || exit 1

# Files present on both sides whose content differs: review them in kompare.
CHANGED_FILES=$(find ${REF_DIRS} '(' -type d ! -exec /bin/test -d "${TARGET_DIR}/{}" \; -prune ')' -o '(' -type f -exec /bin/test -f "${TARGET_DIR}/{}" \; ! -exec cmp -s "{}" "${TARGET_DIR}{}" \; -print ')')
for file in ${CHANGED_FILES[@]}; do
    if badName "${file}" ; then
        echo SKIP ${file}
        continue
    fi
    echo "TEST ${file}"
    kompare "${file}" "${TARGET_DIR}${file}"
    # If kompare did not touch the target copy (still older than TMPFILE),
    # there is nothing to upload.
    if [ "${TARGET_DIR}${file}" -ot "${TMPFILE}" ]; then
        echo "No change of ${TARGET_DIR}${file}"
        continue
    fi
    while true; do
        read -p "Synchronize ${GREEN}${TARGET_DIR}${file}${NC} to ${GREEN}${REMOTE_USER}:/${file}${NC}? [y/n]: " yn
        case $yn in
            [Yy]*)
                ${SIMU} chmod $(stat -c "%a" "${file}") "${TARGET_DIR}${file}"
                ${SIMU} rsync -rlptDEHAX --info=progress2 "${TARGET_DIR}${file}" "${REMOTE_USER}:/${file}"
                break
                ;;
            [Nn]*)
                break
                ;;
        esac
    done
done
@ -0,0 +1,67 @@ |
|||
#!/bin/bash

# Generate fresh secrets: instantiate secret.tmpl on first run, then replace
# every "--clean_val--" placeholder of SetAllPass.sh with a generated value.

KAZ_ROOT=$(cd "$(dirname $0)/.."; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd "${KAZ_ROOT}"

NEW_DIR="secret"
TMPL_DIR="secret.tmpl"

# First run: copy the template tree.
if [ ! -d "${NEW_DIR}/" ]; then
    rsync -a "${TMPL_DIR}/" "${NEW_DIR}/"
fi

NEW_FILE="${NEW_DIR}/SetAllPass-new.sh"
TMPL_FILE="${NEW_DIR}/SetAllPass.sh"

while read line ; do
    # Comments and blank lines are copied untouched.
    if [[ "${line}" =~ ^# ]] || [ -z "${line}" ] ; then
        echo "${line}"
        continue
    fi
    if [[ "${line}" =~ "--clean_val--" ]] ; then
        case "${line}" in
            *jirafeau_DATA_DIR*)
                # Reuse the directory configured in dockers.env when available.
                # (bug fix: leftover "coucou" debug echoes removed)
                JIRAFEAU_DIR=$(getValInFile "${DOCKERS_ENV}" "jirafeauDir")
                [ -z "${JIRAFEAU_DIR}" ] &&
                    echo "${line}" ||
                    sed "s%\(.*\)--clean_val--\(.*\)%\1${JIRAFEAU_DIR}\2%" <<< ${line}
                continue
                ;;
            *DATABASE*)
                # DB name = service prefix + 2 random characters.
                dbName="$(sed "s/\([^_]*\)_.*/\1/" <<< ${line})_$(apg -n 1 -m 2 -M NCL | cut -c 1-2)"
                sed "s/\(.*\)--clean_val--\(.*\)/\1${dbName}\2/" <<< ${line}
                continue
                ;;
            *ROOT_PASSWORD*|*PASSWORD*)
                pass="$(apg -n 1 -m 16 -M NCL)"
                sed "s/\(.*\)--clean_val--\(.*\)/\1${pass}\2/" <<< ${line}
                continue
                ;;
            *USER*)
                # DB user = service prefix + 2 random characters.
                user="$(sed "s/\([^_]*\)_.*/\1/" <<< ${line})_$(apg -n 1 -m 2 -M NCL | cut -c 1-2)"
                sed "s/\(.*\)--clean_val--\(.*\)/\1${user}\2/" <<< ${line}
                continue
                ;;
            *RAIN_LOOP*|*office_password*|*mattermost_*|*sympa_*|*gitea_*)
                pass="$(apg -n 1 -m 16 -M NCL)"
                sed "s/\(.*\)--clean_val--\(.*\)/\1${pass}\2/" <<< ${line}
                continue
                ;;
        esac
    else
        echo "${line}"
        continue
    fi
    # Placeholder line that matched no known key: report it on stderr.
    printKazError "${line}" >&2
done < "${TMPL_FILE}" > "${NEW_FILE}"

mv "${NEW_FILE}" "${TMPL_FILE}"

chmod a+x "${TMPL_FILE}"
. "${TMPL_FILE}"
"${KAZ_BIN_DIR}/updateDockerPassword.sh"
@ -0,0 +1,58 @@ |
|||
#!/bin/bash

# Normalize ownership and permissions of the whole KAZ tree.
# Usage: $0 [root|user]   (default owner: root)

cd $(dirname $0)/..
KAZ=$(pwd)
owner=root

usage(){
    echo "Usage: $0 [root|user]"
    exit 1
}

case $# in
    0)
        ;;
    1)
        owner=$1
        ;;
    *)
        usage
        ;;
esac

####################
# config
cd ${KAZ}
DIRS="config secret bin"

chown -hR ${owner}: ${DIRS}
# Only *.sh files stay executable.
find ${DIRS} -type f -exec chmod a-x {} \;
find ${DIRS} -type f -name \*.sh -exec chmod a+x {} \;
chmod -R a+X ${DIRS}
chmod -R go= ${DIRS}

chmod a+x bin/*.sh
chown -hR www-data: config/orgaTmpl/wiki-conf/

####################
# dockers
cd ${KAZ}/dockers

chown -h ${owner}: . * */.env */* */config/*
chmod a-x,a+r * */*
chmod a+X . * */*
chmod a+x */*.sh

# Embedded upstream checkouts keep the chosen owner.
chown -hR ${owner}: \
    etherpad/etherpad-lite/ \
    garradin/extensions garradin/garradin-* \
    jirafeau/Jirafeau \
    mattermost/app

# Web-served content belongs to the web server.
chown -hR www-data: \
    vigilo \
    web/html

chmod -R a+rX web/html
|||
|
|||
|
@ -0,0 +1,15 @@ |
|||
#!/bin/bash

# Regenerate and restart every orga (to run after a docker-compose update).

KAZ_ROOT=$(cd "$(dirname $0)/../.."; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd "${KAZ_COMP_DIR}"
# NOTE(review): after cd into KAZ_COMP_DIR (= ${KAZ_ROOT}/dockers) the
# relative paths ./dockers/*-orga and ./bin/container.sh resolve under
# dockers/dockers and dockers/bin — confirm these are the intended locations.
for orga in ./dockers/*-orga
do
    ${orga}/orga-gen.sh
    ./bin/container.sh stop "${orga}"
    ./bin/container.sh start "${orga}"
done
|||
|
@ -0,0 +1,82 @@ |
|||
#!/bin/bash

# Apply the standard KAZ Nextcloud configuration (config.php keys, standard
# applications, Collabora URLs) to the common cloud (no argument) or to an
# orga's cloud (one argument).

KAZ_ROOT=$(cd $(dirname $0)/..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd $(dirname $0)
. "${DOCKERS_ENV}"

available_orga=$("${KAZ_BIN_DIR}/kazList.sh" "compose" "enable" "orga" 2>/dev/null | sed "s/-orga\b//g")
usage () {
    echo "Usage: $0 orga"
    echo " -n : simulation"
    echo " [orga] : in ${available_orga}"
    exit 1
}

case "$#" in
    0)
        # Common cloud.
        CloudName="<common cloud>"
        OFFICE_URL="${httpProto}://${officeHost}.${domain}"
        DockerServName=${nextcloudServName}
        CONF_DIR="${DOCK_VOL}/cloud_cloudConfig/_data"
        ;;
    1)
        # Orga cloud.  (fix: available_orga was needlessly recomputed here)
        [[ " ${available_orga} " =~ "$1" ]] || usage
        ORGA=${1%-orga}
        CloudName="${ORGA}"
        OFFICE_URL=https://${ORGA}-${officeHost}.${domain}
        DockerServName="${ORGA}-${nextcloudServName}"
        CONF_DIR="${DOCK_VOL}/orga_${ORGA}-cloudConfig/_data"
        ;;
    *)
        usage
        ;;
esac

PHP_CONF="${CONF_DIR}/config.php"

printKazMsg "update cloud config"
update(){
    # Insert line "$2" after the line matching "$3" in config.php,
    # unless key "$1" is already present.
    if ! grep -q "$1" "${PHP_CONF}" ; then
        echo -n " ${CYAN}${BOLD}$1${NC}"
        sed -i -e "/$3/a\ $2" "${PHP_CONF}"
    fi
}

printKazMsg "update ${CloudName}:"
echo -n " "
update "default_language" " 'default_language' => 'fr'," "CONFIG = array ("

update "theme" " 'theme' => ''," "'installed' => true,"
update "default_phone_region" " 'default_phone_region' => 'FR'," "'installed' => true,"
update "loglevel" " 'loglevel' => 2," "'installed' => true,"
update "maintenance" " 'maintenance' => false," "'installed' => true,"
update "app_install_overwrite" " 'app_install_overwrite' => \n array (\n 0 => 'documents',\n )," "'installed' => true,"
update "overwriteprotocol" " 'overwriteprotocol' => 'https'," "'installed' => true,"

update "mail_domain" " 'mail_domain' => 'kaz.bzh'," "'installed' => true,"
update "mail_from_address" " 'mail_from_address' => 'admin'," "'installed' => true,"
update "mail_smtpport" " 'mail_smtpport' => '25'," "'installed' => true,"
update "mail_sendmailmode" " 'mail_sendmailmode' => 'smtp'," "'installed' => true,"
update "mail_smtphost" " 'mail_smtphost' => 'smtp.kaz.bzh'," "'installed' => true,"
update "mail_smtpmode" " 'mail_smtpmode' => 'smtp'," "'installed' => true,"
update "enable_previews" " 'enable_previews' => false," "'installed' => true,"

printKazMsg "add applications"
for app in tasks calendar contacts mail richdocuments drawio rainloop
do
    docker exec -ti -u 33 ${DockerServName} /var/www/html/occ app:install ${app}
done

# Point Collabora (richdocuments) at the right office server.
docker exec -ti -u 33 ${DockerServName} /var/www/html/occ config:app:set --value "${OFFICE_URL}" richdocuments public_wopi_url
docker exec -ti -u 33 ${DockerServName} /var/www/html/occ config:app:set --value "${OFFICE_URL}" richdocuments wopi_url
docker exec -ti -u 33 ${DockerServName} /var/www/html/occ config:app:set --value "${OFFICE_URL}" richdocuments disable_certificate_verification
|||
|
@ -0,0 +1,108 @@ |
|||
#!/bin/bash

# Propagate the passwords of secret/SetAllPass.sh into every docker env file
# (and into the running databases when possible).

KAZ_ROOT=$(cd $(dirname $0)/..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

# For debugging:
# SIMU=echo

# Possible improvements:
# - take the impacted services as parameters (to limit the changes)
# - for the DBs, declaring a new login grants privileges but the old ones
#   are never revoked

. "${DOCKERS_ENV}"
. "${KAZ_KEY_DIR}/SetAllPass.sh"
|||
|
|||
updateEnvDB(){
    # $1 = variable prefix, $2 = env file, $3 = DB container name.
    # Rewrite the MYSQL_* entries of "$2" from the ${1}_MYSQL_* variables
    # (indirect expansion), then re-grant privileges in the running container.
    rootPass="$1_MYSQL_ROOT_PASSWORD"
    dbName="$1_MYSQL_DATABASE"
    userName="$1_MYSQL_USER"
    userPass="$1_MYSQL_PASSWORD"

    ${SIMU} sed -i \
        -e "s/MYSQL_ROOT_PASSWORD=.*/MYSQL_ROOT_PASSWORD=${!rootPass}/g" \
        -e "s/MYSQL_DATABASE=.*/MYSQL_DATABASE=${!dbName}/g" \
        -e "s/MYSQL_USER=.*/MYSQL_USER=${!userName}/g" \
        -e "s/MYSQL_PASSWORD=.*/MYSQL_PASSWORD=${!userPass}/g" \
        "$2"

    # Only works while root's password is unchanged — chicken-and-egg problem
    # (the previous values would be needed):
    #  * if rootPass changed, do it by hand
    #  * if dbName changed, do it by hand
    if [[ "$(docker ps -f name=$3 | grep -w $3)" ]]; then
        echo "change DB pass on docker $3"
        echo "grant all privileges on ${!dbName}.* to '${!userName}' identified by '${!userPass}';" | \
            docker exec -i $3 bash -c "mysql --user=root --password=${!rootPass}"
    fi
}
|||
|
|||
updateEnv(){
    # $1 = variable prefix, $2 = env file.
    # For every "VAR=" line of "$2", substitute the value of ${1}_VAR.
    for varName in $(grep "^[a-zA-Z_]*=" $2 | sed "s/^\([^=]*\)=.*/\1/g")
    do
        srcName="$1_${varName}"
        # Escape '&' so sed does not re-insert the matched text.
        srcVal=$(echo "${!srcName}" | sed -e "s/[&]/\\\&/g")
        ${SIMU} sed -i \
            -e "s%^[ ]*${varName}=.*\$%${varName}=${srcVal}%" \
            "$2"
    done
}
|||
|
|||
framadateUpdate(){
    # Refresh the framadate admin htpasswd and its DB credentials.
    # No-op when framadate is not installed.
    if [ ! -f "${DOCK_LIB}/volumes/framadate_dateConfig/_data/config.php" ]; then
        return 0
    fi
    ${SIMU} docker exec -ti "${framadateServName}" bash -c -i "htpasswd -bc /var/framadate/admin/.htpasswd ${framadate_HTTPD_USER} ${framadate_HTTPD_PASSWORD}"
    ${SIMU} sed -i \
        -e "s/^#*const DB_USER[ ]*=.*$/const DB_USER= '${framadate_MYSQL_USER}';/g" \
        -e "s/^#*const DB_PASSWORD[ ]*=.*$/const DB_PASSWORD= '${framadate_MYSQL_PASSWORD}';/g" \
        "${DOCK_LIB}/volumes/framadate_dateConfig/_data/config.php"
}
|||
|
|||
jirafeauUpdate(){
    # Store the sha256 of the jirafeau admin password in its local config.
    # No-op when jirafeau is not installed.
    if [ ! -f "${DOCK_LIB}/volumes/jirafeau_fileConfig/_data/config.local.php" ]; then
        return 0
    fi
    SHA=$(echo -n "${jirafeau_HTTPD_PASSWORD}" | sha256sum | cut -d ' ' -f 1)
    ${SIMU} sed -i \
        -e "s/'admin_password'[ ]*=>[ ]*'[^']*'/'admin_password' => '${SHA}'/g" \
        "${DOCK_LIB}/volumes/jirafeau_fileConfig/_data/config.local.php"
}
|||
|
|||
#################### |
|||
# main |
|||
|
|||
updateEnvDB "etherpad" "${KAZ_KEY_DIR}/env-${etherpadDBName}" "${etherpadDBName}" |
|||
updateEnvDB "framadate" "${KAZ_KEY_DIR}/env-${framadateDBName}" "${framadateDBName}" |
|||
updateEnvDB "gitea" "${KAZ_KEY_DIR}/env-${gitDBName}" "${gitDBName}" |
|||
updateEnvDB "mattermost" "${KAZ_KEY_DIR}/env-${mattermostDBName}" "${mattermostDBName}" |
|||
updateEnvDB "nextcloud" "${KAZ_KEY_DIR}/env-${nextcloudDBName}" "${nextcloudDBName}" |
|||
updateEnvDB "roundcube" "${KAZ_KEY_DIR}/env-${roundcubeDBName}" "${roundcubeDBName}" |
|||
updateEnvDB "sso" "${KAZ_KEY_DIR}/env-${ssoDBName}" "${ssoDBName}" |
|||
updateEnvDB "sympa" "${KAZ_KEY_DIR}/env-${sympaDBName}" "${sympaDBName}" |
|||
updateEnvDB "vigilo" "${KAZ_KEY_DIR}/env-${vigiloDBName}" "${vigiloDBName}" |
|||
updateEnvDB "wp" "${KAZ_KEY_DIR}/env-${wordpressDBName}" "${wordpressDBName}" |
|||
|
|||
updateEnv "ethercalc" "${KAZ_KEY_DIR}/env-${ethercalcServName}" |
|||
updateEnv "etherpad" "${KAZ_KEY_DIR}/env-${etherpadServName}" |
|||
updateEnv "framadate" "${KAZ_KEY_DIR}/env-${framadateServName}" |
|||
updateEnv "gandi" "${KAZ_KEY_DIR}/env-gandi" |
|||
updateEnv "gitea" "${KAZ_KEY_DIR}/env-${gitServName}" |
|||
updateEnv "jirafeau" "${KAZ_KEY_DIR}/env-${jirafeauServName}" |
|||
updateEnv "mattermost" "${KAZ_KEY_DIR}/env-${mattermostServName}" |
|||
updateEnv "nextcloud" "${KAZ_KEY_DIR}/env-${nextcloudServName}" |
|||
updateEnv "office" "${KAZ_KEY_DIR}/env-${officeServName}" |
|||
updateEnv "roundcube" "${KAZ_KEY_DIR}/env-${roundcubeServName}" |
|||
updateEnv "sso" "${KAZ_KEY_DIR}/env-${ssoServName}" |
|||
updateEnv "vigilo" "${KAZ_KEY_DIR}/env-${vigiloServName}" |
|||
updateEnv "wp" "${KAZ_KEY_DIR}/env-${wordpressServName}" |
|||
|
|||
framadateUpdate |
|||
jirafeauUpdate |
@ -0,0 +1,11 @@ |
|||
#!/bin/sh
# Empty the user's trash directory (${HOME}/tmp/POUBELLE).
# If the directory does not exist, print a message instead.

if cd "${HOME}/tmp/POUBELLE"
then
    # Remove visible and hidden entries.  The patterns '.[!.]*' and
    # '..?*' replace the original '.*', which also matched '.' and '..'
    # and made rm emit (suppressed) errors on every run.
    # '--' protects against entries whose name starts with '-'.
    rm -f -- * .[!.]* ..?* 2>/dev/null
else
    echo "$0 pas de poubelle a vider !"
fi
|||
|
@ -0,0 +1,103 @@ |
|||
# Environment variables used by the docker stacks
# through the symlink:
# .env -> ../../config/dockers.env

########################################
# main domain selection
# prod=kaz.bzh / dev=dev.kaz.bzh / local=kaz.local
domain=

########################################
# domain used for sympa mailing lists
# prod=kaz.bzh / dev=kaz2.ovh / local=kaz.local
domain_sympa=

########################################
# For garradin, which hard-codes in its
# config the URL used to reach it

# prod=https (gandi) / dev=https (letsencrypt) / local=http
httpProto=

# prod=89.234.186.111 / dev=192.168.57.1 / local=127.0.0.1
MAIN_IP=

# prod=89.234.186.151 / dev=192.168.57.2 / local=127.0.0.2
SYMPA_IP=

########################################
# service host names (subdomain part of each service URL)

# or www (but meh)
webHost=

calcHost=tableur
cloudHost=cloud
dateHost=sondage
dokuwikiHost=wiki
fileHost=depot
garHost=garradin
gitHost=git
gravHost=grav
matterHost=agora
officeHost=office
padHost=pad
smtpHost=smtp
ssoHost=keycloak
sympaHost=listes
vigiloHost=vigilo
webmailHost=webmail
wordpressHost=wp

########################################
# container names

dokuwikiServName=dokuwikiServ
ethercalcServName=ethercalcServ
etherpadServName=etherpadServ
framadateServName=framadateServ
garradinServName=garradinServ
gitServName=gitServ
gravServName=gravServ
jirafeauServName=jirafeauServ
mattermostServName=mattermostServ
nextcloudServName=nextcloudServ
officeServName=officeServ
proxyServName=proxyServ
roundcubeServName=roundcubeServ
smtpServName=mailServ
ssoServName=keycloakServ
sympaServName=sympaServ
vigiloServName=vigiloServ
webServName=webServ
wordpressServName=wpServ

# database container names
ethercalcDBName=ethercalcDB
etherpadDBName=etherpadDB
framadateDBName=framadateDB
gitDBName=gitDB
mattermostDBName=mattermostDB
nextcloudDBName=nextcloudDB
roundcubeDBName=roundcubeDB
ssoDBName=keycloakDB
sympaDBName=sympaDB
vigiloDBName=vigiloDB
wordpressDBName=wpDB

########################################
# container restart policy
# prod=always / test=unless-stopped / local=no
restartPolicy=

########################################
# should live in env-jirafeauServ,
# but only variables from ".env" are
# usable for volume mounts

jirafeauDir=

########################################
# services enabled by container.sh;
# environment variables used
# for the proxy (mandataire) template
@ -0,0 +1 @@ |
|||
../../dockersConfig/dockers.env |
@ -0,0 +1,57 @@ |
|||
FROM alpine:3.10

# Runtime environment: put the mattermost binaries on the PATH and pin
# the version baked into this image.
ENV PATH="/mattermost/bin:${PATH}"
ENV MM_VERSION=5.32.0
ENV MM_INSTALL_TYPE=docker

# Build arguments: edition ("enterprise" or "team"), UID/GID of the
# runtime user, and an optional direct URL to a mattermost tarball.
ARG edition=enterprise
ARG PUID=2000
ARG PGID=2000
ARG MM_BINARY=


# Install the packages mattermost and the entrypoint need (jq for
# config.json edits, curl for downloads and the healthcheck).
RUN apk add --no-cache \
    ca-certificates \
    curl \
    jq \
    libc6-compat \
    libffi-dev \
    libcap \
    linux-headers \
    mailcap \
    netcat-openbsd \
    xmlsec-dev \
    tzdata \
    && rm -rf /tmp/*

# Download and unpack Mattermost, save the pristine config for the
# entrypoint, and create the unprivileged runtime user.
# Fix: curl now uses -f so an HTTP error (404/5xx) aborts the build
# instead of piping an error page into tar.
RUN mkdir -p /mattermost/data /mattermost/plugins /mattermost/client/plugins \
    && if [ ! -z "$MM_BINARY" ]; then curl -f $MM_BINARY | tar -xvz ; \
    elif [ "$edition" = "team" ] ; then curl -f https://releases.mattermost.com/$MM_VERSION/mattermost-team-$MM_VERSION-linux-amd64.tar.gz?src=docker-app | tar -xvz ; \
    else curl -f https://releases.mattermost.com/$MM_VERSION/mattermost-$MM_VERSION-linux-amd64.tar.gz?src=docker-app | tar -xvz ; fi \
    && cp /mattermost/config/config.json /config.json.save \
    && rm -rf /mattermost/config/config.json \
    && addgroup -g ${PGID} mattermost \
    # fix: the original passed -D twice to adduser (harmless duplicate)
    && adduser -D -u ${PUID} -G mattermost -h /mattermost mattermost \
    && chown -R mattermost:mattermost /mattermost /config.json.save /mattermost/plugins /mattermost/client/plugins \
    # allow binding privileged ports as the unprivileged user
    && setcap cap_net_bind_service=+ep /mattermost/bin/mattermost

USER mattermost

# Healthcheck to make sure the container is ready
HEALTHCHECK CMD curl --fail http://localhost:8000 || exit 1

# Configure entrypoint and command
COPY entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]
WORKDIR /mattermost
CMD ["mattermost"]

# Expose port 8000 of the container
EXPOSE 8000

# Declare volumes for mount point directories
VOLUME ["/mattermost/data", "/mattermost/logs", "/mattermost/config", "/mattermost/plugins", "/mattermost/client/plugins"]
@ -0,0 +1,82 @@ |
|||
#!/bin/sh
# Mattermost container entrypoint: creates a default config.json on first
# run, builds the PostgreSQL connection string from the environment, then
# execs the requested command (normally "mattermost").

# Emit a 48-character alphanumeric random salt on stdout.
generate_salt() {
  tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 48 | head -n 1
}

# Read environment variables or set default values
DB_HOST=${DB_HOST:-db}
DB_PORT_NUMBER=${DB_PORT_NUMBER:-5432}
# see https://www.postgresql.org/docs/current/libpq-ssl.html
# for usage when database connection requires encryption
# filenames should be escaped if they contain spaces
# i.e. $(printf %s ${MY_ENV_VAR:-''} | jq -s -R -r @uri)
# the location of the CA file can be set using environment var PGSSLROOTCERT
# the location of the CRL file can be set using PGSSLCRL
# The URL syntax for connection string does not support the parameters
# sslrootcert and sslcrl reliably, so use these PostgreSQL-specified variables
# to set names if using a location other than default
DB_USE_SSL=${DB_USE_SSL:-disable}
MM_DBNAME=${MM_DBNAME:-mattermost}
MM_CONFIG=${MM_CONFIG:-/mattermost/config/config.json}

# Apply one jq filter to $MM_CONFIG in place (jq cannot edit a file
# in place, hence the tmp-file-and-rename dance).
set_cfg() {
  jq "$1" "$MM_CONFIG" > "$MM_CONFIG.tmp" && mv "$MM_CONFIG.tmp" "$MM_CONFIG"
}

# If the first argument looks like a flag, prepend the "mattermost"
# command.  (Replaces the original awk substring test with a builtin
# pattern match — same behavior, no extra process.)
case "$1" in
  -*) set -- mattermost "$@" ;;
esac

if [ "$1" = 'mattermost' ]; then
  # Honor an explicit -config=... CLI argument over the env default.
  for ARG in "$@"; do
    case "$ARG" in
      -config=*) MM_CONFIG=${ARG#*=};;
    esac
  done

  if [ ! -f "$MM_CONFIG" ]; then
    # If there is no configuration file, create it with some default values
    echo "No configuration file $MM_CONFIG"
    echo "Creating a new one"
    # Copy default configuration file
    cp /config.json.save "$MM_CONFIG"
    # Substitute some parameters with jq
    set_cfg '.ServiceSettings.ListenAddress = ":8000"'
    set_cfg '.LogSettings.EnableConsole = true'
    set_cfg '.LogSettings.ConsoleLevel = "ERROR"'
    set_cfg '.FileSettings.Directory = "/mattermost/data/"'
    set_cfg '.FileSettings.EnablePublicLink = true'
    set_cfg ".FileSettings.PublicLinkSalt = \"$(generate_salt)\""
    set_cfg '.EmailSettings.SendEmailNotifications = false'
    set_cfg '.EmailSettings.FeedbackEmail = ""'
    set_cfg '.EmailSettings.SMTPServer = ""'
    set_cfg '.EmailSettings.SMTPPort = ""'
    set_cfg ".EmailSettings.InviteSalt = \"$(generate_salt)\""
    set_cfg ".EmailSettings.PasswordResetSalt = \"$(generate_salt)\""
    set_cfg '.RateLimitSettings.Enable = true'
    set_cfg '.SqlSettings.DriverName = "postgres"'
    set_cfg ".SqlSettings.AtRestEncryptKey = \"$(generate_salt)\""
    set_cfg '.PluginSettings.Directory = "/mattermost/plugins/"'
  else
    echo "Using existing config file $MM_CONFIG"
  fi

  # Configure database access unless a full datasource was provided.
  if [ -z "$MM_SQLSETTINGS_DATASOURCE" ] && [ -n "$MM_USERNAME" ] && [ -n "$MM_PASSWORD" ]; then
    echo "Configure database connection..."
    # URLEncode the password, allowing for special characters
    ENCODED_PASSWORD=$(printf %s "$MM_PASSWORD" | jq -s -R -r @uri)
    export MM_SQLSETTINGS_DATASOURCE="postgres://$MM_USERNAME:$ENCODED_PASSWORD@$DB_HOST:$DB_PORT_NUMBER/$MM_DBNAME?sslmode=$DB_USE_SSL&connect_timeout=10"
    echo "OK"
  else
    echo "Using existing database connection"
  fi

  # Wait another second for the database to be properly started.
  # Necessary to avoid "panic: Failed to open sql connection pq: the database system is starting up"
  sleep 1

  echo "Starting mattermost"
fi

exec "$@"
@ -0,0 +1,250 @@ |
|||
# Per-orga docker-compose template (services section).
# The "#{{name" / "#}}" marker pairs are FUNCTIONAL: orga-gen.sh enables
# or disables each service block by processing these markers — do not
# edit or remove them.
version: '3.3'

#{{services
services:
#}}
#{{db
  db:
    image: mariadb:10.5
    container_name: ${orga}DB
    #disk_quota: 10G
    command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
    restart: ${restartPolicy}
    volumes:
      - ./initdb.d:/docker-entrypoint-initdb.d:ro
      - orgaDB:/var/lib/mysql
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    env_file:
      - ../../secret/env-${nextcloudDBName}
      - ../../secret/env-${mattermostDBName}
      - ../../secret/env-${wordpressDBName}
    networks:
      - orgaNet
#}}
#{{cloud
  cloud:
    image: nextcloud
    container_name: ${orga}${nextcloudServName}
    #disk_quota: 10G
    restart: ${restartPolicy}
    networks:
      - orgaNet
      - postfixNet
    depends_on:
      - db
    links:
      - db
    # NOTE(review): this alias is "${smtpHost}" while agora/wp/wiki use
    # "${smtpHost}.${domain}" — confirm the inconsistency is intended.
    external_links:
      - ${smtpServName}:${smtpHost}
    volumes:
      - cloudMain:/var/www/html
      - cloudData:/var/www/html/data
      - cloudConfig:/var/www/html/config
      - cloudApps:/var/www/html/apps
      - cloudCustomApps:/var/www/html/custom_apps
      - cloudThemes:/var/www/html/themes/
      - cloudPhp:/usr/local/etc/php/conf.d/
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    env_file:
      - ../../secret/env-${nextcloudServName}
      - ../../secret/env-${nextcloudDBName}
    environment:
      - NEXTCLOUD_TRUSTED_DOMAINS=${orga}${cloudHost}.${domain}
      - SMTP_HOST=${smtpHost}
      - SMTP_PORT=25
      - MAIL_DOMAIN=${domain}
#}}
#{{collabora
  collabora:
    image: collabora/code
    container_name: ${orga}${officeServName}
    #disk_quota: 10G
    restart: ${restartPolicy}
    cap_add:
      - MKNOD
      - SYS_CHROOT
      - FOWNER
#    ports:
#      - 9980:9980
    env_file:
      - ../../secret/env-${officeServName}
    environment:
      - dictionaries=fr_FR en_GB es_ES
      - domain=.*${orga}${cloudHost}\.${domain}
      - server_name=${orga}${officeHost}.${domain}
      - VIRTUAL_HOST=${orga}${officeHost}.${domain}
      - VIRTUAL_PORT=9980
      - VIRTUAL_PROTO=https
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      orgaNet:
#}}
#{{agora
  agora:
    build:
      context: app
      # uncomment following lines for team edition or change UID/GID
      args:
        - edition=team
        - PUID=1000
        - PGID=1000
    container_name: ${orga}${mattermostServName}
    #disk_quota: 10G
    restart: ${restartPolicy}
#    memory: 1G
    networks:
      - orgaNet
      - postfixNet
    depends_on:
      - db
    links:
      - db
    external_links:
      - ${smtpServName}:${smtpHost}.${domain}
    volumes:
      - matterConfig:/mattermost/config:rw
      - matterData:/mattermost/data:rw
      - matterLogs:/mattermost/logs:rw
      - matterPlugins:/mattermost/plugins:rw
      - matterClientPlugins:/mattermost/client/plugins:rw
      - matterIcons:/mattermost/client/images:ro
      - /etc/ssl:/etc/ssl:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - /etc/environment:/etc/environment:ro
    env_file:
      - ../../secret/env-${mattermostServName}
    environment:
      - VIRTUAL_HOST=${orga}${matterHost}.${domain}
      # in case your config is not in default location
      #- MM_CONFIG=/mattermost/config/config.json
#}}
#{{wp
  wordpress:
    image: wordpress
    container_name: ${orga}${wordpressServName}
    restart: ${restartPolicy}
    networks:
      - orgaNet
      - postfixNet
    depends_on:
      - db
    links:
      - db
    external_links:
      - ${smtpServName}:${smtpHost}.${domain}
    env_file:
      - ../../secret/env-${wordpressServName}
    environment:
      - WORDPRESS_SMTP_HOST=${smtpHost}.${domain}
      - WORDPRESS_SMTP_PORT=25
#      - WORDPRESS_SMTP_USERNAME
#      - WORDPRESS_SMTP_PASSWORD
#      - WORDPRESS_SMTP_FROM=${orga}
      - WORDPRESS_SMTP_FROM_NAME=${orga}
    volumes:
      - wordpress:/var/www/html
#}}
#{{wiki
  dokuwiki:
    image: mprasil/dokuwiki
    container_name: ${orga}${dokuwikiServName}
    #disk_quota: 10G
    restart: ${restartPolicy}
    volumes:
      - wikiData:/dokuwiki/data
      - wikiConf:/dokuwiki/conf
      - wikiPlugins:/dokuwiki/lib/plugins
      - wikiLibtpl:/dokuwiki/lib/tpl
      - wikiLogs:/var/log
    networks:
      - orgaNet
      - postfixNet
    external_links:
      - ${smtpServName}:${smtpHost}.${domain}
#}}
|||
|
|||
# Volume and network declarations for the per-orga compose template.
# All volumes are pre-created by init-volume.sh and referenced as
# "external" here.  The "#{{name" / "#}}" markers are FUNCTIONAL
# (processed by orga-gen.sh) — do not edit or remove them.
#{{services
volumes:
#}}
#{{db
  orgaDB:
    external:
      name: orga_${orga}orgaDB
#}}
#{{agora
  matterConfig:
    external:
      name: orga_${orga}matterConfig
  matterData:
    external:
      name: orga_${orga}matterData
  matterLogs:
    external:
      name: orga_${orga}matterLogs
  matterPlugins:
    external:
      name: orga_${orga}matterPlugins
  matterClientPlugins:
    external:
      name: orga_${orga}matterClientPlugins
  # NOTE(review): no "orga_${orga}" prefix here — presumably a volume of
  # icons shared by every orga; confirm against init-volume.sh.
  matterIcons:
    external:
      name: matterIcons
# Fix: this "#}}" was missing, leaving the agora marker block unclosed
# so the cloud volumes below would be swallowed when agora is disabled
# (every sibling group, including agora in init-volume.sh, is closed).
#}}
#{{cloud
  cloudMain:
    external:
      name: orga_${orga}cloudMain
  cloudData:
    external:
      name: orga_${orga}cloudData
  cloudConfig:
    external:
      name: orga_${orga}cloudConfig
  cloudApps:
    external:
      name: orga_${orga}cloudApps
  cloudCustomApps:
    external:
      name: orga_${orga}cloudCustomApps
  cloudThemes:
    external:
      name: orga_${orga}cloudThemes
  cloudPhp:
    external:
      name: orga_${orga}cloudPhp
#}}
#{{wiki
  wikiData:
    external:
      name: orga_${orga}wikiData
  wikiConf:
    external:
      name: orga_${orga}wikiConf
  wikiPlugins:
    external:
      name: orga_${orga}wikiPlugins
  wikiLibtpl:
    external:
      name: orga_${orga}wikiLibtpl
  wikiLogs:
    external:
      name: orga_${orga}wikiLogs
#}}
#{{wp
  wordpress:
    external:
      name: orga_${orga}wordpress
#}}

networks:
  orgaNet:
    external:
      name: ${orga}orgaNet
  postfixNet:
    external:
      name: postfixNet
@ -0,0 +1,71 @@ |
|||
#!/bin/bash
# First-run initialisation of an orga's Nextcloud instance: creates its
# database and DB user, then drives the Nextcloud web installer.
# Must be run from inside a "<name>-orga" docker directory.

KAZ_ROOT=$(cd $(dirname $0)/../..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars
. ".env"
. "${KAZ_KEY_DIR}/SetAllPass.sh"

# begin common service init: derive the orga name from the directory name
cd $(dirname $0)
PWD=$(pwd)
ORGA_DIR=$(basename ${PWD})

if [[ "${ORGA_DIR}" != *"-orga" ]]
then
    printKazError "it's not an orga dir"
    exit
fi
ORGA=${ORGA_DIR%-orga}
if [[ -z "${ORGA}" ]]
then
    printKazError "it's not an orga dir"
    exit
fi

printKazMsg "init cloud ${ORGA}"

# NOTE(review): DOCK_VOL is expected from the sourced files above
# (it is not set in this script) — confirm it is always defined.
CONF_DIR="${DOCK_VOL}/orga_${ORGA}-cloudConfig/_data"
PHP_CONF="${CONF_DIR}/config.php"

CLOUD_URL="${httpProto}://${ORGA}-${cloudHost}.${domain}"
DockerServName="${ORGA}-${nextcloudServName}"

# Abort unless the orga's Nextcloud container is currently running.
if ! [[ "$(docker ps -f name=${DockerServName} | grep -w ${DockerServName})" ]]; then
    printKazError "NextCloud not running... abort"
    exit
fi

# (Re)create the Nextcloud database and its user inside the orga's DB
# container; credentials come from SetAllPass.sh.
echo "
CREATE DATABASE IF NOT EXISTS ${nextcloud_MYSQL_DATABASE};

DROP USER IF EXISTS '${nextcloud_MYSQL_USER}';
CREATE USER '${nextcloud_MYSQL_USER}'@'%';

GRANT ALL ON ${nextcloud_MYSQL_DATABASE}.* TO '${nextcloud_MYSQL_USER}'@'%' IDENTIFIED BY '${nextcloud_MYSQL_PASSWORD}';

FLUSH PRIVILEGES;" | \
    docker exec -i ${ORGA}-DB bash -c "mysql --user=root --password=${nextcloud_MYSQL_ROOT_PASSWORD}"

# Only run the web installer if Nextcloud has not been installed yet
# (the installer flips 'installed' => true in config.php).
if ! grep -q "'installed' => true," "${PHP_CONF}" 2> /dev/null; then
    printKazMsg "\n *** Premier lancement de NextCLoud"

    # waitUrl is provided by .commonFunctions.sh: blocks until the URL answers.
    waitUrl "${CLOUD_URL}"

    printKazMsg "reset cloud ${ORGA}"

    # Drive the Nextcloud first-install form non-interactively.
    curl -X POST \
         -d "install=true" \
         -d "adminlogin=${nextcloud_NEXTCLOUD_ADMIN_USER}" \
         -d "adminpass=${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}" \
         -d "directory=/var/www/html/data" \
         -d "dbtype=mysql" \
         -d "dbuser=${nextcloud_MYSQL_USER}" \
         -d "dbpass=${nextcloud_MYSQL_PASSWORD}" \
         -d "dbname=${nextcloud_MYSQL_DATABASE}" \
         -d "dbhost=${nextcloud_MYSQL_HOST}" \
         -d "install-recommended-apps=true" \
         "${CLOUD_URL}"
fi

# Apply the Kaz-specific post-install tweaks.
"${KAZ_BIN_DIR}/updateCloud.sh" "${ORGA}"
@ -0,0 +1,95 @@ |
|||
#!/bin/bash
# First-run initialisation of an orga's Mattermost (agora) instance:
# creates its database/user, registers the first admin account, then
# patches config.json (site URL, SMTP, locale).
# Must be run from inside a "<name>-orga" docker directory.

KAZ_ROOT=$(cd $(dirname $0)/../..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars
. ".env"
. "${KAZ_KEY_DIR}/SetAllPass.sh"

# begin common service init: derive the orga name from the directory name
cd $(dirname $0)
PWD=$(pwd)
ORGA_DIR=$(basename ${PWD})

if [[ "${ORGA_DIR}" != *"-orga" ]]
then
    printKazError "it's not an orga dir"
    exit
fi
ORGA=${ORGA_DIR%-orga}
if [[ -z "${ORGA}" ]]
then
    printKazError "it's not an orga dir"
    exit
fi

printKazMsg "init agora ${ORGA}"

# Fix: VOL_DIR was used below but never defined in this script (the
# sibling wordpress init script sets it explicitly), which made CONF_DIR
# resolve to "/orga_...".  Default it while honoring any value already
# exported by the environment.
VOL_DIR="${VOL_DIR:-/var/lib/docker/volumes}"

CONF_DIR="${VOL_DIR}/orga_${ORGA}-matterConfig/_data"
JSON_CONF="${CONF_DIR}/config.json"
MATTER_URI="${ORGA}-${matterHost}.${domain}"
MATTER_URL="${httpProto}://${MATTER_URI}"
# NOTE(review): the container name is built from ORGA_FLAG (dashes
# replaced by underscores) here, while the cloud/wp init scripts use
# plain ${ORGA} — confirm which matches the compose container_name.
ORGA_FLAG="${ORGA_DIR//-/_}"
DockerServName="${ORGA_FLAG}-${mattermostServName}"

# Abort unless the orga's Mattermost container is currently running.
if ! [[ "$(docker ps -f name=${DockerServName} | grep -w ${DockerServName})" ]]; then
    printKazError "Agora not running... abort"
    exit
fi

# (Re)create the Mattermost database and its user inside the orga's DB
# container; credentials come from SetAllPass.sh.
echo "
CREATE DATABASE IF NOT EXISTS ${mattermost_MYSQL_DATABASE};

DROP USER IF EXISTS '${mattermost_MYSQL_USER}';
CREATE USER '${mattermost_MYSQL_USER}'@'%';

GRANT ALL ON ${mattermost_MYSQL_DATABASE}.* TO '${mattermost_MYSQL_USER}'@'%' IDENTIFIED BY '${mattermost_MYSQL_PASSWORD}';

FLUSH PRIVILEGES;" | \
    docker exec -i "${ORGA}-DB" bash -c "mysql --user=root --password=${mattermost_MYSQL_ROOT_PASSWORD}"


# If config.json already references our domain, the instance has been
# initialised before — nothing left to do.
if grep -q "${domain}" "${JSON_CONF}"; then
    echo "agora ${ORGA} already done"
    exit
fi

printKazMsg "\n *** Premier lancement de Mattermost"
# waitUrl is provided by .commonFunctions.sh: blocks until the URL answers.
waitUrl "${MATTER_URL}"


# XXX the URL does not work with curl
# NOTE(review/security): first-admin credentials are hard-coded here;
# they should come from the secret store and be changed after setup.
curl -X POST \
     -d "email=francois@kaz.bzh" \
     -d "name=admin" \
     -d "password=_CovManrad6-" \
     "${MATTER_URL}/signup_email"


# Patch the generated config.json in place: site/websocket URLs, CORS,
# outgoing mail, and French locale defaults.
sed -i \
    -e 's|"SiteURL": ".*"|"SiteURL": "'${MATTER_URL}'"|g' \
    -e 's|"WebsocketURL": ".*"|"WebsocketURL": "wss://'${MATTER_URI}'"|g' \
    -e 's|"AllowCorsFrom": ".*"|"AllowCorsFrom": "'${domain}' '${MATTER_URI}':443 '${MATTER_URI}'"|g' \
    -e 's|"ConsoleLevel": ".*"|"ConsoleLevel": "ERROR"|g' \
    -e 's|"SendEmailNotifications": false|"SendEmailNotifications": true|g' \
    -e 's|"FeedbackEmail": ".*"|"FeedbackEmail": "admin@'${domain}'"|g' \
    -e 's|"FeedbackOrganization": ".*"|"FeedbackOrganization": "Cochez la KAZ du libre !"|g' \
    -e 's|"ReplyToAddress": ".*"|"ReplyToAddress": "admin@'${domain}'"|g' \
    -e 's|"SMTPServer": ".*"|"SMTPServer": "mail.'${domain}'"|g' \
    -e 's|"SMTPPort": ".*"|"SMTPPort": "25"|g' \
    -e 's|"DefaultServerLocale": ".*"|"DefaultServerLocale": "fr"|g' \
    -e 's|"DefaultClientLocale": ".*"|"DefaultClientLocale": "fr"|g' \
    -e 's|"AvailableLocales": ".*"|"AvailableLocales": "fr"|g' \
    ${JSON_CONF}

# Reference snippet for a possible Jitsi plugin configuration:
#   "jitsi": {
#       "jitsiappid": null,
#       "jitsiappsecret": null,
#       "jitsicompatibilitymode": false,
#       "jitsiembedded": false,
#       "jitsijwt": null,
#       "jitsilinkvalidtime": 30,
#       "jitsinamingscheme": "words",
#       "jitsiurl": "https://meet.jit.si",
#       "Enable": true
#   }
@ -0,0 +1,34 @@ |
|||
#!/bin/bash
# Create the Docker volumes needed by one orga's services.
# Expects ${orga} in the environment (the orga name prefix used by the
# compose template).  The "#{{name" / "#}}" marker pairs are FUNCTIONAL
# (processed by orga-gen.sh) — do not edit or remove them.

# Fail fast if ${orga} was not exported by the caller: previously an
# unset variable silently created misnamed "orga_orgaDB"-style volumes.
: "${orga?orga must be set (orga name prefix)}"

#docker network create postfix_mailNet

#{{db
docker volume create --name=orga_${orga}orgaDB
#}}
#{{agora
docker volume create --name=orga_${orga}matterConfig
docker volume create --name=orga_${orga}matterData
docker volume create --name=orga_${orga}matterLogs
docker volume create --name=orga_${orga}matterPlugins
docker volume create --name=orga_${orga}matterClientPlugins
#}}
#{{cloud
docker volume create --name=orga_${orga}cloudMain
docker volume create --name=orga_${orga}cloudData
docker volume create --name=orga_${orga}cloudConfig
docker volume create --name=orga_${orga}cloudApps
docker volume create --name=orga_${orga}cloudCustomApps
docker volume create --name=orga_${orga}cloudThemes
docker volume create --name=orga_${orga}cloudPhp
#}}
#{{wiki
docker volume create --name=orga_${orga}wikiData
docker volume create --name=orga_${orga}wikiConf
docker volume create --name=orga_${orga}wikiPlugins
docker volume create --name=orga_${orga}wikiLibtpl
docker volume create --name=orga_${orga}wikiLogs
#}}
#{{wp
docker volume create --name=orga_${orga}wordpress
#}}
|||
|
@ -0,0 +1,83 @@ |
|||
#!/bin/bash
# First-run initialisation of an orga's Dokuwiki instance: drives the
# web installer, unpacks the template and plugins, and installs the
# default configuration files.
# Must be run from inside a "<name>-orga" docker directory.

KAZ_ROOT=$(cd $(dirname $0)/../..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd $(dirname $0)
. ".env"

PWD=$(pwd)
ORGA_DIR=$(basename ${PWD})
# NOTE(review): ".env" is sourced twice (above and here) — presumably
# harmless; confirm one can be removed.
. .env
. ${KAZ_KEY_DIR}/SetAllPass.sh

if [[ "${ORGA_DIR}" != *"-orga" ]]
then
    printKazError "it's not an orga dir"
    exit
fi
ORGA=${ORGA_DIR%-orga}
if [[ -z "${ORGA}" ]]
then
    printKazError "it's not an orga dir"
    exit
fi

# Volume data directories (DOCK_VOL and KAZ_DNLD_DIR are expected from
# the sourced files above — TODO confirm they are always defined).
TPL_DIR="${DOCK_VOL}/orga_${ORGA}-dokuwikiLibtpl/_data"
PLG_DIR="${DOCK_VOL}/orga_${ORGA}-dokuwikiPlugins/_data"
CONF_DIR="${DOCK_VOL}/orga_${ORGA}-dokuwikiConf/_data"
DNLD_DIR="${KAZ_DNLD_DIR}/dokuwiki"
DockerServName="${ORGA}-${dokuwikiServName}"
WIKI_URL="${httpProto}://${ORGA}-${dokuwikiHost}.${domain}"

# Installer defaults.
# NOTE(review/security): the admin password below is a weak hard-coded
# default — it should be changed right after initialisation.
WIKI_TITLE=Kaz
WIKI_ROOT=Kaz
WIKI_EMAIL=wiki@kaz.local
WIKI_PASS=azerty


printKazMsg "init wiki ${ORGA}"

# Abort unless the orga's Dokuwiki container is currently running.
if ! [[ "$(docker ps -f name=${DockerServName} | grep -w ${DockerServName})" ]]; then
    printKazError "Dokuwiki not running... abort"
    exit
fi

# local.php only exists after the installer has run: use it as the
# "already initialised" sentinel.
if [ ! -f "${CONF_DIR}/local.php" ] ; then
    printKazMsg "\n *** Premier lancement de Dokuwiki"

    # waitUrl is provided by .commonFunctions.sh: blocks until the URL answers.
    waitUrl "${WIKI_URL}"

    # Drive the Dokuwiki install form non-interactively.
    curl -X POST \
         -d "l=fr" \
         -d "d[title]=${WIKI_TITLE}" \
         -d "d[acl]=true" \
         -d "d[superuser]=${WIKI_ROOT}" \
         -d "d[fullname]=Admin"\
         -d "d[email]=${WIKI_EMAIL}" \
         -d "d[password]=${WIKI_PASS}" \
         -d "d[confirm]=${WIKI_PASS}" \
         -d "d[policy]=1" \
         -d "d[allowreg]=false" \
         -d "d[license]=lic_0" \
         -d "d[pop]=false" \
         "${WIKI_URL}/install.php"


    # Install the site template and the standard plugin set
    # (unzipInDir comes from .commonFunctions.sh).
    unzipInDir "${DNLD_DIR}/docnavwiki.zip" "${TPL_DIR}/"
    chown -R www-data: "${TPL_DIR}/"

    for plugin in captcha ckgedit smtp todo wrap wrapadd; do
        unzipInDir "${DNLD_DIR}/${plugin}.zip" "${PLG_DIR}"
    done
    chown -R www-data: "${PLG_DIR}/"

    cd wiki-conf
    # XXX initialise admin:<pass>:admin:<mail>:admin,user
    rsync -auHAX local.php users.auth.php acl.auth.php "${CONF_DIR}/"
    chown -R www-data: "${CONF_DIR}/"

    # Set the wiki title to the orga name and switch the language to French.
    # NOTE(review): the second sed targets dokuwiki.php while everything
    # else installed above goes to local.php — confirm this is intended.
    sed -i -e "s|\(.*conf\['title'\].*=.*'\).*';|\1${ORGA}';|g" "${CONF_DIR}/local.php"
    sed -i -e "s|\(.*conf\['lang'\].*=.*'\)en';|\1fr';|g" "${CONF_DIR}/dokuwiki.php"
fi
@ -0,0 +1,66 @@ |
|||
#!/bin/bash
# First-run initialisation of an orga's WordPress instance: creates its
# database/user, then drives the WordPress web installer.
# Must be run from inside a "<name>-orga" docker directory.

KAZ_ROOT=$(cd $(dirname $0)/../..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd $(dirname $0)
. ".env"

PWD=$(pwd)
ORGA_DIR=$(basename ${PWD})
# Docker volume root (this script sets it explicitly).
VOL_DIR=/var/lib/docker/volumes
. .env
. ${KAZ_KEY_DIR}/SetAllPass.sh

if [[ "${ORGA_DIR}" != *"-orga" ]]
then
    printKazError "it's not an orga dir"
    exit
fi
ORGA=${ORGA_DIR%-orga}
if [[ -z "${ORGA}" ]]
then
    printKazError "it's not an orga dir"
    exit
fi

printKazMsg "init wordpress ${ORGA}"

CONF_DIR="${VOL_DIR}/orga_${ORGA}-wordpress/_data"
WP_URL="${httpProto}://${ORGA}-${wordpressHost}.${domain}"
DockerServName="${ORGA}-${wordpressServName}"

# Abort unless the orga's WordPress container is currently running.
if ! [[ "$(docker ps -f name=${DockerServName} | grep -w ${DockerServName})" ]]; then
    printKazError "Wordpress not running... abort"
    exit
fi

# (Re)create the WordPress database and its user inside the orga's DB
# container; credentials come from SetAllPass.sh.
echo "
CREATE DATABASE IF NOT EXISTS ${wp_MYSQL_DATABASE};

DROP USER IF EXISTS '${wp_MYSQL_USER}';
CREATE USER '${wp_MYSQL_USER}'@'%';

GRANT ALL ON ${wp_MYSQL_DATABASE}.* TO '${wp_MYSQL_USER}'@'%' IDENTIFIED BY '${wp_MYSQL_PASSWORD}';

FLUSH PRIVILEGES;" | \
    docker exec -i ${ORGA}-DB bash -c "mysql --user=root --password=${wp_MYSQL_ROOT_PASSWORD}"

# XXX find a test along the lines of: if ! grep -q "'installed' => true," "${PHP_CONF}" 2> /dev/null; then
# (there is currently no "already initialised" guard, unlike the cloud
# and agora init scripts — the installer is driven on every run)
printKazMsg "\n *** Premier lancement de WP"

# waitUrl is provided by .commonFunctions.sh: blocks until the URL answers.
waitUrl "${WP_URL}"

printKazMsg "reset wp ${ORGA}"

# Drive the WordPress step-2 install form non-interactively.
curl -X POST \
     -d "user_name=${wp_WORDPRESS_ADMIN_USER}" \
     -d "admin_password=${wp_WORDPRESS_ADMIN_PASSWORD}" \
     -d "admin_password2=${wp_WORDPRESS_ADMIN_PASSWORD}" \
     -d "pw_weak=true" \
     -d "admin_email=admin@kaz.bzh" \
     -d "blog_public=0" \
     -d "language=fr_FR" \
     "${WP_URL}/wp-admin/install.php?step=2"
echo
@ -0,0 +1,3 @@ |
|||
-- Bootstrap script (docker-entrypoint-initdb.d): create the application
-- databases on first start of the orga's MariaDB container.  Users and
-- grants are created later by the per-service init scripts.
CREATE DATABASE IF NOT EXISTS nextcloud;
CREATE DATABASE IF NOT EXISTS mattermost;
CREATE DATABASE IF NOT EXISTS wpdb;
@ -0,0 +1,371 @@ |
|||
#!/bin/bash |
|||
|
|||
# XXX pb arret des services retiré |
|||
|
|||
PRG=$(basename $0) |
|||
KAZ_ROOT=$(cd "$(dirname $0)/../.."; pwd) |
|||
. "${KAZ_ROOT}/bin/.commonFunctions.sh" |
|||
setKazVars |
|||
|
|||
cd $(dirname $0) |
|||
PWD=$(pwd) |
|||
ORGA_DIR=$(basename ${PWD}) |
|||
TIMESTAMP=YES |
|||
TMPL_PROXY_COMPOSE="${KAZ_COMP_DIR}/proxy/docker-compose.tmpl.yml" |
|||
ORGA_LIST="${KAZ_CONF_DIR}/container-orga.list" |
|||
LIST="" |
|||
|
|||
# Print the command-line help for orga-gen.sh and exit with status 1.
# Fix: the synopsis contained a stray '}' ("[-/+collabora}]]").
usage(){
    echo "Usage: $0 [-h] [-l] [+/-garradin] [-/+cloud [-/+collabora]] [+/-agora] [+/-wiki] [+/-wp] [x{G/M/k}] OrgaName"
    echo " -h|--help : this help"
    echo " -l|--list : list service"
    echo " +/- garradin : on/off garradin"
    echo " +/- cloud : on/off cloud"
    echo " +/- coll* : on/off collabora"
    echo " +/- matter*|agora : on/off agora"
    echo " +/- wiki : on/off wiki"
    echo " +/- wp|word* : on/off wp"
    echo " x[GMk] : set quota"
    echo " OrgaName : name must contain a-z0-9_\-"
    exit 1
}
|||
|
|||
for ARG in "$@" |
|||
do |
|||
case "${ARG}" in |
|||
'-h' | '-help' ) |
|||
usage |
|||
exit;; |
|||
'-l' | '-list' ) |
|||
;; |
|||
'-'* ) |
|||
;; |
|||
'+'* ) |
|||
;; |
|||
[.0-9]*[GMk] ) |
|||
;; |
|||
* ) |
|||
if [[ "${ORGA_DIR}" = "orgaTmpl" ]]; then |
|||
if [[ "${ARG}" =~ ^[a-z0-9_\-]+$ ]]; then |
|||
printKazMsg "create ${ARG}" |
|||
mkdir -p "${KAZ_COMP_DIR}/${ARG}-orga" |
|||
cd "${KAZ_COMP_DIR}/${ARG}-orga" |
|||
ln -sf ../../config/dockers.env .env |
|||
ln -sf ../../config/orgaTmpl/orga-gen.sh |
|||
ln -sf ../../config/orgaTmpl/orga-rm.sh |
|||
PWD=$(pwd) |
|||
ORGA_DIR=$(basename ${PWD}) |
|||
else |
|||
printKazError "Name must contains only a-z0-9_\-" |
|||
usage |
|||
exit |
|||
fi |
|||
else |
|||
if [[ "${ARG}-orga" != "${ORGA_DIR}" ]]; then |
|||
printKazError "Can't cross config ${ARG}-orga with ${ORGA_DIR}" |
|||
usage |
|||
exit |
|||
fi |
|||
fi |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [[ "${ORGA_DIR}" = "orgaTmpl" ]] |
|||
then |
|||
printKazError "You must give orga name" |
|||
usage |
|||
exit |
|||
fi |
|||
|
|||
if [[ "${ORGA_DIR}" != *"-orga" ]] |
|||
then |
|||
printKazError "it's not an orga dir" |
|||
exit |
|||
fi |
|||
ORGA=${ORGA_DIR%-orga} |
|||
|
|||
if [[ -z "${ORGA}" ]] |
|||
then |
|||
printKazError "it's not an orga dir" |
|||
exit |
|||
fi |
|||
|
|||
# default values |
|||
# Report whether a service is enabled in a compose file.
#   $1 - compose file to inspect
#   $2 - service pattern to look for (e.g. "cloud:")
#   $3 - default value echoed when the file does not exist
# Outputs "on"/"off" (or $3) on stdout.
# Fix: the grep previously used the hard-coded file "docker-compose.yml"
# instead of "$1", contradicting the [-f "$1"] test above it.
flagInCompose(){
    if [[ ! -f "$1" ]]
    then
        echo "$3"
    else
        if grep -q "$2" "$1"
        then
            echo on
        else
            echo off
        fi
    fi
}
|||
|
|||
export cloud=$(flagInCompose docker-compose.yml cloud: off) |
|||
export collabora=$(flagInCompose docker-compose.yml collabora: off) |
|||
export agora=$(flagInCompose docker-compose.yml agora: off) |
|||
export wiki=$(flagInCompose docker-compose.yml dokuwiki: off) |
|||
export wp=$(flagInCompose docker-compose.yml wordpress: off) |
|||
export db="off" |
|||
export services="off" |
|||
export garradin=$([[ -f useGarradin ]] && echo "on" || echo "off") |
|||
export quota=10G |
|||
if [[ -f docker-compose.yml ]]; then |
|||
if grep -q "storage_opt" docker-compose.yml |
|||
then |
|||
quota=$(grep "storage_opt" docker-compose.yml | cut -d : -f 2 | tail -n 1) |
|||
fi |
|||
fi |
|||
|
|||
for ARG in "$@" |
|||
do |
|||
case "${ARG}" in |
|||
'-show' ) |
|||
for i in cloud collabora agora wiki wp db |
|||
do |
|||
echo "${i}=${!i}" |
|||
done |
|||
exit;; |
|||
'-h' | '--help' ) |
|||
usage |
|||
exit;; |
|||
'-l' | '--list' ) |
|||
LIST="list" |
|||
;; |
|||
'-time'* ) |
|||
TIMESTAMP=YES |
|||
;; |
|||
'-gar'* ) |
|||
garradin="off" |
|||
;; |
|||
'-cloud' ) |
|||
cloud="off" |
|||
collabora="off" |
|||
;; |
|||
'-coll'* | '-offi'* ) |
|||
collabora="off" |
|||
;; |
|||
'-matter'* | '-agora') |
|||
agora="off" |
|||
;; |
|||
'-wiki' ) |
|||
wiki="off" |
|||
;; |
|||
'-wp' | '-word'* ) |
|||
wp="off" |
|||
;; |
|||
'+gar'* ) |
|||
garradin="on" |
|||
;; |
|||
'+cloud' ) |
|||
cloud="on" |
|||
collabora="on" |
|||
;; |
|||
'+coll'* | '+offi'* ) |
|||
collabora="on" |
|||
;; |
|||
'+matter'* | '+agora' ) |
|||
agora="on" |
|||
;; |
|||
'+wiki' ) |
|||
wiki="on" |
|||
;; |
|||
'+wp' | '+word'* ) |
|||
wp="on" |
|||
;; |
|||
[.0-9]*[GMk] ) |
|||
quota="${ARG}" |
|||
;; |
|||
esac |
|||
done |
|||
|
|||
if [ "${cloud}" = "on" -o "${agora}" = "on" -o "${wp}" = "on" ] |
|||
then |
|||
db="on" |
|||
fi |
|||
if [ "${db}" = "on" -o "${wiki}" = "on" ] |
|||
then |
|||
services="on" |
|||
fi |
|||
|
|||
. "${DOCKERS_ENV}" |
|||
ORGA_FLAG=${ORGA_DIR//-/_} |
|||
DOMAIN_AREA="{{${ORGA_FLAG}\n" |
|||
ADD_DOMAIN="" |
|||
DEL_DOMAIN="" |
|||
|
|||
# Print, one per line, every available service whose flag variable is
# currently "on".  Relies on kazList.sh for the service list and on the
# exported on/off variables (cloud, agora, ...) set above; ${!serv} is
# bash variable indirection.
# Fixes: removed the useless $(echo "$(...)") wrapper, quoted the script
# path, and made the loop variable local.
listServ () {
    local serv
    for serv in $("${KAZ_BIN_DIR}/kazList.sh" service available)
    do
        if [[ "${!serv}" == "on" ]]; then
            echo "${serv}"
        fi
    done
}
|||
|
|||
if [[ -n "${LIST}" ]] ; then |
|||
listServ |
|||
exit |
|||
fi |
|||
|
|||
#printKazError "garradin:${garradin} cloud:${cloud} collabora:${collabora} agora:${agora} wiki:${wiki} wp:${wp}" |
|||
|
|||
if [[ "${garradin}" = "on" ]]; then |
|||
touch useGarradin |
|||
ADD_DOMAIN+="${ORGA}-${garHost} " |
|||
else |
|||
rm -f useGarradin |
|||
DEL_DOMAIN+="${ORGA}-${garHost} " |
|||
fi |
|||
if [[ "${cloud}" = "on" ]]; then |
|||
DOMAIN_AREA+=" - ${ORGA}-\${nextcloudServName}:${ORGA}-\${cloudHost}.\${domain}\n" |
|||
ADD_DOMAIN+="${ORGA}-${cloudHost} " |
|||
else |
|||
DEL_DOMAIN+="${ORGA}-${cloudHost} " |
|||
fi |
|||
if [[ "${collabora}" = "on" ]]; then |
|||
DOMAIN_AREA+=" - ${ORGA}-\${officeServName}:${ORGA}-\${officeHost}.\${domain}\n" |
|||
ADD_DOMAIN+="${ORGA}-${officeHost} " |
|||
else |
|||
DEL_DOMAIN+="${ORGA}-${officeHost} " |
|||
fi |
|||
if [[ "${agora}" = "on" ]]; then |
|||
DOMAIN_AREA+=" - ${ORGA}-\${mattermostServName}:${ORGA}-\${matterHost}.\${domain}\n" |
|||
ADD_DOMAIN+="${ORGA}-${matterHost} " |
|||
else |
|||
DEL_DOMAIN+="${ORGA}-${matterHost} " |
|||
fi |
|||
if [[ "${wiki}" = "on" ]]; then |
|||
DOMAIN_AREA+=" - ${ORGA}-\${dokuwikiServName}:${ORGA}-\${dokuwikiHost}.\${domain}\n" |
|||
ADD_DOMAIN+="${ORGA}-${dokuwikiHost} " |
|||
else |
|||
DEL_DOMAIN+="${ORGA}-${dokuwikiHost} " |
|||
fi |
|||
if [[ "${wp}" = "on" ]]; then |
|||
DOMAIN_AREA+=" - ${ORGA}-\${wordpressServName}:${ORGA}-\${wordpressHost}.\${domain}\n" |
|||
ADD_DOMAIN+="${ORGA}-${wordpressHost} " |
|||
else |
|||
DEL_DOMAIN+="${ORGA}-${wordpressHost} " |
|||
fi |
|||
DOMAIN_AREA+="}}\n" |
|||
|
|||
printKazMsg "Update ${TMPL_PROXY_COMPOSE}" |
|||
if grep -q "^{{${ORGA_FLAG}" "${TMPL_PROXY_COMPOSE}" 2> /dev/null ; then |
|||
sed -i -e "/^{{${ORGA_FLAG}/,/^}}/d" "${TMPL_PROXY_COMPOSE}" |
|||
fi |
|||
# domaine |
|||
sed "s/^#### END ORGA HOST/${DOMAIN_AREA}#### END ORGA HOST/" -i "${TMPL_PROXY_COMPOSE}" |
|||
# use net |
|||
sed "s/^#### END ORGA USE_NET/{{${ORGA_FLAG}\n - ${ORGA}Net\n}}\n#### END ORGA USE_NET/" -i "${TMPL_PROXY_COMPOSE}" |
|||
# def net |
|||
sed "s/^#### END ORGA DEF_NET/{{${ORGA_FLAG}\n ${ORGA}Net:\n external:\n name: ${ORGA}-orgaNet\n}}\n#### END ORGA DEF_NET/" -i "${TMPL_PROXY_COMPOSE}" |
|||
|
|||
printKazMsg "Update DNS" |
|||
${KAZ_BIN_DIR}/dns.sh add ${ADD_DOMAIN} |
|||
${KAZ_BIN_DIR}/dns.sh del ${DEL_DOMAIN} |
|||
|
|||
printKazMsg "update docker-compose.yml ${ORGA}" |
|||
|
|||
# update: instantiate a template into a target file.
#   $1 = template file
#   $2 = target file
# Template block syntax, handled by awk:
#   #{{off ... #}}   block always removed
#   #{{on  ... #}}   block always kept
#   #{{NAME ... #}}  block kept iff the *exported* variable NAME is "on"
# Post-processing: blank lines and comment lines are stripped, the ${orga}
# placeholder becomes "<ORGA>-", and any storage_opt value is forced to
# the current ${quota}.
update() {
    (
    if [ "${TIMESTAMP}" == "YES" ]; then
        # fix: a "/" was missing between the directory and the script name
        echo "# Generated by $(pwd)/$(basename $0)"
        echo "# à partir du modèle $1"
        echo "#" $(date "+%x %X")
        echo
    fi
    awk '
    BEGIN {cp=1}
    /#}}/ {cp=1 ; next};
    /#{{on/ {cp=1; next};
    /#{{off/ {cp=0; next};
    # fix: RLENGTH covers the whole match "#{{NAME"; the name itself is
    # RLENGTH-3 characters long (the original read 3 characters too many,
    # which broke markers followed by trailing text)
    match($0, /#{{[a-zA-Z0-9_]+/) {cp=(ENVIRON[substr($0,RSTART+3,RLENGTH-3)] == "on"); next};
    {if (cp) print $0};' "$1" | sed \
        -e "/^[ \t]*$/d"\
        -e "/^[ ]*#.*$/d"\
        -e "s|\${orga}|${ORGA}-|g"
    ) > "$2"
    sed "s/storage_opt:.*/storage_opt: ${quota}/g" -i "$2"
}
|||
|
|||
update ${KAZ_CONF_DIR}/orgaTmpl/docker-compose.yml docker-compose.yml |
|||
printKazMsg "Service enabled:" |
|||
for service in $(listServ) ; do |
|||
printKazMsg " * ${service}" |
|||
done |
|||
|
|||
update ${KAZ_CONF_DIR}/orgaTmpl/init-volume.sh init-volume.sh |
|||
chmod a+x init-volume.sh |
|||
|
|||
ln -sf ../../config/orgaTmpl/orga-rm.sh |
|||
ln -sf ../../config/orgaTmpl/init-matter.sh |
|||
ln -sf ../../config/orgaTmpl/init-cloud.sh |
|||
ln -sf ../../config/orgaTmpl/init-wiki.sh |
|||
ln -sf ../../config/orgaTmpl/init-wp.sh |
|||
ln -sf ../../config/orgaTmpl/initdb.d/ |
|||
ln -sf ../../config/orgaTmpl/app/ |
|||
ln -sf ../../config/orgaTmpl/wiki-conf/ |
|||
|
|||
if ! grep -q "proxy_orga=" .env 2> /dev/null |
|||
then |
|||
echo "proxy_orga=on" >> .env |
|||
fi |
|||
|
|||
if ! grep -q "proxy_${ORGA_FLAG}=" .env 2> /dev/null |
|||
then |
|||
echo "proxy_${ORGA_FLAG}=off" >> .env |
|||
fi |
|||
touch "${ORGA_LIST}" |
|||
if ! grep -qx "${ORGA}-orga" "${ORGA_LIST}" 2> /dev/null |
|||
then |
|||
echo "${ORGA}-orga" >> "${ORGA_LIST}" |
|||
fi |
|||
|
|||
./init-volume.sh |
|||
${KAZ_BIN_DIR}/container.sh start ${ORGA}-orga |
|||
|
|||
for service in $("${KAZ_BIN_DIR}/kazList.sh" service disable ${ORGA}-orga); do |
|||
DockerServName= |
|||
case "${service}" in |
|||
agora) |
|||
DockerServName="${ORGA}-${mattermostServName}" |
|||
;; |
|||
garradin) |
|||
continue |
|||
;; |
|||
cloud) |
|||
DockerServName="${ORGA}-${nextcloudServName}" |
|||
;; |
|||
collabora) |
|||
DockerServName="${ORGA}-${officeServName}" |
|||
;; |
|||
wiki) |
|||
DockerServName="${ORGA}-${dokuwikiServName}" |
|||
;; |
|||
wp) |
|||
DockerServName="${ORGA}-${wordpressServName}" |
|||
;; |
|||
esac |
|||
if [[ "$(docker ps -f name=${DockerServName} | grep -w ${DockerServName})" ]]; then |
|||
printKazMsg " - stop ${service}" |
|||
docker rm -f "${DockerServName}" 2>/dev/null |
|||
fi |
|||
done |
|||
|
|||
# XXX risque d'écraser des config |
|||
[[ "${cloud}" = "on" ]] && ./init-cloud.sh |
|||
[[ "${wp}" = "on" ]] && ./init-wp.sh |
|||
[[ "${wiki}" = "on" ]] && ./init-wiki.sh |
|||
[[ "${agora}" = "on" ]] && ./init-matter.sh |
|||
|
|||
${KAZ_COMP_DIR}/web/web-gen.sh |
@ -0,0 +1,63 @@ |
|||
#!/bin/bash
# orga-rm.sh — interactively remove an organization from the KAZ platform:
# drops its section from the proxy compose template, deletes its DNS
# records, stops its container, removes its proxy flag, its registry
# entry and finally its docker directory.
# Must be run from either the orga template dir or a "<name>-orga" dir.

KAZ_ROOT=$(cd "$(dirname $0)/../.."; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd $(dirname $0)

PWD=$(pwd)
ORGA_DIR=$(basename ${PWD})

TMPL_PROXY_COMPOSE="${KAZ_COMP_DIR}/proxy/docker-compose.tmpl.yml"
ORGA_LIST="${KAZ_CONF_DIR}/container-orga.list"

remove () {
    # Confirmation gate: accepts oui/yes/y/o in any case; anything else aborts.
    read -r -p "Are you sure remove ${ORGA}? [y/N] " response
    case "$response" in
        [oO][uU][iI] | [yY][eE][sS] | [yY] | [oO] )

            echo "remove ${ORGA}"
            . .env
            # Flag used inside the proxy template; dashes are mapped to
            # underscores there (same convention as the creation script).
            ORGA_FLAG=${ORGA//-/_}_orga
            echo "Update ${TMPL_PROXY_COMPOSE}"
            # Delete the whole {{flag ... }} section injected at creation time.
            if grep -q "^{{${ORGA_FLAG}" "${TMPL_PROXY_COMPOSE}" 2> /dev/null ; then
                sed -i -e "/^{{${ORGA_FLAG}/,/^}}/d" "${TMPL_PROXY_COMPOSE}"
            fi
            # Collect every per-service host of this orga and drop its DNS
            # entries in one call (the *Host vars come from the sourced .env).
            DEL_DOMAIN=""
            for serv in ${garHost} ${cloudHost} ${officeHost} ${dokuwikiHost} ${wordpressHost} ${matterHost}
            do
                DEL_DOMAIN+="${ORGA}-${serv} "
            done
            ${KAZ_BIN_DIR}/dns.sh del ${DEL_DOMAIN}
            ${KAZ_BIN_DIR}/container.sh stop ${ORGA}-orga
            # Remove the proxy on/off switch, the registry line, then the files.
            # NOTE(review): DOCKERS_ENV is presumably exported by
            # .commonFunctions.sh/setKazVars — confirm before relying on it.
            sed -i -e "/proxy_${ORGA_FLAG}=/d" "${DOCKERS_ENV}"
            sed -i -e "/^${ORGA}-orga$/d" "${ORGA_LIST}"
            rm -fr "${KAZ_COMP_DIR}/${ORGA}-orga"
            exit;;
        *)
            exit;;
    esac
}

# Run from the template directory itself: ask which orga to delete.
# NOTE(review): the prompt says "new organization name" although this is a
# removal — looks like copy-paste from the creation script.
if [[ "${ORGA_DIR}" = "orgaTmpl" ]]
then
    while :
    do
        echo -n "Give new organization name ? "
        read ORGA
        [[ "${ORGA}" =~ ^[a-zA-Z0-9_\-]+$ ]] && [[ ! -z "${ORGA}" ]] && break
        echo "Name must contains only a-zA-Z0-9_\-"
    done
    remove
    exit
fi

# Otherwise the current directory must be a "<orga>-orga" docker directory.
if [[ "${ORGA_DIR}" != *"-orga" ]]
then
    echo "it's not an orga dir"
    exit
fi

# Derive the orga name by stripping the "-orga" suffix, then remove it.
ORGA=${ORGA_DIR%-orga}
remove
@ -0,0 +1,10 @@ |
|||
# acl.auth.php |
|||
# <?php exit()?> |
|||
# Don't modify the lines above |
|||
# |
|||
# Access Control Lists |
|||
# |
|||
# Auto-generated by install script |
|||
# Date: Sat, 13 Feb 2021 17:42:28 +0000 |
|||
* @ALL 1 |
|||
* @user 8 |
@ -0,0 +1,26 @@ |
|||
<?php |
|||
/* |
|||
* Dokuwiki's Main Configuration File - Local Settings |
|||
* Auto-generated by config plugin |
|||
* Run for user: felix |
|||
* Date: Sun, 28 Feb 2021 15:56:13 +0000 |
|||
*/ |
|||
|
|||
$conf['title'] = 'Kaz'; |
|||
$conf['template'] = 'docnavwiki'; |
|||
$conf['license'] = 'cc-by-sa'; |
|||
$conf['useacl'] = 1; |
|||
$conf['superuser'] = '@admin'; |
|||
$conf['manager'] = '@manager'; |
|||
$conf['disableactions'] = 'register'; |
|||
$conf['remoteuser'] = ''; |
|||
$conf['mailfrom'] = 'dokuwiki@kaz.bzh'; |
|||
$conf['updatecheck'] = 0; |
|||
$conf['userewrite'] = '1'; |
|||
$conf['useslash'] = 1; |
|||
$conf['plugin']['ckgedit']['scayt_auto'] = 'on'; |
|||
$conf['plugin']['ckgedit']['scayt_lang'] = 'French/fr_FR'; |
|||
$conf['plugin']['ckgedit']['other_lang'] = 'fr'; |
|||
$conf['plugin']['smtp']['smtp_host'] = 'smtp.kaz.bzh'; |
|||
$conf['plugin']['todo']['CheckboxText'] = 0; |
|||
$conf['plugin']['wrap']['restrictionType'] = '1'; |
@ -0,0 +1,13 @@ |
|||
# users.auth.php |
|||
# <?php exit()?> |
|||
# Don't modify the lines above |
|||
# |
|||
# Userfile |
|||
# |
|||
# Auto-generated by install script |
|||
# Date: Sat, 13 Feb 2021 17:42:28 +0000 |
|||
# |
|||
# Format: |
|||
# login:passwordhash:Real Name:email:groups,comma,separated |
|||
|
|||
admin:$2y$10$GYvFgViXeEUmDViplHEs7eoYV8tmbfsS8wA1vfHQ.tWgW14o9aTjy:admin:contact@kaz.bzh:admin,user |
@ -0,0 +1 @@ |
|||
/etc/nginx/allow_admin_ip |
@ -0,0 +1 @@ |
|||
allow all; |
@ -0,0 +1 @@ |
|||
allow all; |
@ -0,0 +1,11 @@ |
|||
listen 443 ssl http2; |
|||
ssl_certificate /etc/letsencrypt/live/dev.kaz.bzh/fullchain.pem; |
|||
ssl_certificate_key /etc/letsencrypt/live/dev.kaz.bzh/privkey.pem; |
|||
ssl_session_timeout 1d; |
|||
ssl_protocols TLSv1.2 TLSv1.3; |
|||
ssl_early_data on; |
|||
ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; |
|||
ssl_prefer_server_ciphers on; |
|||
ssl_session_cache shared:SSL:50m; |
|||
ssl_stapling on; |
|||
ssl_stapling_verify on; |
@ -0,0 +1,17 @@ |
|||
listen 443 ssl http2; |
|||
ssl_certificate /etc/ssl/certs/wildcard_kaz_bzh.chain.pem; |
|||
ssl_certificate_key /etc/ssl/private/wildcard_kaz_bzh.key.pem; |
|||
|
|||
# include assos-certif en fonction de domain_name |
|||
# ssl_certificate /etc/letsencrypt/live/domain_name/fullchain.pem; |
|||
# ssl_certificate_key /etc/letsencrypt/live/domain_name/privkey.pem; |
|||
# |
|||
|
|||
ssl_session_timeout 1d; |
|||
ssl_protocols TLSv1.2 TLSv1.3; |
|||
ssl_early_data on; |
|||
ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; |
|||
ssl_prefer_server_ciphers on; |
|||
ssl_session_cache shared:SSL:50m; |
|||
ssl_stapling on; |
|||
ssl_stapling_verify on; |
@ -0,0 +1,13 @@ |
|||
listen 443 ssl http2; |
|||
|
|||
ssl_certificate /etc/letsencrypt/local/_wildcard.kaz.local.pem; |
|||
ssl_certificate_key /etc/letsencrypt/local/_wildcard.kaz.local-key.pem; |
|||
|
|||
ssl_session_timeout 1d; |
|||
ssl_protocols TLSv1.2 TLSv1.3; |
|||
ssl_early_data on; |
|||
ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; |
|||
ssl_prefer_server_ciphers on; |
|||
ssl_session_cache shared:SSL:50m; |
|||
ssl_stapling on; |
|||
ssl_stapling_verify on; |
@ -0,0 +1,21 @@ |
|||
|
|||
#proxy_buffering off; |
|||
#proxy_set_header X-Forwarded-Host $host:$server_port; |
|||
#proxy_set_header X-Forwarded-Server $host; |
|||
#XXX pb proxy_set_header Connection $proxy_connection; |
|||
|
|||
proxy_buffers 256 16k; |
|||
proxy_buffer_size 16k; |
|||
|
|||
# mattermost |
|||
http2_push_preload on; # Enable HTTP/2 Server Push |
|||
add_header Strict-Transport-Security max-age=15768000; |
|||
proxy_set_header Host $http_host; |
|||
proxy_set_header X-Real-IP $remote_addr; |
|||
#proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; |
|||
proxy_set_header X-Forwarded-Proto $scheme; |
|||
|
|||
#proxy_hide_header 'x-frame-options'; |
|||
#proxy_set_header x-frame-options allowall; |
|||
proxy_set_header X-Frame-Options SAMEORIGIN; |
|||
|
@ -0,0 +1,49 @@ |
|||
# géné |
|||
server { |
|||
listen 80; |
|||
server_name dev.kaz.bzh www.dev.kaz.bzh dev.pad.kaz.bzh dev.listes.kaz.bzh; |
|||
# manque les *-garradin.kaz.bzh |
|||
return 301 https://$host$request_uri; |
|||
} |
|||
|
|||
# file |
|||
server { |
|||
listen 80; |
|||
server_name file.dev.kaz.bzh; |
|||
return 301 https://depot.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# calc
|||
server { |
|||
listen 80; |
|||
server_name calc.dev.kaz.bzh; |
|||
return 301 https://tableur.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# date |
|||
server { |
|||
listen 80; |
|||
server_name date.dev.kaz.bzh; |
|||
return 301 https://sondage.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# cloud |
|||
server { |
|||
listen 80; |
|||
server_name bureau.dev.kaz.bzh; |
|||
return 301 https://cloud.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# mattermost |
|||
server { |
|||
listen 80; |
|||
server_name mattermost.dev.kaz.bzh; |
|||
return 301 https://agora.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# dokuwiki |
|||
server { |
|||
listen 80; |
|||
server_name dokuwiki.dev.kaz.bzh; |
|||
return 301 https://wiki.dev.kaz.bzh$request_uri; |
|||
} |
@ -0,0 +1,47 @@ |
|||
# géné |
|||
server { |
|||
listen 443; |
|||
# return 301 http://$host$request_uri; |
|||
} |
|||
|
|||
# file |
|||
server { |
|||
listen 80; |
|||
server_name file.dev.kaz.bzh; |
|||
return 301 https://depot.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# calc
|||
server { |
|||
listen 80; |
|||
server_name calc.dev.kaz.bzh; |
|||
return 301 https://tableur.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# date |
|||
server { |
|||
listen 80; |
|||
server_name date.dev.kaz.bzh; |
|||
return 301 https://sondage.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# cloud |
|||
server { |
|||
listen 80; |
|||
server_name bureau.dev.kaz.bzh; |
|||
return 301 https://cloud.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# mattermost |
|||
server { |
|||
listen 80; |
|||
server_name mattermost.dev.kaz.bzh; |
|||
return 301 https://agora.dev.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# dokuwiki |
|||
server { |
|||
listen 80; |
|||
server_name dokuwiki.dev.kaz.bzh; |
|||
return 301 https://wiki.dev.kaz.bzh$request_uri; |
|||
} |
@ -0,0 +1,79 @@ |
|||
# géné |
|||
server { |
|||
listen 80; |
|||
server_name kaz.bzh www.kaz.bzh pad.kaz.bzh listes.kaz.bzh; |
|||
# manque les *-garradin.kaz.bzh |
|||
return 301 https://$host$request_uri; |
|||
} |
|||
|
|||
# file |
|||
server { |
|||
listen 80; |
|||
server_name file.kaz.bzh; |
|||
return 301 https://depot.kaz.bzh$request_uri; |
|||
} |
|||
server { |
|||
include includes/port.kaz.bzh; |
|||
server_name file.kaz.bzh; |
|||
return 301 https://depot.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# calc
|||
server { |
|||
listen 80; |
|||
server_name calc.kaz.bzh; |
|||
return 301 https://tableur.kaz.bzh$request_uri; |
|||
} |
|||
server { |
|||
include includes/port.kaz.bzh; |
|||
server_name calc.kaz.bzh; |
|||
return 301 https://tableur.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# date |
|||
server { |
|||
listen 80; |
|||
server_name date.kaz.bzh; |
|||
return 301 https://sondage.kaz.bzh$request_uri; |
|||
} |
|||
server { |
|||
include includes/port.kaz.bzh; |
|||
server_name date.kaz.bzh; |
|||
return 301 https://sondage.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# cloud |
|||
server { |
|||
listen 80; |
|||
server_name bureau.kaz.bzh; |
|||
return 301 https://cloud.kaz.bzh$request_uri; |
|||
} |
|||
server { |
|||
include includes/port.kaz.bzh; |
|||
server_name bureau.kaz.bzh; |
|||
return 301 https://cloud.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# mattermost |
|||
server { |
|||
listen 80; |
|||
server_name mattermost.kaz.bzh; |
|||
return 301 https://agora.kaz.bzh$request_uri; |
|||
} |
|||
server { |
|||
include includes/port.kaz.bzh; |
|||
server_name mattermost.kaz.bzh; |
|||
return 301 https://agora.kaz.bzh$request_uri; |
|||
} |
|||
|
|||
# dokuwiki |
|||
server { |
|||
listen 80; |
|||
server_name dokuwiki.kaz.bzh; |
|||
return 301 https://wiki.kaz.bzh$request_uri; |
|||
} |
|||
server { |
|||
include includes/port.kaz.bzh; |
|||
server_name dokuwiki.kaz.bzh; |
|||
return 301 https://wiki.kaz.bzh$request_uri; |
|||
} |
@ -0,0 +1,47 @@ |
|||
# géné |
|||
server { |
|||
listen 80; |
|||
return 301 https://$host$request_uri; |
|||
} |
|||
|
|||
# file |
|||
server { |
|||
listen 80; |
|||
server_name file.kaz.local; |
|||
return 301 https://depot.kaz.local$request_uri; |
|||
} |
|||
|
|||
# calc
|||
server { |
|||
listen 80; |
|||
server_name calc.kaz.local; |
|||
return 301 https://tableur.kaz.local$request_uri; |
|||
} |
|||
|
|||
# date |
|||
server { |
|||
listen 80; |
|||
server_name date.kaz.local; |
|||
return 301 https://sondage.kaz.local$request_uri; |
|||
} |
|||
|
|||
# cloud |
|||
server { |
|||
listen 80; |
|||
server_name bureau.kaz.local; |
|||
return 301 https://cloud.kaz.local$request_uri; |
|||
} |
|||
|
|||
# mattermost |
|||
server { |
|||
listen 80; |
|||
server_name mattermost.kaz.local; |
|||
return 301 https://agora.kaz.local$request_uri; |
|||
} |
|||
|
|||
# dokuwiki |
|||
server { |
|||
listen 80; |
|||
server_name dokuwiki.kaz.local; |
|||
return 301 https://wiki.kaz.local$request_uri; |
|||
} |
@ -0,0 +1 @@ |
|||
# <external domaine> <orga>; |
@ -0,0 +1 @@ |
|||
# server_name <external domaine>; |
@ -0,0 +1,55 @@ |
|||
# a verifier |
|||
/vigilo/ |
|||
dockers/grav |
|||
dockers/web/Dockerfile |
|||
dockers/proxy/todo-ssl |
|||
dockers/cloud/DEADJOE |
|||
# jamais |
|||
/.git/ |
|||
bin/createUser.log |
|||
bin/createUser.old |
|||
bin/createUser.txt |
|||
dockers/mattermost/modif_user.txt |
|||
state/activites_mailbox.csv |
|||
state/collecte.csv |
|||
config/createUser_cmds_to_run.sh |
|||
dockers/proxy/config/pb-vigilo.txt |
|||
dockers/garradin/garradin- |
|||
*~ |
|||
*/*~ |
|||
*/*/*~ |
|||
*/*/*/*~ |
|||
*/*.old/ |
|||
*/*/*.old/ |
|||
*/*/*/*.old/ |
|||
# auto |
|||
dockers/garradin/config/config.local.php |
|||
dockers/proxy/config/nginx.conf |
|||
dockers/*-orga/ |
|||
# param |
|||
dockers/postfix/config/dovecot-quotas.cf |
|||
dockers/postfix/config/postfix-accounts.cf |
|||
dockers/postfix/config/postfix-accounts.cf |
|||
dockers/postfix/config/postfix-virtual.cf |
|||
dockers/postfix/config/ssl/demoCA/cacert.pem |
|||
dockers/postfix/config/ssl/demoCA/careq.pem |
|||
dockers/postfix/config/ssl/demoCA/index.txt |
|||
dockers/postfix/config/ssl/demoCA/index.txt.attr |
|||
dockers/postfix/config/ssl/demoCA/index.txt.old |
|||
dockers/postfix/config/ssl/demoCA/newcerts/*.pem |
|||
dockers/postfix/config/ssl/demoCA/private/cakey.pem |
|||
dockers/postfix/config/ssl/demoCA/serial |
|||
dockers/postfix/config/ssl/mail.dev.kaz.bzh-key.pem |
|||
dockers/postfix/config/ssl/mail.dev.kaz.bzh-req.pem |
|||
dockers/proxy/docker-compose.yml |
|||
dockers/test-all-but-mail.sh |
|||
dockers/web/html/status/allServices.html |
|||
dockers/web/html/status/allServices.tmpl.html |
|||
config/container-orga.list |
|||
config/container-withMail.list |
|||
config/container-withoutMail.list |
|||
config/password/env-mailServ |
|||
config/proxy/*_kaz_map.* |
|||
config/proxy/*_kaz_name.* |
|||
# parfois |
|||
dockers/proxy/docker-compose.tmpl.yml |
@ -0,0 +1 @@ |
|||
../../config/dockers.env |
@ -0,0 +1,90 @@ |
|||
Pour l'installation de Nextcloud |
|||
|
|||
Documentation: |
|||
https://registry.hub.docker.com/_/nextcloud?tab=description |
|||
https://registry.hub.docker.com/_/mariadb?tab=description |
|||
https://blog.ssdnodes.com/blog/installing-nextcloud-docker/ |
|||
|
|||
____________________________________________________________ |
|||
Contenu du répertoire |
|||
|
|||
____________________________________________________________ |
|||
Se placer dans le bon répertoire |
|||
|
|||
# cd /docker/cloud |
|||
|
|||
____________________________________________________________ |
|||
Lancement de nextcloud et nextcloudDB |
|||
|
|||
# docker-compose up -d |
|||
|
|||
____________________________________________________________ |
|||
Vérification |
|||
Il y a des containers qui tournent cloudServ cloudDB (collabraServ) |
|||
|
|||
# docker ps |
|||
|
|||
# docker exec -ti cloudServ bash |
|||
exit |
|||
|
|||
# docker exec -ti cloudDB bash |
|||
exit |
|||
|
|||
____________________________________________________________ |
|||
Personalisation |
|||
|
|||
Il faut attendre 2 minutes pour le lancement |
|||
|
|||
Pour mettre en français |
|||
emacs /var/lib/docker/volumes/cloud_cloudConfig/_data/config.php |
|||
il faut ajouter : |
|||
"default_language" => "fr", |
|||
|
|||
Création des comptes. |
|||
Application |
|||
* Tasks |
|||
* Calendar |
|||
* Desk |
|||
* Contact |
|||
* Mail |
|||
* Talk |
|||
* Draw.io |
|||
|
|||
|
|||
|
|||
* Collabora Online - Built-in CODE Server (il faut un port d'écoute) |
|||
apt update |
|||
apt install sudo |
|||
sudo -u www-data php -d memory_limit=512M ./occ app:install richdocumentscode |
|||
sudo -u www-data php -d memory_limit=512M ./occ app:update --all |
|||
|
|||
ou |
|||
|
|||
installer un docker collabora et
|||
apt update |
|||
apt install sudo |
|||
sudo -u www-data ./occ config:app:set --value http://89.234.186.106:9980/ richdocuments wopi_url |
|||
sudo -u www-data ./occ richdocuments:activate-config |
|||
|
|||
https://cloud.kaz.bzh/settings/admin/richdocuments |
|||
|
|||
____________________________________________________________ |
|||
Mettre à jour le mot de passe dans /kaz/secret |
|||
|
|||
____________________________________________________________ |
|||
Test |
|||
Y a plus qu'a tester |
|||
http://kaz.bzh:8080 |
|||
|
|||
____________________________________________________________ |
|||
Traces |
|||
https://cloud.kaz.bzh/index.php/settings/admin/logging |
|||
|
|||
____________________________________________________________ |
|||
Pour la sauvegarde il faut également des scripts |
|||
|
|||
Didier le 11/12/2020 |
|||
installation du module RainLoop pour les mails |
|||
mot passe de l' admin : voir dans /kaz/secret/SetAllPass.sh |
|||
le module a viré pas de custom_apps dans le docker-compose |
|||
proposition de modif du docker-compose.yml mis en commentaires. |
@ -0,0 +1,69 @@ |
|||
version: '3.3' |
|||
|
|||
services: |
|||
|
|||
cloud: |
|||
image: nextcloud |
|||
container_name: ${nextcloudServName} |
|||
restart: ${restartPolicy} |
|||
depends_on: |
|||
- db |
|||
links: |
|||
- db |
|||
external_links: |
|||
- ${smtpServName}:${smtpHost} |
|||
ports: |
|||
- 8090:80 |
|||
networks: |
|||
- cloudNet |
|||
- postfixNet |
|||
volumes: |
|||
- cloudMain:/var/www/html |
|||
- cloudData:/var/www/html/data |
|||
- cloudConfig:/var/www/html/config |
|||
- cloudApps:/var/www/html/apps |
|||
- cloudCustomApps:/var/www/html/custom_apps |
|||
- cloudThemes:/var/www/html/themes/ |
|||
- cloudPhp:/usr/local/etc/php/conf.d/ |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
env_file: |
|||
- ../../secret/env-${nextcloudServName} |
|||
- ../../secret/env-${nextcloudDBName} |
|||
environment: |
|||
- NEXTCLOUD_TRUSTED_DOMAINS=${cloudHost}.${domain} |
|||
- SMTP_HOST=${smtpHost} |
|||
- SMTP_PORT=25 |
|||
- MAIL_DOMAIN=${domain} |
|||
|
|||
db: |
|||
image: mariadb:10.5 |
|||
container_name: ${nextcloudDBName} |
|||
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW |
|||
restart: ${restartPolicy} |
|||
volumes: |
|||
- cloudDB:/var/lib/mysql |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
env_file: |
|||
- ../../secret/env-${nextcloudDBName} |
|||
networks: |
|||
- cloudNet |
|||
|
|||
volumes: |
|||
cloudDB: |
|||
cloudMain: |
|||
cloudData: |
|||
cloudConfig: |
|||
cloudApps: |
|||
cloudCustomApps: |
|||
cloudThemes: |
|||
cloudPhp: |
|||
|
|||
networks: |
|||
cloudNet: |
|||
external: |
|||
name: cloudNet |
|||
postfixNet: |
|||
external: |
|||
name: postfixNet |
@ -0,0 +1,39 @@ |
|||
#!/bin/bash
# init-cloud.sh — first-time initialisation of the NextCloud container.
# If the instance has not completed its web installer yet, drive the
# installer over HTTP with the credentials kept in the secret env files,
# then always run updateCloud.sh.

KAZ_ROOT=$(cd $(dirname $0)/../..; pwd)
. "${KAZ_ROOT}/bin/.commonFunctions.sh"
setKazVars

cd $(dirname $0)
. ".env"
. "${KAZ_KEY_DIR}/env-${nextcloudServName}"
. "${KAZ_KEY_DIR}/env-${nextcloudDBName}"

CLOUD_URL="${httpProto}://${cloudHost}.${domain}"

# Nothing to do if the NextCloud container is not up.
if ! [[ "$(docker ps -f name=${nextcloudServName} | grep -w ${nextcloudServName})" ]]; then
    printKazError "NextCloud not running... abort"
    exit
fi

# config.php only contains "'installed' => true," once the web installer
# has completed; skip the installation step when that marker is present.
if ! grep -q "'installed' => true," "${DOCK_VOL}/volumes/cloudConfig/_data/config.php" 2> /dev/null; then
    printKazMsg "\n *** Premier lancement de NextCLoud"

    waitUrl "${CLOUD_URL}"

    # Fix: the POST must target the cloud itself. The original referenced
    # ${ORGA_URL}, which is never defined in this script (leftover from
    # the per-orga template variant), so the installer was never reached.
    curl -X POST \
	 -d "install=true" \
	 -d "adminlogin=${nextcloud_NEXTCLOUD_ADMIN_USER}" \
	 -d "adminpass=${nextcloud_NEXTCLOUD_ADMIN_PASSWORD}" \
	 -d "directory=/var/www/html/data" \
	 -d "dbtype=mysql" \
	 -d "dbuser=${nextcloud_MYSQL_USER}" \
	 -d "dbpass=${nextcloud_MYSQL_PASSWORD}" \
	 -d "dbname=${nextcloud_MYSQL_DATABASE}" \
	 -d "dbhost=${nextcloud_MYSQL_HOST}" \
	 -d "install-recommended-apps=true" \
	 "${CLOUD_URL}"
fi

"${KAZ_ROOT}/bin/updateCloud.sh"
|||
|
@ -0,0 +1,3 @@ |
|||
#!/bin/bash
# One-shot maintenance helper: run "occ db:add-missing-indices" inside the
# NextCloud container as the web-server user, to create the optional
# database indices NextCloud reports as missing after upgrades.
# NOTE(review): -t allocates a TTY, so this fails when run from cron/CI —
# confirm whether interactive use is really intended.

docker exec --user www-data -ti nextcloudServ bash -c "/var/www/html/occ db:add-missing-indices"
@ -0,0 +1 @@ |
|||
../../config/dockers.env |
@ -0,0 +1,37 @@ |
|||
|
|||
https://help.nextcloud.com/t/socket-error-when-accessing-collabora/22486/17 |
|||
https://collabora-online-for-nextcloud.readthedocs.io/en/latest/install/ |
|||
https://www.collaboraoffice.com/code/nginx-reverse-proxy/ |
|||
|
|||
https://www.digitalocean.com/community/tutorials/understanding-nginx-server-and-location-block-selection-algorithms |
|||
|
|||
https://cloud.kaz.bzh/settings/admin/richdocuments |
|||
https://office.kaz.bzh/ |
|||
|
|||
docker run -t -d -p 127.0.0.1:9980:9980 -e 'domain=cloud\\.kaz\\.local' --restart always --cap-add MKNOD collabora/code
|||
|
|||
|
|||
https://stackoverflow.com/questions/31667160/running-docker-container-iptables-no-chain-target-match-by-that-name |
|||
|
|||
*nat |
|||
:PREROUTING ACCEPT [144:8072] |
|||
:INPUT ACCEPT [87:5208] |
|||
:OUTPUT ACCEPT [118:8055] |
|||
:POSTROUTING ACCEPT [118:8055] |
|||
:DOCKER - [0:0] |
|||
... your previous rules here ... |
|||
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER |
|||
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER |
|||
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE |
|||
COMMIT |
|||
*filter |
|||
:INPUT ACCEPT [0:0] |
|||
:FORWARD ACCEPT [0:0] |
|||
:OUTPUT ACCEPT [5781:5099614] |
|||
:DOCKER - [0:0] |
|||
... your previous rules here ... |
|||
-A FORWARD -o docker0 -j DOCKER |
|||
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT |
|||
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT |
|||
-A FORWARD -i docker0 -o docker0 -j ACCEPT |
|||
COMMIT |
@ -0,0 +1,33 @@ |
|||
version: '3.3' |
|||
|
|||
services: |
|||
|
|||
collabora: |
|||
image: collabora/code |
|||
container_name: ${officeServName} |
|||
restart: ${restartPolicy} |
|||
cap_add: |
|||
- MKNOD |
|||
- SYS_CHROOT |
|||
- FOWNER |
|||
ports: |
|||
- 8091:9980 |
|||
env_file: |
|||
- ../../secret/env-${officeServName} |
|||
environment: |
|||
- dictionaries=fr_FR en_GB es_ES |
|||
- domain=.*${cloudHost}\.${domain} |
|||
- server_name=${officeHost}.${domain} |
|||
- VIRTUAL_HOST=${officeHost}.${domain} |
|||
- VIRTUAL_PORT=9980 |
|||
- VIRTUAL_PROTO=https |
|||
volumes: |
|||
- /etc/localtime:/etc/localtime:ro |
|||
- /etc/timezone:/etc/timezone:ro |
|||
networks: |
|||
collaboraNet: |
|||
|
|||
networks: |
|||
collaboraNet: |
|||
external: |
|||
name: collaboraNet |
@ -0,0 +1 @@ |
|||
../../config/dockers.env |
@ -0,0 +1,85 @@ |
|||
# DokuWiki image built on crazymax/alpine-s6 (Alpine Linux + s6 overlay init).
FROM --platform=${TARGETPLATFORM:-linux/amd64} crazymax/alpine-s6:3.12

ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN printf "I am running on ${BUILDPLATFORM:-linux/amd64}, building for ${TARGETPLATFORM:-linux/amd64}\n$(uname -a)\n"

LABEL maintainer="CrazyMax"

########################################
# APT local cache
# work around because COPY failed if no source file
# (the [g] glob makes the optional config files match zero-or-one source)
COPY .dummy .apt-mirror-confi[g] .proxy-confi[g] /
RUN cp /.proxy-config /etc/profile.d/proxy.sh 2> /dev/null || true
# If a mirror configuration was provided, point the package sources at
# the local mirrors defined in /.apt-mirror-config.
RUN if [ -f /.apt-mirror-config ] ; then . /.apt-mirror-config && sed -i \
  -e "s/deb.debian.org/${APT_MIRROR_DEBIAN}/g" \
  -e "s/security.debian.org/${APT_MIRROR_DEBIAN_SECURITY}/g" \
  -e "s/archive.ubuntu.com/${APT_MIRROR_UBUNTU}/g" \
  -e "s/security.ubuntu.com/${APT_MIRROR_UBUNTU_SECURITY}/g" \
  /etc/apt/sources.list; fi

########################################
# Runtime stack: nginx + PHP 7 (FPM) with the extensions DokuWiki needs.
RUN apk --update --no-cache add \
    curl \
    imagemagick \
    inotify-tools \
    libgd \
    nginx \
    php7 \
    php7-cli \
    php7-ctype \
    php7-curl \
    php7-fpm \
    php7-gd \
    php7-imagick \
    php7-json \
    php7-ldap \
    php7-mbstring \
    php7-openssl \
    php7-pdo \
    php7-pdo_sqlite \
    php7-session \
    php7-simplexml \
    php7-sqlite3 \
    php7-xml \
    php7-zip \
    php7-zlib \
    shadow \
    su-exec \
    tar \
    tzdata \
  && rm -rf /tmp/* /var/cache/apk/* /var/www/*

ENV S6_BEHAVIOUR_IF_STAGE2_FAILS="2" \
  DOKUWIKI_VERSION="2020-07-29" \
  DOKUWIKI_MD5="8867b6a5d71ecb5203402fe5e8fa18c9" \
  TZ="UTC" \
  PUID="1500" \
  PGID="1500"

# Download the pinned DokuWiki release, verify its MD5 checksum, unpack it
# into /var/www, then drop the build-only tools again.
RUN apk --update --no-cache add -t build-dependencies \
    gnupg \
    wget \
  && cd /tmp \
  && wget -q "https://download.dokuwiki.org/src/dokuwiki/dokuwiki-$DOKUWIKI_VERSION.tgz" \
  && echo "$DOKUWIKI_MD5 /tmp/dokuwiki-$DOKUWIKI_VERSION.tgz" | md5sum -c - | grep OK \
  && tar -xzf "dokuwiki-$DOKUWIKI_VERSION.tgz" --strip 1 -C /var/www \
  && apk del build-dependencies \
  && rm -rf /root/.gnupg /tmp/* /var/cache/apk/*

# Overlay the image filesystem (s6 services, scripts) from ./rootfs.
COPY rootfs /
RUN rm -f /dokuwiki.tgz
COPY htaccess /dokuwiki/.htaccess

# Dedicated unprivileged user/group for the wiki.
RUN chmod a+x /usr/local/bin/* \
  && addgroup -g ${PGID} dokuwiki \
  && adduser -D -H -u ${PUID} -G dokuwiki -s /bin/sh dokuwiki

EXPOSE 8000
WORKDIR /var/www
VOLUME [ "/data" ]

ENTRYPOINT [ "/init" ]

# NOTE(review): the healthcheck probes port 12345 while EXPOSE declares
# 8000 — presumably a dedicated /ping endpoint from rootfs; confirm.
HEALTHCHECK --interval=10s --timeout=5s --start-period=20s \
  CMD curl --fail http://127.0.0.1:12345/ping || exit 1
Some files were not shown because too many files changed in this diff
Loading…
Reference in new issue