first commit
This commit is contained in:
@@ -0,0 +1,61 @@
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
"""build environment used by shell scripts

"""

# set path
import sys
import os
from os.path import realpath, dirname, join, sep, abspath

repo_root = realpath(dirname(realpath(__file__)) + sep + '..')
sys.path.insert(0, repo_root)

# Assure that the settings file from the repository's working tree is used to
# generate the build_env, not from /etc/searxng/settings.yml.
os.environ['SEARXNG_SETTINGS_PATH'] = join(repo_root, 'etc', 'settings.yml')


def _env(*arg, **kwargs):
    """Fetch a setting and map booleans to shell-friendly values:
    ``True`` --> ``'1'`` and ``False`` --> ``''`` (empty string)."""
    val = get_setting(*arg, **kwargs)
    if val is True:
        val = '1'
    elif val is False:
        val = ''
    return val


# If you add or remove variables here, do not forget to update:
# - ./docs/admin/engines/settings.rst
# - ./docs/dev/makefile.rst (section make buildenv)

name_val = [
    ('SEARXNG_URL', 'server.base_url'),
    ('SEARXNG_PORT', 'server.port'),
    ('SEARXNG_BIND_ADDRESS', 'server.bind_address'),
]

brand_env = 'utils' + sep + 'brand.env'

# Some defaults in the settings.yml are taken from the environment,
# e.g. SEARXNG_BIND_ADDRESS (:py:obj:`searx.settings_defaults.SCHEMA`).  When
# the 'brand.env' file is created these environment variables should be unset
# first::

_unset = object()
for name, option in name_val:
    # idiomatic identity test (was: "not ... is _unset")
    if os.environ.get(name, _unset) is not _unset:
        del os.environ[name]

# After the variables are unset in the environ, we can import from the searx
# package (what will read the values from the settings.yml).

from searx.version import GIT_URL, GIT_BRANCH
from searx import get_setting

print('build %s (settings from: %s)' % (brand_env, os.environ['SEARXNG_SETTINGS_PATH']))
# NOTE: a duplicated sys.path.insert(0, repo_root) was removed here -- the
# path is already inserted right after the imports above.

with open(repo_root + sep + brand_env, 'w', encoding='utf-8') as f:
    for name, option in name_val:
        print("export %s='%s'" % (name, _env(option)), file=f)
    print(f"export GIT_URL='{GIT_URL}'", file=f)
    print(f"export GIT_BRANCH='{GIT_BRANCH}'", file=f)
|
||||
Executable
+127
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later
# shellcheck disable=SC2001

# Remove components of a filtron installation (systemd service, apache and
# nginx sites) that were set up by the scripts of this repository.

# shellcheck source=utils/lib.sh
source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"
# shellcheck source=utils/brand.env
source "${REPO_ROOT}/utils/brand.env"

# ----------------------------------------------------------------------------
# config
# ----------------------------------------------------------------------------

# public URL of the (reverse proxied) service; defaults to the build-time URL
PUBLIC_URL="${PUBLIC_URL:-${SEARXNG_URL}}"

# filtron's configuration folder (removed by remove_all)
FILTRON_ETC="/etc/filtron"

SERVICE_NAME="filtron"
SERVICE_USER="${SERVICE_USER:-${SERVICE_NAME}}"
SERVICE_SYSTEMD_UNIT="${SYSTEMD_UNITS}/${SERVICE_NAME}.service"

# names of the HTTP server sites that proxy requests to filtron
APACHE_FILTRON_SITE="searx.conf"
NGINX_FILTRON_SITE="searx.conf"

# ----------------------------------------------------------------------------
usage() {
# ----------------------------------------------------------------------------

    # Print the usage message; an optional error message ($1) is reported
    # afterwards by err_msg.

    # shellcheck disable=SC1117
    cat <<EOF
usage::
  $(basename "$0") remove all
  $(basename "$0") apache remove
  $(basename "$0") nginx remove

remove all    : drop all components of the filtron service
apache remove : drop apache site ${APACHE_FILTRON_SITE}
nginx remove  : drop nginx site ${NGINX_FILTRON_SITE}

environment:
  PUBLIC_URL : ${PUBLIC_URL}
EOF

    [[ -n ${1} ]] && err_msg "$1"
}
|
||||
|
||||
main() {

    # Command line dispatcher.
    #
    # usage: main {remove|apache|nginx} <command>

    local _usage="unknown or missing $1 command $2"

    case $1 in
        -h|--help) usage; exit 0;;
        remove)
            sudo_or_exit
            case $2 in
                all) remove_all;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        apache)
            sudo_or_exit
            case $2 in
                remove) remove_apache_site ;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        nginx)
            sudo_or_exit
            case $2 in
                remove) remove_nginx_site ;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        *) usage "unknown or missing command $1"; exit 42;;
    esac
}

remove_all() {

    # Drop all components of the filtron service: systemd unit, service
    # account and /etc/filtron.

    rst_title "De-Install $SERVICE_NAME (service)"

    rst_para "\
It goes without saying that this script can only be used to remove
installations that were installed with this script."

    # stop and remove the systemd unit first; abort when that fails
    if ! systemd_remove_service "${SERVICE_NAME}" "${SERVICE_SYSTEMD_UNIT}"; then
        return 42
    fi
    drop_service_account "${SERVICE_USER}"
    rm -r "$FILTRON_ETC" 2>&1 | prefix_stdout
    # remind the admin that the public (reverse proxy) site still exists
    if service_is_available "${PUBLIC_URL}"; then
        MSG="** Don't forget to remove your public site! (${PUBLIC_URL}) **" wait_key 10
    fi
}
|
||||
|
||||
remove_apache_site() {

    # Drop the apache site that proxies requests to the filtron service.

    rst_title "Remove Apache site $APACHE_FILTRON_SITE"
    rst_para "\
This removes apache site ${APACHE_FILTRON_SITE}."

    if ! apache_is_installed; then
        err_msg "Apache is not installed."
    fi

    ask_yn "Do you really want to continue?" Yn || return

    apache_remove_site "$APACHE_FILTRON_SITE"
}
|
||||
|
||||
remove_nginx_site() {

    # Drop the nginx site that proxies requests to the filtron service.

    rst_title "Remove nginx site $NGINX_FILTRON_SITE"

    rst_para "\
This removes nginx site ${NGINX_FILTRON_SITE}."

    ! nginx_is_installed && err_msg "nginx is not installed."

    if ! ask_yn "Do you really want to continue?" Yn; then
        return
    fi

    # BUGFIX: was "$FILTRON_FILTRON_SITE" -- an undefined variable, so the
    # nginx app was removed with an empty name; use the configured site name.
    nginx_remove_app "$NGINX_FILTRON_SITE"

}
|
||||
|
||||
# ----------------------------------------------------------------------------
# script entry point: dispatch the command line arguments
main "$@"
# ----------------------------------------------------------------------------
|
||||
Executable
+1811
File diff suppressed because it is too large
Load Diff
Executable
+211
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/env bash
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# Tools to install and maintain golang [1] binaries & packages.
#
# [1] https://golang.org/doc/devel/release#policy
#
# A simple *helloworld* test with user 'my_user' :
#
#   sudo -H adduser my_user
#   ./manage go.golang go1.17.3 my_user
#   ./manage go.install github.com/go-training/helloworld@latest my_user
#   ./manage go.bash my_user
#   $ helloword
#   Hello World!!
#
# Don't forget to remove 'my_user': sudo -H deluser --remove-home my_user

# shellcheck source=utils/lib.sh
. /dev/null

# configure golang environment
# ----------------------------

# golang release to install (can be overridden from the environment)
[[ -z "${GO_VERSION}" ]] && GO_VERSION="go1.17.3"

# download location of the golang release archives & metadata
GO_DL_URL="https://golang.org/dl"

# implement go functions
# -----------------------

go.help(){
    # print help of the go. commands to stdout
    cat <<EOF
go.:
  ls        : list golang binary archives (stable)
  golang    : (re-) install golang binary in user's \$HOME/local folder
  install   : install go package in user's \$HOME/go-apps folder
  bash      : start bash interpreter with golang environment sourced
EOF
}

go.ls(){
    # List all stable golang binary archives offered at ${GO_DL_URL}.  The
    # release metadata is fetched as JSON and filtered by an inline python3
    # script (requires the 'requests' package).
    python3 <<EOF
import sys, json, requests
resp = requests.get("${GO_DL_URL}/?mode=json&include=all")
for ver in json.loads(resp.text):
    if not ver['stable']:
        continue
    for f in ver['files']:
        if f['kind'] != 'archive' or not f['size'] or not f['sha256'] or len(f['os']) < 2:
            continue
        print(" %(version)-10s|%(os)-8s|%(arch)-8s|%(filename)-30s|%(size)-10s|%(sha256)s" % f)
EOF
}
|
||||
|
||||
go.ver_info(){

    # print information about a golang distribution. To print filename
    # sha256 and size of the archive that fits to your OS and host:
    #
    #    go.ver_info "${GO_VERSION}" archive "$(go.os)" "$(go.arch)" filename sha256 size
    #
    # usage: go.ver_info <go-vers> <kind> <os> <arch> [filename|sha256|size]
    #
    # kind: [archive|source|installer]
    # os:   [darwin|freebsd|linux|windows]
    # arch: [amd64|arm64|386|armv6l|ppc64le|s390x]

    # The release metadata is fetched from ${GO_DL_URL}; the inline python3
    # script prints the requested fields and exits 42 when nothing matches.
    python3 - "$@" <<EOF
import sys, json, requests
resp = requests.get("${GO_DL_URL}/?mode=json&include=all")
for ver in json.loads(resp.text):
    if ver['version'] != sys.argv[1]:
        continue
    for f in ver['files']:
        if (f['kind'] != sys.argv[2] or f['os'] != sys.argv[3] or f['arch'] != sys.argv[4]):
            continue
        for x in sys.argv[5:]:
            print(f[x])
        sys.exit(0)
sys.exit(42)
EOF
}
|
||||
|
||||
go.os() {

    # Print the golang OS name (linux|darwin|freebsd|windows) of this host,
    # derived from 'uname -a'; dies when the OS is not recognized.

    local kernel_info
    kernel_info="$(command uname -a)"
    case "${kernel_info}xx" in
        Linux\ *)   echo "linux"   ;;
        Darwin\ *)  echo "darwin"  ;;
        FreeBSD\ *) echo "freebsd" ;;
        CYGWIN* | MSYS* | MINGW*) echo "windows" ;;
        *) die 42 "OS is unknown: $(command uname -a)" ;;
    esac
}
|
||||
|
||||
go.arch() {

    # Print the golang architecture name of this host
    # (amd64|arm64|armv6l|386|ppc64le), derived from 'uname -m'.

    local ARCH
    case "$(command uname -m)" in
        "x86_64") ARCH=amd64 ;;
        "aarch64") ARCH=arm64 ;;
        # BUGFIX: uname -m reports 'armv6l' (with trailing 'l'), the old
        # pattern "armv6" never matched it; keep the bare name for safety
        "armv6" | "armv6l" | "armv7l") ARCH=armv6l ;;
        "armv8") ARCH=arm64 ;;
        # BUGFIX: was '.*386.*' -- a regex, not a shell glob -- which never
        # matched 'i386'; '*386*' is the correct glob
        *386*) ARCH=386 ;;
        ppc64*) ARCH=ppc64le ;;
        *) die 42 "ARCH is unknown: $(command uname -m)" ;;
    esac
    echo "${ARCH}"
}
|
||||
|
||||
go.golang() {

    # install golang binary in user's $HOME/local folder:
    #
    #    go.golang ${GO_VERSION} ${SERVICE_USER}
    #
    # usage: go.golang <go-vers> [<username>]

    local version fname sha size user userpr
    local buf=()

    version="${1:-${GO_VERSION}}"
    user="${2:-${USERNAME}}"
    userpr=" ${_Yellow}|${user}|${_creset} "

    rst_title "Install Go in ${user}'s HOME" section

    # ask go.ver_info for filename, sha256 and size of the archive that
    # matches this host's OS and architecture
    mapfile -t buf < <(
        go.ver_info "${version}" archive "$(go.os)" "$(go.arch)" filename sha256 size
    )

    if [ ${#buf[@]} -eq 0 ]; then
        die 42 "can't find info of golang version: ${version}"
    fi
    fname="${buf[0]}"
    sha="${buf[1]}"
    size="$(numfmt --to=iec "${buf[2]}")"

    info_msg "Download go binary ${fname} (${size}B)"
    cache_download "${GO_DL_URL}/${fname}" "${fname}"

    # verify the downloaded archive against the published sha256 checksum
    pushd "${CACHE}" &> /dev/null
    echo "${sha}  ${fname}" > "${fname}.sha256"
    if ! sha256sum -c "${fname}.sha256" >/dev/null; then
        die 42 "downloaded file ${fname} checksum does not match"
    else
        info_msg "${fname} checksum OK"
    fi
    popd &> /dev/null

    info_msg "install golang"
    # unpack into ~/local and write ~/.go_env with GOPATH & PATH settings;
    # the \$ / \\\$ escapes defer expansion to the target user's shell
    tee_stderr 0.1 <<EOF | sudo -i -u "${user}" | prefix_stdout "${userpr}"
mkdir -p \$HOME/local
rm -rf \$HOME/local/go
tar -C \$HOME/local -xzf ${CACHE}/${fname}
echo "export GOPATH=\$HOME/go-apps" > \$HOME/.go_env
echo "export PATH=\$HOME/local/go/bin:\\\$GOPATH/bin:\\\$PATH" >> \$HOME/.go_env
EOF
    info_msg "test golang installation"
    sudo -i -u "${user}" <<EOF
source \$HOME/.go_env
command -v go
go version
EOF
}
|
||||
|
||||
go.install() {

    # install go package in user's $HOME/go-apps folder:
    #
    #    go.install github.com/go-training/helloworld@latest ${SERVICE_USER}
    #
    # usage: go.install <package> [<username>]

    local package user userpr

    package="${1}"
    user="${2:-${USERNAME}}"
    userpr=" ${_Yellow}|${user}|${_creset} "

    if [ -z "${package}" ]; then
        die 42 "${FUNCNAME[0]}() - missing argument: <package>"
    fi
    # run 'go install' in the target user's golang environment
    tee_stderr 0.1 <<EOF | sudo -i -u "${user}" | prefix_stdout "${userpr}"
source \$HOME/.go_env
go install -v ${package}
EOF
}

go.bash() {

    # start bash interpreter with golang environment sourced
    #
    #    go.bash ${SERVICE_USER}
    #
    # usage: go.bash [<username>]

    local user
    user="${1:-${USERNAME}}"
    # NOTE(review): '~${user}' is inside quotes here; it is presumably
    # expanded by the login shell that 'sudo -i' spawns -- verify on targets
    # whose login shell does not perform tilde expansion of the command line
    sudo -i -u "${user}" bash --init-file "~${user}/.go_env"
}

go.version(){
    # Print the version of the go binary installed in <username>'s HOME.
    #
    # usage: go.version [<username>]
    local user
    user="${1:-${USERNAME}}"
    sudo -i -u "${user}" <<EOF
source \$HOME/.go_env
go version | cut -d' ' -f 3
EOF
}
|
||||
Executable
+189
@@ -0,0 +1,189 @@
|
||||
#!/usr/bin/env bash
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# Tools to install and maintain NVM versions manager for Node.js
#
# [1] https://github.com/nvm-sh/nvm

# https://github.com/koalaman/shellcheck/issues/356#issuecomment-853515285
# shellcheck source=utils/lib.sh
. /dev/null

# name of the command that sourced this library (set by the caller; used in
# hint messages)
declare main_cmd

# configure nvm environment
# -------------------------

# folder (relative to <repo-root>) of a repo-local NVM installation
NVM_LOCAL_FOLDER=.nvm

[[ -z "${NVM_GIT_URL}" ]] && NVM_GIT_URL="https://github.com/nvm-sh/nvm.git"
[[ -z "${NVM_MIN_NODE_VER}" ]] && NVM_MIN_NODE_VER="16.13.0"

# initialize nvm environment
# -------------------------

nvm.env() {
    # source NVM's shell functions and bash completion from ${NVM_DIR}
    source "${NVM_DIR}/nvm.sh"
    source "${NVM_DIR}/bash_completion"
    [ "$VERBOSE" = "1" ] && info_msg "sourced NVM environment from ${NVM_DIR}"
    # the && chain above leaves a non-zero status when VERBOSE != 1; always
    # report success
    return 0
}

nvm.is_installed() {
    # is true if NVM is installed / in $HOME or even in <repo-root>/.nvm
    [[ -f "${NVM_DIR}/nvm.sh" ]]
}

if [[ -z "${NVM_DIR}" ]]; then
    # nvm is not pre-installed in $HOME. Prepare for using nvm from <repo-root>
    NVM_DIR="$(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}"
fi
export NVM_DIR

if nvm.is_installed; then
    nvm.env
else
    # if nvm is not installed, use this function as a wrapper
    nvm() {
        nvm.ensure
        nvm "$@"
    }
fi
|
||||
|
||||
# implement nvm functions
|
||||
# -----------------------
|
||||
|
||||
nvm.is_local() {
    # is true if NVM is installed in <repo-root>/.nvm
    [ "${NVM_DIR}" = "$(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}" ]
}
|
||||
|
||||
nvm.min_node() {

    # usage: nvm.min_node 16.3.0
    #
    # Is true (exit 0) when the installed Node.js version is at least the
    # given version; returns 42 otherwise (also when node is missing).

    local wanted installed lowest

    if ! command -v node >/dev/null; then
        warn_msg "Node.js is not yet installed"
        return 42
    fi

    wanted="${1}"
    installed="$(node --version)"
    installed="${installed:1}"   # strip the 'v' from e.g. 'v16.3.0'

    # version-sort both; when the smallest of the pair is not the required
    # minimum, the installed version is older than required
    lowest="$(printf '%s\n%s\n' "${wanted}" "${installed}" | sort -V | head -1)"
    if [ "${installed}" != "${wanted}" ] && [ "${lowest}" != "${wanted}" ]; then
        return 42
    fi
}
|
||||
|
||||
# implement nvm command line
|
||||
# --------------------------
|
||||
|
||||
nvm.help() {

    # Print help of the nvm. commands to stdout.

    cat <<EOF
nvm.: use nvm (without dot) to execute nvm commands directly
  install   : install NVM locally at $(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}
  clean     : remove NVM installation
  status    : prompt some status information about nvm & node
  nodejs    : install Node.js latest LTS
  cmd ...   : run command ... in NVM environment
  bash      : start bash interpreter with NVM environment sourced
EOF
}
|
||||
|
||||
nvm.install() {
    # Clone (or update) NVM into ${NVM_DIR}, check out the most recent
    # release tag and source the environment.
    local NVM_VERSION_TAG
    info_msg "install (update) NVM at ${NVM_DIR}"
    if nvm.is_installed; then
        info_msg "already cloned at: ${NVM_DIR}"
        pushd "${NVM_DIR}" &> /dev/null
        git fetch --all | prefix_stdout " ${_Yellow}||${_creset} "
    else
        # delete any leftovers from previous installations
        if nvm.is_local; then
            rm -rf "${NVM_DIR}"
        fi
        info_msg "clone: ${NVM_GIT_URL}"
        git clone "${NVM_GIT_URL}" "${NVM_DIR}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
        pushd "${NVM_DIR}" &> /dev/null
        git config --local advice.detachedHead false
    fi
    # resolve the most recent version tag (v[0-9]*) and check it out
    NVM_VERSION_TAG="$(git rev-list --tags --max-count=1)"
    NVM_VERSION_TAG="$(git describe --abbrev=0 --tags --match "v[0-9]*" "${NVM_VERSION_TAG}")"
    info_msg "checkout ${NVM_VERSION_TAG}"
    git checkout "${NVM_VERSION_TAG}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
    popd &> /dev/null
    # seed NVM's default-packages from the repo's .nvm_packages (if any)
    if [ -f "${REPO_ROOT}/.nvm_packages" ]; then
        cp "${REPO_ROOT}/.nvm_packages" "${NVM_DIR}/default-packages"
    fi
    nvm.env
}
|
||||
|
||||
nvm.clean() {
    # Remove a repo-local NVM installation; a NVM installed elsewhere (e.g.
    # in $HOME) is never touched.
    if ! nvm.is_installed; then
        build_msg CLEAN "[NVM] not installed"
        return
    fi
    if ! nvm.is_local; then
        build_msg CLEAN "[NVM] can't remove nvm from ${NVM_DIR}"
        return
    fi
    if [ -n "${NVM_DIR}" ]; then
        build_msg CLEAN "[NVM] drop $(realpath --relative-to=. "${NVM_DIR}")/"
        rm -rf "${NVM_DIR}"
    fi
}
|
||||
|
||||
nvm.status() {

    # Print the status of the local Node.js / npm / NVM installations.

    if command -v node >/dev/null; then
        info_msg "Node.js is installed at $(command -v node)"
        info_msg "Node.js is version $(node --version)"
        if ! nvm.min_node "${NVM_MIN_NODE_VER}"; then
            warn_msg "minimal Node.js version is ${NVM_MIN_NODE_VER}"
        fi
    else
        # BUGFIX: message typo -- was "Node.js is mot installed"
        warn_msg "Node.js is not installed"
    fi
    if command -v npm >/dev/null; then
        info_msg "npm is installed at $(command -v npm)"
        info_msg "npm is version $(npm --version)"
    else
        warn_msg "npm is not installed"
    fi
    if nvm.is_installed; then
        info_msg "NVM is installed at ${NVM_DIR}"
    else
        warn_msg "NVM is not installed"
        info_msg "to install NVM and Node.js (LTS) use: ${main_cmd} nvm.nodejs"
    fi
}
|
||||
|
||||
nvm.nodejs() {
    # install Node.js (NVM's default version) and print the status
    nvm install
    nvm.status
}

nvm.bash() {
    # start a bash interpreter with the NVM environment sourced
    nvm.ensure
    bash --init-file <(cat "${NVM_DIR}/nvm.sh" "${NVM_DIR}/bash_completion")
}

nvm.cmd() {
    # run the given command line in the NVM environment
    nvm.ensure
    "$@"
}
|
||||
|
||||
nvm.ensure() {
    # Install NVM on demand; a no-op when an installation already exists.
    nvm.is_installed || nvm.install
}
|
||||
Executable
+355
@@ -0,0 +1,355 @@
|
||||
#!/usr/bin/env bash
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# Tools to build and install redis [1] binaries & packages.
#
# [1] https://redis.io/download#installation
#
#  1. redis.devpkg (sudo)
#  2. redis.build
#  3. redis.install (sudo)
#
# systemd commands::
#
#   sudo -H systemctl status searxng-redis
#   sudo -H journalctl -u searxng-redis
#   sudo -H journalctl --vacuum-size=1M
#
# Test socket connection from client (local user)::
#
#   $ sudo -H ./manage redis.addgrp "${USER}"
#   # logout & login to get member of group
#   $ groups
#   ... searxng-redis ...
#   $ source /usr/local/searxng-redis/.redis_env
#   $ which redis-cli
#   /usr/local/searxng-redis/.local/bin/redis-cli
#
#   $ redis-cli -s /usr/local/searxng-redis/redis.sock
#   redis /usr/local/searxng-redis/redis.sock> set foo bar
#   OK
#   redis /usr/local/searxng-redis/redis.sock> get foo
#   "bar"
#   [CTRL-D]


# shellcheck disable=SC2091
# shellcheck source=utils/lib.sh
. /dev/null

# upstream git repository and release tag of the redis sources to build
REDIS_GIT_URL="https://github.com/redis/redis.git"
REDIS_GIT_TAG="${REDIS_GIT_TAG:-6.2.6}"

# service account under which the redis instance runs
REDIS_USER="searxng-redis"
REDIS_GROUP="searxng-redis"

REDIS_HOME="/usr/local/${REDIS_USER}"
REDIS_HOME_BIN="${REDIS_HOME}/.local/bin"
# environment file sourced from the service user's .profile
REDIS_ENV="${REDIS_HOME}/.redis_env"

REDIS_SERVICE_NAME="searxng-redis"
REDIS_SYSTEMD_UNIT="${SYSTEMD_UNITS}/${REDIS_SERVICE_NAME}.service"

# binaries to compile & install
REDIS_INSTALL_EXE=(redis-server redis-benchmark redis-cli)
# link names of redis-server binary
REDIS_LINK_EXE=(redis-sentinel redis-check-rdb redis-check-aof)

# redis.conf written by redis._install_conf: unix socket only, no TCP port
REDIS_CONF="${REDIS_HOME}/redis.conf"
REDIS_CONF_TEMPLATE=$(cat <<EOF
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
#     ./redis-server /path/to/redis.conf

# bind 127.0.0.1 -::1
protected-mode yes

# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
port 0

# Specify the path for the Unix socket that will be used to listen for
# incoming connections.

unixsocket ${REDIS_HOME}/run/redis.sock
unixsocketperm 770

# The working directory.
dir ${REDIS_HOME}/run

# If you run Redis from upstart or systemd, Redis can interact with your
# supervision tree.
supervised auto

pidfile ${REDIS_HOME}/run/redis.pid

# log to the system logger
syslog-enabled yes
EOF
)
|
||||
|
||||
redis.help(){

    # Print help of the redis. commands to stdout.
    # (fixed user-visible typo: 'checkput' --> 'checkout')

    cat <<EOF
redis.:
  devpkg    : install essential packages to compile redis
  build     : build redis binaries at $(redis._get_dist)
  install   : create user (${REDIS_USER}) and install systemd service (${REDIS_SERVICE_NAME})
  remove    : delete user (${REDIS_USER}) and remove service (${REDIS_SERVICE_NAME})
  shell     : start bash interpreter from user ${REDIS_USER}
  src       : clone redis source code to <path> and checkout ${REDIS_GIT_TAG}
  useradd   : create user (${REDIS_USER}) at ${REDIS_HOME}
  userdel   : delete user (${REDIS_USER})
  addgrp    : add <user> to group (${REDIS_USER})
  rmgrp     : remove <user> from group (${REDIS_USER})
EOF
}
|
||||
|
||||
redis.devpkg() {

    # Uses OS package manager to install the essential packages to build and
    # compile sources

    sudo_or_exit

    # DIST_ID is detected by utils/lib.sh
    case ${DIST_ID} in
        ubuntu|debian)
            pkg_install git build-essential gawk
            ;;
        arch)
            pkg_install git base-devel
            ;;
        fedora)
            pkg_install git @development-tools
            ;;
        centos)
            pkg_install git
            yum groupinstall "Development Tools" -y
            ;;
        *)
            err_msg "$DIST_ID-$DIST_VERS: No rules to install development tools from OS."
            return 42
            ;;
    esac
}
|
||||
|
||||
redis.build() {

    # usage: redis.build
    #
    # Clone/update the redis sources, compile them (as the non-root user
    # printed by bash.cmd) and copy the binaries to $(redis._get_dist).

    rst_title "get redis sources" section
    redis.src "${CACHE}/redis"

    if ! required_commands gcc nm make gawk ; then
        info_msg "install development tools to get missing command(s) .."
        # when we run under sudo, install the dev packages as root
        if [[ -n ${SUDO_USER} ]]; then
            sudo -H "$0" redis.devpkg
        else
            redis.devpkg
        fi
    fi

    rst_title "compile redis sources" section

    pushd "${CACHE}/redis" &>/dev/null

    if ask_yn "Do you run 'make distclean' first'?" Yn; then
        $(bash.cmd) -c "make distclean" 2>&1 | prefix_stdout
    fi

    $(bash.cmd) -c "make" 2>&1 | prefix_stdout
    if ask_yn "Do you run 'make test'?" Ny; then
        $(bash.cmd) -c "make test" | prefix_stdout
    fi

    popd &>/dev/null

    # copy the compiled binaries into the dist folder (as non-root user)
    tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
mkdir -p "$(redis._get_dist)"
cd "${CACHE}/redis/src"
cp ${REDIS_INSTALL_EXE[@]} "$(redis._get_dist)"
EOF
    info_msg "redis binaries available at $(redis._get_dist)"
}
|
||||
|
||||
|
||||
redis.install() {
    # Create the service user, install binaries, config and systemd service
    # (requires root).
    sudo_or_exit
    (
        set -e
        redis.useradd
        redis._install_bin
        redis._install_conf
        redis._install_service
    )
    dump_return $?
}

redis.remove() {
    # Remove the systemd service and drop the service account (requires root).
    sudo_or_exit
    (
        set -e
        redis._remove_service
        redis.userdel
    )
    dump_return $?
}

redis.shell() {
    # start an interactive shell as user ${REDIS_USER}
    interactive_shell "${REDIS_USER}"
}
|
||||
|
||||
redis.src() {

    # usage: redis.src "${CACHE}/redis"
    #
    # Clone the redis sources to <dest> (default: ${CACHE}/redis) and pin
    # the checkout to tag ${REDIS_GIT_TAG}.

    local dest="${1:-${CACHE}/redis}"

    if [ -d "${dest}" ] ; then
        # working tree exists: update and hard-reset to the wanted tag
        info_msg "already cloned: $dest"
        tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
cd "${dest}"
git fetch --all
git reset --hard tags/${REDIS_GIT_TAG}
EOF
    else
        # fresh clone, then check out the tag on a local build branch
        tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
mkdir -p "$(dirname "$dest")"
cd "$(dirname "$dest")"
git clone "${REDIS_GIT_URL}" "${dest}"
EOF
        tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
cd "${dest}"
git checkout tags/${REDIS_GIT_TAG} -b "build-branch"
EOF
    fi
}
|
||||
|
||||
redis.useradd(){

    # usage: redis.useradd
    #
    # Create system user ${REDIS_USER} with HOME at ${REDIS_HOME} and write
    # the app environment file (.redis_env), sourced from the user's
    # .profile.

    rst_title "add user ${REDIS_USER}" section
    echo
    sudo_or_exit

    # create user account
    tee_stderr 0.5 <<EOF | sudo -H bash | prefix_stdout
useradd --shell /bin/bash --system \
 --home-dir "${REDIS_HOME}" \
 --comment 'user that runs a redis instance' "${REDIS_USER}"
mkdir -p "${REDIS_HOME}"
chown -R "${REDIS_USER}:${REDIS_GROUP}" "${REDIS_HOME}"
groups "${REDIS_USER}"
EOF

    # create App-ENV and add source it in the .profile
    tee_stderr 0.5 <<EOF | sudo -H -u "${REDIS_USER}" bash | prefix_stdout
mkdir -p "${REDIS_HOME_BIN}"
echo "export PATH=${REDIS_HOME_BIN}:\\\$PATH" > "${REDIS_ENV}"
grep -qFs -- 'source "${REDIS_ENV}"' ~/.profile || echo 'source "${REDIS_ENV}"' >> ~/.profile
EOF
}
|
||||
|
||||
redis.userdel() {
    # Drop the service account and its group (requires root).
    sudo_or_exit
    drop_service_account "${REDIS_USER}"
    # the group may already be gone with the account -- ignore failures
    groupdel "${REDIS_GROUP}" 2>&1 | prefix_stdout || true
}

redis.addgrp() {

    # usage: redis.addgrp <user>
    #
    # Add <user> to group ${REDIS_GROUP} (access to redis' unix socket).

    [[ -z $1 ]] && die_caller 42 "missing argument <user>"
    sudo -H gpasswd -a "$1" "${REDIS_GROUP}"
}

redis.rmgrp() {

    # usage: redis.rmgrp <user>
    #
    # Remove <user> from group ${REDIS_GROUP}.

    [[ -z $1 ]] && die_caller 42 "missing argument <user>"
    sudo -H gpasswd -d "$1" "${REDIS_GROUP}"

}
|
||||
|
||||
|
||||
# private redis. functions
|
||||
# ------------------------
|
||||
|
||||
redis._install_bin() {
    # Copy the compiled binaries from $(redis._get_dist) into
    # ${REDIS_HOME_BIN} and create the usual redis-server symlinks.
    local src
    src="$(redis._get_dist)"
    (
        set -e
        for redis_exe in "${REDIS_INSTALL_EXE[@]}"; do
            install -v -o "${REDIS_USER}" -g "${REDIS_GROUP}" \
                "${src}/${redis_exe}" "${REDIS_HOME_BIN}"
        done

        pushd "${REDIS_HOME_BIN}" &> /dev/null
        for redis_exe in "${REDIS_LINK_EXE[@]}"; do
            info_msg "link redis-server --> ${redis_exe}"
            sudo -H -u "${REDIS_USER}" ln -sf redis-server "${redis_exe}"
        done
        popd &> /dev/null

    )
}

redis._install_conf() {
    # Write ${REDIS_CONF} from the template (as user ${REDIS_USER}).
    sudo -H -u "${REDIS_USER}" bash <<EOF
mkdir -p "${REDIS_HOME}/run"
echo '${REDIS_CONF_TEMPLATE}' > "${REDIS_CONF}"
EOF
}

redis._install_service() {
    # install & enable the systemd unit of the redis service
    systemd_install_service "${REDIS_SERVICE_NAME}" "${REDIS_SYSTEMD_UNIT}"
}

redis._remove_service() {
    # stop & remove the systemd unit of the redis service
    systemd_remove_service "${REDIS_SERVICE_NAME}" "${REDIS_SYSTEMD_UNIT}"
}
|
||||
|
||||
redis._get_dist() {

    # Print the folder that holds the compiled redis binaries; an existing
    # REDIS_DIST environment setting overrides the default location below
    # <repo-root>/dist.

    if [ -n "${REDIS_DIST}" ]; then
        echo "${REDIS_DIST}"
        return
    fi
    echo "${REPO_ROOT}/dist/redis/${REDIS_GIT_TAG}/$(redis._arch)"
}
|
||||
|
||||
redis._arch() {

    # Print the architecture name of this host (amd64|arm64|armv6l|386|ppc64le),
    # derived from 'uname -m'.

    local ARCH
    case "$(command uname -m)" in
        "x86_64") ARCH=amd64 ;;
        "aarch64") ARCH=arm64 ;;
        # BUGFIX: uname -m reports 'armv6l' (with trailing 'l'), the old
        # pattern "armv6" never matched it; keep the bare name for safety
        "armv6" | "armv6l" | "armv7l") ARCH=armv6l ;;
        "armv8") ARCH=arm64 ;;
        # BUGFIX: was '.*386.*' -- a regex, not a shell glob -- which never
        # matched 'i386'; '*386*' is the correct glob
        *386*) ARCH=386 ;;
        ppc64*) ARCH=ppc64le ;;
        *) die 42 "ARCH is unknown: $(command uname -m)" ;;
    esac
    echo "${ARCH}"
}
|
||||
|
||||
# TODO: move this to the right place ..
|
||||
|
||||
bash.cmd(){

    # Print a command that starts bash as a non-root user, even if we are in
    # a sudo context.
    # (removed dead local 'user' -- it was assigned but never used)

    local bash_cmd="bash"

    if [ -n "${SUDO_USER}" ] && [ "root" != "${SUDO_USER}" ] ; then
        # drop back to the account that invoked sudo
        bash_cmd="sudo -H -u ${SUDO_USER} bash"
    fi

    printf "%s" "${bash_cmd}"
}
|
||||
Executable
+62
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
data.help(){
    # print help of the data. commands to stdout
    cat <<EOF
data.:
  all       : update searx/sxng_locales.py and searx/data/*
  traits    : update searx/data/engine_traits.json & searx/sxng_locales.py
  useragents: update searx/data/useragents.json with the most recent versions of Firefox
EOF
}
|
||||
|
||||
data.all() {

    # Update searx/sxng_locales.py and all data files in searx/data/*.
    #
    # Consistency fixes: the osm_keys_tags update now uses a bare 'python'
    # like its siblings (the subshell has already run pyenv.activate, so
    # 'pyenv.cmd python' was redundant), and the result is reported through
    # dump_return like every other data.* command.

    ( set -e

      pyenv.activate
      data.traits
      data.useragents

      build_msg DATA "update searx/data/osm_keys_tags.json"
      python searxng_extra/update/update_osm_keys_tags.py
      build_msg DATA "update searx/data/ahmia_blacklist.txt"
      python searxng_extra/update/update_ahmia_blacklist.py
      build_msg DATA "update searx/data/wikidata_units.json"
      python searxng_extra/update/update_wikidata_units.py
      build_msg DATA "update searx/data/currencies.json"
      python searxng_extra/update/update_currencies.py
      build_msg DATA "update searx/data/external_bangs.json"
      python searxng_extra/update/update_external_bangs.py
      build_msg DATA "update searx/data/engine_descriptions.json"
      python searxng_extra/update/update_engine_descriptions.py
    )
    dump_return $?
}
|
||||
|
||||
|
||||
data.traits() {
    # update engine traits (and locales) from the origin engines
    ( set -e
      pyenv.activate
      build_msg DATA "update searx/data/engine_traits.json"
      python searxng_extra/update/update_engine_traits.py
      # NOTE(review): presumably update_engine_traits.py also rewrites
      # searx/sxng_locales.py -- no separate command runs here; confirm
      build_msg ENGINES "update searx/sxng_locales.py"
    )
    dump_return $?
}

data.useragents() {
    # fetch the most recent Firefox versions for the useragents data file
    build_msg DATA "update searx/data/useragents.json"
    pyenv.cmd python searxng_extra/update/update_firefox_version.py
    dump_return $?
}

docs.prebuild() {
    # build the reST include files required by the documentation build
    build_msg DOCS "build ${DOCS_BUILD}/includes"
    (
        set -e
        [ "$VERBOSE" = "1" ] && set -x
        mkdir -p "${DOCS_BUILD}/includes"
        ./utils/searxng.sh searxng.doc.rst > "${DOCS_BUILD}/includes/searxng.rst"
        pyenv.cmd searxng_extra/docs_prebuild
    )
    dump_return $?
}
|
||||
Executable
+51
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
export NODE_MINIMUM_VERSION="16.13.0"
|
||||
|
||||
node.help(){
    # Print usage of the node.* command family to stdout.
    cat <<EOF
node.:
  env       : download & install SearXNG's npm dependencies locally
  env.dev   : download & install developer and CI tools
  clean     : drop locally npm installations
EOF
}
|
||||
|
||||
nodejs.ensure() {
    # Make sure a Node.js >= $NODE_MINIMUM_VERSION is available; when the
    # installed node is too old (or missing), install one via NVM.
    nvm.min_node "${NODE_MINIMUM_VERSION}" && return 0
    info_msg "install Node.js by NVM"
    nvm.nodejs
}
|
||||
|
||||
node.env() {
    # Install the npm dependencies of the simple theme locally
    # (searx/static/themes/simple/node_modules).
    nodejs.ensure
    ( set -e
      build_msg INSTALL "./searx/static/themes/simple/package.json"
      npm --prefix searx/static/themes/simple install
    )
    dump_return $?
}
|
||||
|
||||
node.env.dev() {
    # Install developer and CI tools from the repo's top-level package.json.
    #
    # Fix: report npm's exit status via dump_return, consistent with the
    # sibling node.env command (the returned exit code is unchanged).
    nodejs.ensure
    build_msg INSTALL "./package.json: developer and CI tools"
    npm install
    dump_return $?
}
|
||||
|
||||
node.clean() {

    # Drop all locally installed npm dependencies (theme + developer tools).
    #
    # Fix: the original reported only the exit code of the *second* clean
    # step; a failure of the theme clean was silently swallowed.  Track both
    # subshells and report the first failure.

    if ! required_commands npm 2>/dev/null; then
        build_msg CLEAN "npm is not installed / ignore npm dependencies"
        return 0
    fi

    local ret=0

    build_msg CLEAN "themes -- locally installed npm dependencies"
    ( set -e
      npm --prefix searx/static/themes/simple run clean
    ) || ret=$?

    build_msg CLEAN "locally installed developer and CI tools"
    ( set -e
      npm --prefix . run clean
    ) || ret=$?

    dump_return $ret
}
|
||||
Executable
+129
@@ -0,0 +1,129 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
|
||||
STATIC_BUILD_COMMIT="[build] /static"
|
||||
STATIC_BUILT_PATHS=(
|
||||
'searx/static/themes/simple/css'
|
||||
'searx/static/themes/simple/js'
|
||||
'searx/static/themes/simple/src/generated/pygments.less'
|
||||
'searx/static/themes/simple/img'
|
||||
'searx/templates/simple/searxng-wordmark.min.svg'
|
||||
'searx/templates/simple/icons.html'
|
||||
)
|
||||
|
||||
static.help(){
    # Print usage of the static.build.* commands; the header shows the
    # commit message used to mark build commits ($STATIC_BUILD_COMMIT).
    cat <<EOF
static.build.:  ${STATIC_BUILD_COMMIT}
  commit    : build & commit /static folder
  drop      : drop last commit if it was previously done by static.build.commit
  restore   : git restore of the /static folder (after themes.all)
EOF
}
|
||||
|
||||
is.static.build.commit() {

    # Test whether commit $1 is a "[build] /static" commit: its commit
    # message must equal $STATIC_BUILD_COMMIT and every file it touches
    # must live below one of the $STATIC_BUILT_PATHS.
    #
    # Returns 0 on match, 1 on a foreign commit message, 2 when the commit
    # touches files outside the built paths.

    local commit_sha="$1"
    local commit_message
    local commit_files

    # check commit message
    commit_message=$(git show -s --format=%s "${commit_sha}")
    if [ "${commit_message}" != "${STATIC_BUILD_COMMIT}" ]; then
        err_msg "expecting commit message: '${STATIC_BUILD_COMMIT}'"
        err_msg "commit message of ${commit_sha} is: '${commit_message}'"
        return 1
    fi

    # check all files of the commit belongs to $STATIC_BUILT_PATHS
    commit_files=$(git diff-tree --no-commit-id --name-only -r "${commit_sha}")
    for i in ${STATIC_BUILT_PATHS[*]}; do
        # remove files of ${STATIC_BUILT_PATHS}; what is left over at the
        # end of the loop does not belong to a build commit
        commit_files=$(echo "${commit_files}" | grep -v "^${i}")
    done

    if [ -n "${commit_files}" ]; then
        err_msg "commit ${commit_sha} contains files not a part of ${STATIC_BUILD_COMMIT}"
        echo "${commit_files}" | prefix_stdout "  "
        return 2
    fi
    return 0
}
|
||||
|
||||
static.build.drop() {

    # Drop the last commit if it was made by the static.build.commit command.
    #
    # Fix: the original used 'if ! is.static.build.commit ...; then return $?'
    # -- inside the 'then' branch $? holds the status of the *negated* test
    # (always 0), so the error code of is.static.build.commit was lost and
    # the function wrongly reported success.

    local last_commit_id
    local branch

    build_msg STATIC "drop last commit if it was previously done by static.build.commit"

    # get only last (option -n1) local commit not in remotes
    branch="$(git branch --show-current)"
    last_commit_id="$(git log -n1 "${branch}" --pretty=format:'%h'\
        --not --exclude="${branch}" --branches --remotes)"

    if [ -z "${last_commit_id}" ]; then
        err_msg "there are no local commits"
        return 1
    fi

    # propagate the real error code (1 or 2) when the last local commit is
    # not a static-build commit
    is.static.build.commit "${last_commit_id}" || return $?

    build_msg STATIC "drop last commit ${last_commit_id}"
    git reset --hard HEAD~1
}
|
||||
|
||||
static.build.commit() {

    # Call the "static.build.drop" command, then "themes.all" then commit the
    # built files ($STATIC_BUILT_PATHS) in one dedicated build commit.
    # Requires a clean working tree (nothing modified, nothing staged).

    build_msg STATIC "build & commit /static files"

    # check for not committed files
    if [ -n "$(git diff --name-only)" ]; then
        err_msg "some files are not committed:"
        git diff --name-only | prefix_stdout "  "
        return 1
    fi

    # check for staged files
    if [ -n "$(git diff --name-only --cached)" ]; then
        err_msg "some files are staged:"
        git diff --name-only --cached | prefix_stdout "  "
        return 1
    fi

    # drop existing commit from previous build (best effort; a missing build
    # commit is not an error here)
    static.build.drop &>/dev/null

    ( set -e
      # build the themes
      themes.all

      # add build files
      for built_path in "${STATIC_BUILT_PATHS[@]}"; do
          git add -v "${built_path}"
      done

      # check if any file has been added (in case of no changes)
      if [ -z "$(git diff --name-only --cached)" ]; then
          build_msg STATIC "no changes applied / nothing to commit"
          # 'return' leaves only the subshell; the function returns 0
          return 0
      fi

      # check for modified files that are not staged
      if [ -n "$(git diff --name-only)" ]; then
          die 42 "themes.all has created files that are not in STATIC_BUILT_PATHS"
      fi
      git commit -m "${STATIC_BUILD_COMMIT}"
    )
}
|
||||
|
||||
static.build.restore() {
    # Discard staged and worktree changes of the built files (/static),
    # typically after a local themes.all run.
    build_msg STATIC "git-restore of the built files (/static)"
    git restore --staged "${STATIC_BUILT_PATHS[@]}"
    git restore --worktree "${STATIC_BUILT_PATHS[@]}"
}
|
||||
@@ -0,0 +1,111 @@
|
||||
test.help(){
    # Print usage of the test.* command family to stdout.
    cat <<EOF
test.:
  yamllint  : lint YAML files (YAMLLINT_FILES)
  pylint    : lint PYLINT_FILES, searx/engines, searx & tests
  pyright   : static type check of python sources
  black     : check black code format
  unit      : run unit tests
  coverage  : run unit tests with coverage
  robot     : run robot test
  rst       : test .rst files incl. README.rst
  clean     : clean intermediate test stuff
EOF
}
|
||||
|
||||
test.yamllint() {
    # Lint the YAML files listed in $YAMLLINT_FILES (strict mode, parsable
    # output for CI).
    build_msg TEST "[yamllint] \$YAMLLINT_FILES"
    pyenv.cmd yamllint --strict --format parsable "${YAMLLINT_FILES[@]}"
    dump_return $?
}
|
||||
|
||||
test.pylint() {
    # Run pylint in three passes: the files in $PYLINT_FILES, the engine
    # modules (searx/engines, with engine builtins declared) and the
    # remaining searx & tests sources.
    # NOTE(review): ${PYLINT_OPTIONS} presumably expands to '-m pylint ...'
    # (defined elsewhere) -- verify before changing these invocations.
    # shellcheck disable=SC2086
    ( set -e
      build_msg TEST "[pylint] \$PYLINT_FILES"
      pyenv.activate
      python ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
          --additional-builtins="${PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES}" \
          "${PYLINT_FILES[@]}"

      build_msg TEST "[pylint] searx/engines"
      python ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
          --disable="${PYLINT_SEARXNG_DISABLE_OPTION}" \
          --additional-builtins="${PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES}" \
          searx/engines

      build_msg TEST "[pylint] searx tests"
      python ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
          --disable="${PYLINT_SEARXNG_DISABLE_OPTION}" \
          --ignore=searx/engines \
          searx tests
    )
    dump_return $?
}
|
||||
|
||||
test.pyright() {
    # Static type check with pyright (installed by node.env.dev); known
    # false positives caused by intentional monkey patching are filtered.
    # NOTE(review): dump_return sees the exit code of the *last* grep in the
    # pipeline, not of pyright itself -- pyright failures may be masked.
    build_msg TEST "[pyright] static type check of python sources"
    node.env.dev
    # We run Pyright in the virtual environment because Pyright
    # executes "python" to determine the Python version.
    build_msg TEST "[pyright] suppress warnings related to intentional monkey patching"
    pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json \
        | grep -v ".py$" \
        | grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
        | grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
        | grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
        | grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
        | grep -v '/engines/.*.py.* - warning: "categories" is not defined'
    dump_return $?
}
|
||||
|
||||
test.black() {
    # Check (without rewriting) black code formatting of $BLACK_TARGETS.
    build_msg TEST "[black] \$BLACK_TARGETS"
    pyenv.cmd black --check --diff "${BLACK_OPTIONS[@]}" "${BLACK_TARGETS[@]}"
    dump_return $?
}
|
||||
|
||||
test.unit() {
    # Run the unit tests below tests/unit with nose2.
    build_msg TEST 'tests/unit'
    pyenv.cmd python -m nose2 -s tests/unit
    dump_return $?
}
|
||||
|
||||
test.coverage() {
    # Run the unit tests under coverage, then render the text and HTML
    # coverage reports.
    build_msg TEST 'unit test coverage'
    ( set -e
      pyenv.activate
      python -m nose2 -C --log-capture --with-coverage --coverage searx -s tests/unit
      coverage report
      coverage html
    )
    dump_return $?
}
|
||||
|
||||
test.robot() {
    # Run the browser based robot tests (requires geckodriver / Firefox;
    # gecko.driver installs the driver if needed).
    build_msg TEST 'robot'
    gecko.driver
    PYTHONPATH=. pyenv.cmd python -m tests.robot
    dump_return $?
}
|
||||
|
||||
test.rst() {
    # Render every file from $RST_FILES with docutils' rst2html.py and stop
    # hard (exit 42) at the first file with broken reST markup.
    build_msg TEST "[reST markup] ${RST_FILES[*]}"
    for rst in "${RST_FILES[@]}"; do
        if ! pyenv.cmd rst2html.py --halt error "$rst" > /dev/null; then
            die 42 "fix issue in $rst"
        fi
    done
}
|
||||
|
||||
test.pybabel() {
    # Smoke test of message extraction: run pybabel extract into a scratch
    # folder below build/test.
    # NOTE(review): TEST_BABEL_FOLDER is not declared 'local' and leaks into
    # the caller's environment -- confirm nothing depends on that.
    TEST_BABEL_FOLDER="build/test/pybabel"
    build_msg TEST "[extract messages] pybabel"
    mkdir -p "${TEST_BABEL_FOLDER}"
    pyenv.cmd pybabel extract -F babel.cfg -o "${TEST_BABEL_FOLDER}/messages.pot" searx
}
|
||||
|
||||
test.clean() {
    # Remove intermediate test artifacts (geckodriver log, coverage data).
    build_msg CLEAN "test stuff"
    rm -rf geckodriver.log .coverage coverage/
    dump_return $?
}
|
||||
|
||||
Executable
+65
@@ -0,0 +1,65 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
declare _Blue
|
||||
declare _creset
|
||||
|
||||
themes.help(){
    # Print usage of the themes.* command family to stdout.
    cat <<EOF
themes.:
  all       : build all themes
  live      : to get live builds of CSS & JS use 'LIVE_THEME=simple make run'
  simple.:
    build   : build simple theme
    test    : test simple theme
EOF
}
|
||||
|
||||
themes.all() {
    # Build all themes (currently only 'simple'): regenerate the pygments
    # stylesheet, install npm dependencies, then build the theme.
    ( set -e
      pygments.less
      node.env
      themes.simple
    )
    dump_return $?
}
|
||||
|
||||
themes.live() {

    # Live (watch) build of a theme's CSS & JS, used by
    # 'LIVE_THEME=simple make run'.  The theme name comes from the
    # LIVE_THEME environment variable or from the first argument.
    #
    # Fix: the status messages used "$1", which is empty when the theme is
    # selected via the LIVE_THEME environment variable -- use ${LIVE_THEME}.

    local LIVE_THEME="${LIVE_THEME:-${1}}"
    case "${LIVE_THEME}" in
        simple)
            theme="searx/static/themes/${LIVE_THEME}"
            ;;
        '')
            die_caller 42 "missing theme argument"
            ;;
        *)
            die_caller 42 "unknown theme '${LIVE_THEME}' // [simple]'"
            ;;
    esac
    build_msg GRUNT "theme: ${LIVE_THEME} (live build)"
    nodejs.ensure
    cd "${theme}"
    {
        npm install
        npm run watch
    } 2>&1 \
        | prefix_stdout "${_Blue}THEME ${LIVE_THEME} ${_creset} " \
        | grep -E --ignore-case --color 'error[s]?[:]? |warning[s]?[:]? |'
}
|
||||
|
||||
themes.simple() {
    # Build the simple theme (CSS & JS) via its npm 'build' script.
    ( set -e
      build_msg GRUNT "theme: simple"
      npm --prefix searx/static/themes/simple run build
    )
    dump_return $?
}
|
||||
|
||||
themes.simple.test() {
    # Run the JS test suite of the simple theme (npm dependencies are
    # installed first).
    build_msg TEST "theme: simple"
    nodejs.ensure
    npm --prefix searx/static/themes/simple install
    npm --prefix searx/static/themes/simple run test
    dump_return $?
}
|
||||
Executable
+211
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
weblate.help(){
    # Print usage of the weblate.* command family to stdout.
    cat <<EOF
weblate.:
  push.translations: push translation changes from SearXNG to Weblate's counterpart
  to.translations: Update 'translations' branch with last additions from Weblate.
EOF
}
|
||||
|
||||
TRANSLATIONS_WORKTREE="$CACHE/translations"
|
||||
|
||||
weblate.translations.worktree() {

    # Create git worktree ${TRANSLATIONS_WORKTREE} and checkout branch
    # 'translations' from Weblate's counterpart (weblate) of the SearXNG
    # (origin).
    #
    #     remote weblate https://translate.codeberg.org/git/searxng/searxng/

    ( set -e
      # register the 'weblate' remote on first use
      if ! git remote get-url weblate 2> /dev/null; then
          git remote add weblate https://translate.codeberg.org/git/searxng/searxng/
      fi
      if [ -d "${TRANSLATIONS_WORKTREE}" ]; then
          # worktree already exists: reset it and pull latest 'translations'
          pushd .
          cd "${TRANSLATIONS_WORKTREE}"
          git reset --hard HEAD
          git pull origin translations
          popd
      else
          mkdir -p "${TRANSLATIONS_WORKTREE}"
          git worktree add "${TRANSLATIONS_WORKTREE}" translations
      fi
    )
}
|
||||
|
||||
weblate.to.translations() {

    # Update 'translations' branch of SearXNG (origin) with last additions
    # from Weblate.
    #
    # 1. Check if Weblate is locked, if not die with error message
    # 2. On Weblate's counterpart (weblate), pull master and translations
    #    branch from SearXNG (origin).
    # 3. Commit changes made in a Weblate object on Weblate's counterpart
    #    (weblate).
    # 4. In translations worktree, merge changes of branch 'translations'
    #    from remote 'weblate' and push it on branch 'translations' of
    #    'origin'.

    ( set -e
      pyenv.activate
      # the caller is responsible for locking weblate beforehand
      if [ "$(wlc lock-status)" != "locked: True" ]; then
          die 1 "weblate must be locked, currently: $(wlc lock-status)"
      fi
      # weblate: commit pending changes
      wlc pull
      wlc commit

      # get the translations in a worktree
      weblate.translations.worktree

      pushd "${TRANSLATIONS_WORKTREE}"
      git remote update weblate
      git merge weblate/translations
      git push
      popd
    )
    dump_return $?
}
|
||||
|
||||
weblate.translations.commit() {

    # Update 'translations' branch of SearXNG (origin) with last additions
    # from Weblate.  Copy the changes to the master branch, compile
    # translations and create a commit in the local branch (master).
    # Weblate is locked during the procedure and always unlocked afterwards,
    # even when a step failed.

    local existing_commit_hash commit_body commit_message exitcode
    ( set -e
      pyenv.activate
      # lock change on weblate
      wlc lock

      # get translations branch in git worktree (TRANSLATIONS_WORKTREE)
      weblate.translations.worktree
      existing_commit_hash=$(cd "${TRANSLATIONS_WORKTREE}"; git log -n1 --pretty=format:'%h')

      # pull weblate commits
      weblate.to.translations

      # copy the changes to the master branch
      cp -rv --preserve=mode,timestamps "${TRANSLATIONS_WORKTREE}/searx/translations" "searx"

      # compile translations
      build_msg BABEL 'compile translation catalogs into binary MO files'
      pybabel compile --statistics \
          -d "searx/translations"
      # git add/commit (no push)
      commit_body=$(cd "${TRANSLATIONS_WORKTREE}"; git log --pretty=format:'%h - %as - %aN <%ae>' "${existing_commit_hash}..HEAD")
      commit_message=$(echo -e "[translations] update from Weblate\n\n${commit_body}")
      git add searx/translations
      git commit -m "${commit_message}"
    )
    exitcode=$?
    (   # make sure to always unlock weblate
        set -e
        pyenv.cmd wlc unlock
    )
    dump_return $exitcode
}
|
||||
|
||||
weblate.push.translations() {

    # Push *translation changes* from SearXNG (origin) to Weblate's
    # counterpart (weblate).
    #
    # In branch master of SearXNG (origin) check for meaningful changes in
    # folder 'searx/translations', commit changes on branch 'translations'
    # and at least, pull updated branches on Weblate's counterpart (weblate).
    #
    # 1. Create git worktree ${TRANSLATIONS_WORKTREE} and checkout branch
    #    'translations' from remote 'weblate'.
    # 2. Stop if there is no meaningful change in the 'master' branch
    #    (origin), compared to the 'translations' branch (weblate),
    #    otherwise ...
    # 3. Update 'translations' branch of SearXNG (origin) with last
    #    additions from Weblate.
    # 5. Notify Weblate to pull updated 'master' & 'translations' branch.

    local messages_pot diff_messages_pot last_commit_hash last_commit_detail \
          exitcode
    messages_pot="${TRANSLATIONS_WORKTREE}/searx/translations/messages.pot"
    ( set -e
      pyenv.activate
      # get translations branch in git worktree (TRANSLATIONS_WORKTREE)
      weblate.translations.worktree

      # update messages.pot in the master branch
      build_msg BABEL 'extract messages from source files and generate POT file'
      pybabel extract -F babel.cfg \
          -o "${messages_pot}" \
          "searx/"

      # stop if there is no meaningful change in the master branch; exit
      # code 42 signals "nothing to do" to the code after the subshell
      diff_messages_pot=$(cd "${TRANSLATIONS_WORKTREE}";\
                          git diff -- "searx/translations/messages.pot")
      if ! echo "$diff_messages_pot" | grep -qE "[\+\-](msgid|msgstr)"; then
          build_msg BABEL 'no changes detected, exiting'
          return 42
      fi
      return 0
    )
    exitcode=$?
    if [ "$exitcode" -eq 42 ]; then
        # nothing to push -- not an error
        return 0
    fi
    if [ "$exitcode" -gt 0 ]; then
        return $exitcode
    fi
    (
        set -e
        pyenv.activate

        # lock change on weblate
        # weblate may add commit(s) since the call to "weblate.translations.worktree".
        # this is not a problem because after this line, "weblate.to.translations"
        # calls again "weblate.translations.worktree" which calls "git pull"
        wlc lock

        # save messages.pot in the translations branch for later
        pushd "${TRANSLATIONS_WORKTREE}"
        git stash push
        popd

        # merge weblate commits into the translations branch
        weblate.to.translations

        # restore messages.pot in the translations branch
        pushd "${TRANSLATIONS_WORKTREE}"
        git stash pop
        popd

        # update messages.po files in the master branch
        build_msg BABEL 'update existing message catalogs from POT file'
        pybabel update -N \
            -i "${messages_pot}" \
            -d "${TRANSLATIONS_WORKTREE}/searx/translations"

        # git add/commit/push
        last_commit_hash=$(git log -n1 --pretty=format:'%h')
        last_commit_detail=$(git log -n1 --pretty=format:'%h - %as - %aN <%ae>' "${last_commit_hash}")

        pushd "${TRANSLATIONS_WORKTREE}"
        git add searx/translations
        git commit \
            -m "[translations] update messages.pot and messages.po files" \
            -m "From ${last_commit_detail}"
        git push
        popd

        # notify weblate to pull updated master & translations branch
        wlc pull
    )
    exitcode=$?
    (   # make sure to always unlock weblate
        set -e
        pyenv.activate
        wlc unlock
    )
    dump_return $exitcode
}
|
||||
Executable
+573
@@ -0,0 +1,573 @@
|
||||
#!/usr/bin/env bash
|
||||
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
# shellcheck source=utils/lib.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"
|
||||
# shellcheck source=utils/brand.env
|
||||
source "${REPO_ROOT}/utils/brand.env"
|
||||
|
||||
# load environment of the LXC suite
|
||||
LXC_ENV="${LXC_ENV:-${REPO_ROOT}/utils/lxc-searxng.env}"
|
||||
source "$LXC_ENV"
|
||||
lxc_set_suite_env
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# config
|
||||
# ----------------------------------------------------------------------------
|
||||
#
|
||||
# read also:
|
||||
# - https://lxd.readthedocs.io/en/latest/
|
||||
|
||||
LXC_HOST_PREFIX="${LXC_HOST_PREFIX:-test}"

# Location in the container where all folders from HOST are mounted
LXC_SHARE_FOLDER="/share"
LXC_REPO_ROOT="${LXC_SHARE_FOLDER}/$(basename "${REPO_ROOT}")"

# Per-distro boilerplate: shell snippets run once in a fresh container to
# install git, curl, wget & sudo.  They are referenced *by name* via
# indirect expansion ("${image}_boilerplate") in build_container, hence
# the SC2034 (unused variable) suppressions.

# shellcheck disable=SC2034
ubu2004_boilerplate="
export DEBIAN_FRONTEND=noninteractive
apt-get update -y
apt-get upgrade -y
apt-get install -y git curl wget
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

# shellcheck disable=SC2034
ubu2204_boilerplate="$ubu2004_boilerplate"

# shellcheck disable=SC2034
archlinux_boilerplate="
pacman --noprogressbar -Syu --noconfirm
pacman --noprogressbar -S --noconfirm inetutils git curl wget sudo
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

# shellcheck disable=SC2034
fedora35_boilerplate="
dnf update -y
dnf install -y git curl wget hostname
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

# shellcheck disable=SC2034
centos7_boilerplate="
yum update -y
yum install -y git curl wget hostname sudo which
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

REMOTE_IMAGES=()
CONTAINERS=()
LOCAL_IMAGES=()

# LXC_SUITE is a flat list of (remote-image, local-name) pairs; split it
# into three parallel arrays.
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
    REMOTE_IMAGES=("${REMOTE_IMAGES[@]}" "${LXC_SUITE[i]}")
    CONTAINERS=("${CONTAINERS[@]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}")
    LOCAL_IMAGES=("${LOCAL_IMAGES[@]}" "${LXC_SUITE[i+1]}")
done

# host user/group, mapped into the containers (custom idmap)
HOST_USER="${SUDO_USER:-$USER}"
HOST_USER_ID=$(id -u "${HOST_USER}")
HOST_GROUP_ID=$(id -g "${HOST_USER}")
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
# ----------------------------------------------------------------------------

    # Print the command line usage of this script to stdout; an optional
    # error message ($1) is appended via err_msg.

    _cmd="$(basename "$0")"
    cat <<EOF
usage::
  $_cmd build        [containers|<name>]
  $_cmd copy         [images]
  $_cmd remove       [containers|<name>|images]
  $_cmd [start|stop] [containers|<name>]
  $_cmd show         [images|suite|info|config [<name>]]
  $_cmd cmd          [--|<name>] '...'
  $_cmd install      [suite|base [<name>]]

build
  :containers:   build, launch all containers and 'install base' packages
  :<name>:       build, launch container <name> and 'install base' packages
copy:
  :images:       copy remote images of the suite into local storage
remove
  :containers:   delete all 'containers' or only <container-name>
  :images:       delete local images of the suite
start/stop
  :containers:   start/stop all 'containers' from the suite
  :<name>:       start/stop container <name> from suite
show
  :info:         show info of all (or <name>) containers from LXC suite
  :config:       show config of all (or <name>) containers from the LXC suite
  :suite:        show services of all (or <name>) containers from the LXC suite
  :images:       show information of local images
cmd
  use single quotes to evaluate in container's bash, e.g.: 'echo \$(hostname)'
  --             run command '...' in all containers of the LXC suite
  :<name>:       run command '...' in container <name>
install
  :base:         prepare LXC; install basic packages
  :suite:        install LXC ${LXC_SUITE_NAME} suite into all (or <name>) containers

EOF
    usage_containers
    [ -n "${1+x}" ] && err_msg "$1"
}
|
||||
|
||||
usage_containers() {
    # Print the suite's container related usage and an optional error
    # message ($1).
    lxc_suite_install_info
    [ -n "${1+x}" ] && err_msg "$1"
}
|
||||
|
||||
lxd_info() {

    # Print a hint how to install & initialize LXD (snap based setup).

    cat <<EOF

LXD is needed, to install run::

  snap install lxd
  lxd init --auto

EOF
}
|
||||
|
||||
main() {

    # Command line dispatcher of utils/lxc.sh.
    #
    # Fix: in the 'start|stop' branch the prefix_stdout tag interpolated a
    # stale/undefined loop variable ${i} instead of the container name $2.

    local exit_val
    local _usage="unknown or missing $1 command $2"

    # don't check prerequisite when in recursion
    if [[ ! $1 == __* ]] && [[ ! $1 == --help ]]; then
        if ! in_container; then
            ! required_commands lxc && lxd_info && exit 42
        fi
        [[ -z $LXC_SUITE ]] && err_msg "missing LXC_SUITE" && exit 42
    fi

    case $1 in
        --getenv) var="$2"; echo "${!var}"; exit 0;;
        -h|--help) usage; exit 0;;

        build)
            sudo_or_exit
            case $2 in
                ${LXC_HOST_PREFIX}-*) build_container "$2" ;;
                ''|--|containers) build_all_containers ;;
                *) usage "$_usage"; exit 42;;
            esac
            ;;
        copy)
            case $2 in
                ''|images) lxc_copy_images_localy;;
                *) usage "$_usage"; exit 42;;
            esac
            ;;
        remove)
            sudo_or_exit
            case $2 in
                ''|--|containers) remove_containers ;;
                images) lxc_delete_images_localy ;;
                ${LXC_HOST_PREFIX}-*)
                    ! lxc_exists "$2" && warn_msg "container not yet exists: $2" && exit 0
                    if ask_yn "Do you really want to delete container $2"; then
                        lxc_delete_container "$2"
                    fi
                    ;;
                *) usage "unknown or missing container <name> $2"; exit 42;;
            esac
            ;;
        start|stop)
            sudo_or_exit
            case $2 in
                ''|--|containers) lxc_cmd "$1" ;;
                ${LXC_HOST_PREFIX}-*)
                    ! lxc_exists "$2" && usage_containers "unknown container: $2" && exit 42
                    info_msg "lxc $1 $2"
                    # bugfix: tag output with the container name ($2), the
                    # original used a stale loop variable ${i}
                    lxc "$1" "$2" | prefix_stdout "[${_BBlue}${2}${_creset}] "
                    ;;
                *) usage "unknown or missing container <name> $2"; exit 42;;
            esac
            ;;
        show)
            sudo_or_exit
            case $2 in
                suite)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            lxc exec -t "$3" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
                                | prefix_stdout "[${_BBlue}$3${_creset}] "
                            ;;
                        *) show_suite;;
                    esac
                    ;;
                images) show_images ;;
                config)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            ! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
                            lxc config show "$3" | prefix_stdout "[${_BBlue}${3}${_creset}] "
                            ;;
                        *)
                            rst_title "container configurations"
                            echo
                            lxc list "$LXC_HOST_PREFIX-"
                            echo
                            lxc_cmd config show
                            ;;
                    esac
                    ;;
                info)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            ! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
                            lxc info "$3" | prefix_stdout "[${_BBlue}${3}${_creset}] "
                            ;;
                        *)
                            rst_title "container info"
                            echo
                            lxc_cmd info
                            ;;
                    esac
                    ;;
                *) usage "$_usage"; exit 42;;
            esac
            ;;
        __show)
            # wrapped show commands, called once in each container
            case $2 in
                suite) lxc_suite_info ;;
            esac
            ;;
        cmd)
            sudo_or_exit
            shift
            case $1 in
                --) shift; lxc_exec "$@" ;;
                ${LXC_HOST_PREFIX}-*)
                    ! lxc_exists "$1" && usage_containers "unknown container: $1" && exit 42
                    local name=$1
                    shift
                    lxc_exec_cmd "${name}" "$@"
                    ;;
                *) usage_containers "unknown container: $1" && exit 42
            esac
            ;;
        install)
            sudo_or_exit
            case $2 in
                suite|base)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            ! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
                            lxc_exec_cmd "$3" "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2"
                            ;;
                        ''|--) lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2" ;;
                        *) usage_containers "unknown container: $3" && exit 42
                    esac
                    ;;
                *) usage "$_usage"; exit 42 ;;
            esac
            ;;
        __install)
            # wrapped install commands, called once in each container
            # shellcheck disable=SC2119
            case $2 in
                suite) lxc_suite_install ;;
                base) FORCE_TIMEOUT=0 lxc_install_base_packages ;;
            esac
            ;;
        doc)
            echo
            echo ".. generic utils/lxc.sh documentation"
            ;;
        -*) usage "unknown option $1"; exit 42;;
        *) usage "unknown or missing command $1"; exit 42;;
    esac
}
|
||||
|
||||
|
||||
build_all_containers() {
    # Build & launch all containers of the suite and install the base
    # packages in each of them.
    rst_title "Build all LXC containers of suite"
    echo
    usage_containers
    lxc_copy_images_localy
    lxc_init_all_containers
    lxc_config_all_containers
    lxc_boilerplate_all_containers
    rst_title "install LXC base packages" section
    echo
    lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install base
    echo
    lxc list "$LXC_HOST_PREFIX"
}
|
||||
|
||||
build_container() {
    rst_title "Build container $1"

    # Build & launch the single container named $1: look up its entry in
    # the LXC_SUITE list, copy the image, init & configure the container,
    # run the distro boilerplate and install the base packages.

    local remote_image
    local container
    local image
    local boilerplate_script

    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
        if [ "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}" = "$1" ]; then
            remote_image="${LXC_SUITE[i]}"
            container="${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
            image="${LXC_SUITE[i+1]}"
            # indirect expansion: image 'archlinux' -> $archlinux_boilerplate
            boilerplate_script="${image}_boilerplate"
            boilerplate_script="${!boilerplate_script}"
            break
        fi
    done
    echo
    if [ -z "$container" ]; then
        err_msg "container $1 unknown"
        usage_containers
        return 42
    fi
    lxc_image_copy "${remote_image}" "${image}"
    rst_title "init container" section
    lxc_init_container "${image}" "${container}"
    rst_title "configure container" section
    lxc_config_container "${container}"
    rst_title "run LXC boilerplate scripts" section
    lxc_install_boilerplate "${container}" "$boilerplate_script"
    echo
    rst_title "install LXC base packages" section
    lxc_exec_cmd "${container}" "${LXC_REPO_ROOT}/utils/lxc.sh" __install base \
        | prefix_stdout "[${_BBlue}${container}${_creset}] "
    echo
    lxc list "$container"
}
|
||||
|
||||
remove_containers() {
    # Interactively delete all containers of the suite; the default answer
    # flips to 'yes' when FORCE_TIMEOUT=0 (non-interactive/CI mode).
    rst_title "Remove all LXC containers of suite"
    rst_para "existing containers matching ${_BGreen}$LXC_HOST_PREFIX-*${_creset}"
    echo
    lxc list "$LXC_HOST_PREFIX-"
    echo -en "\\n${_BRed}LXC containers to delete::${_creset}\\n\\n  ${CONTAINERS[*]}\\n" | $FMT
    local default=Ny
    [[ $FORCE_TIMEOUT = 0 ]] && default=Yn
    if ask_yn "Do you really want to delete these containers" $default; then
        for i in "${CONTAINERS[@]}"; do
            lxc_delete_container "$i"
        done
    fi
    echo
    lxc list "$LXC_HOST_PREFIX-"
}
|
||||
|
||||
# images
|
||||
# ------
|
||||
|
||||
lxc_copy_images_localy() {
    # Copy the remote images of the suite into the local image storage.
    rst_title "copy images" section
    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
        lxc_image_copy "${LXC_SUITE[i]}" "${LXC_SUITE[i+1]}"
    done
    # lxc image list local: && wait_key
}
|
||||
|
||||
lxc_delete_images_localy() {
    # Interactively delete the local images of the suite; afterwards offer
    # to delete any remaining alias-less (dangling) images.
    rst_title "Delete LXC images"
    rst_para "local existing images"
    echo
    lxc image list local:
    echo -en "\\n${_BRed}LXC images to delete::${_creset}\\n\\n  ${LOCAL_IMAGES[*]}\\n"
    if ask_yn "Do you really want to delete these images"; then
        for i in "${LOCAL_IMAGES[@]}"; do
            lxc_delete_local_image "$i"
        done
    fi

    # second pass: rows starting with ',' in the csv listing are images
    # without an alias; extract their fingerprint (2nd csv column)
    for i in $(lxc image list --format csv | grep '^,' | sed 's/,\([^,]*\).*$/\1/'); do
        if ask_yn "Image $i has no alias, do you want to delete the image?" Yn; then
            lxc_delete_local_image "$i"
        fi
    done

    echo
    lxc image list local:
}
|
||||
|
||||
show_images(){
    # List local images and print 'lxc image info' for each suite image.
    rst_title "local images"
    echo
    lxc image list local:
    echo -en "\\n${_Green}LXC suite images::${_creset}\\n\\n  ${LOCAL_IMAGES[*]}\\n"
    wait_key
    for i in "${LOCAL_IMAGES[@]}"; do
        if lxc_image_exists "$i"; then
            info_msg "lxc image info ${_BBlue}${i}${_creset}"
            lxc image info "$i" | prefix_stdout "[${_BBlue}${i}${_creset}] "
        else
            warn_msg "image ${_BBlue}$i${_creset} does not yet exists"
        fi
    done

}
|
||||
|
||||
|
||||
# container
|
||||
# ---------
|
||||
|
||||
show_suite(){
    # For each existing container of the suite, run this script's
    # '__show suite' *inside* the container and tag its output with the
    # container name.
    rst_title "LXC suite ($LXC_HOST_PREFIX-*)"
    echo
    lxc list "$LXC_HOST_PREFIX-"
    echo
    for i in "${CONTAINERS[@]}"; do
        if ! lxc_exists "$i"; then
            warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
        else
            lxc exec -t "${i}" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
                | prefix_stdout "[${_BBlue}${i}${_creset}] "
            echo
        fi
    done
}
|
||||
|
||||
lxc_cmd() {

    # Run an arbitrary 'lxc' sub-command ("$@") once per existing container,
    # prefixing its output with the container name.

    for i in "${CONTAINERS[@]}"; do
        if ! lxc_exists "$i"; then
            warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
            continue
        fi
        info_msg "lxc $* $i"
        lxc "$@" "$i" | prefix_stdout "[${_BBlue}${i}${_creset}] "
    done
}
|
||||
|
||||
lxc_exec_cmd() {

    # usage: lxc_exec_cmd <container-name> <shell command ..>
    #
    # Run the command line in the named container (cwd is the shared repo
    # folder) and report the command's exit code.

    local name="$1"
    shift
    # 'local' keeps exit_val from leaking into the caller's (global) scope
    local exit_val=
    info_msg "[${_BBlue}${name}${_creset}] ${_BGreen}${*}${_creset}"
    lxc exec -t --cwd "${LXC_REPO_ROOT}" "${name}" -- bash -c "$*"
    exit_val=$?
    if [[ $exit_val -ne 0 ]]; then
        warn_msg "[${_BBlue}${name}${_creset}] exit code (${_BRed}${exit_val}${_creset}) from ${_BGreen}${*}${_creset}"
    else
        info_msg "[${_BBlue}${name}${_creset}] exit code (${exit_val}) from ${_BGreen}${*}${_creset}"
    fi
}
|
||||
|
||||
lxc_exec() {

    # Run a command line (via lxc_exec_cmd) in every existing container of
    # the suite, prefixing the output with the container name.

    for i in "${CONTAINERS[@]}"; do
        if ! lxc_exists "$i"; then
            warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
            continue
        fi
        lxc_exec_cmd "${i}" "$@" | prefix_stdout "[${_BBlue}${i}${_creset}] "
    done
}
|
||||
|
||||
lxc_init_all_containers() {
    rst_title "init all containers" section

    # LXC_SUITE is a flat list of (remote-image, local-name) pairs; the
    # local name [i+1] serves as both the image alias and the container
    # name suffix.  The unused image_name/container_name locals of the
    # first version have been dropped.
    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
        lxc_init_container "${LXC_SUITE[i+1]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
    done
}
|
||||
|
||||
lxc_config_all_containers() {

    # Apply lxc_config_container to every container of the suite.

    rst_title "configure all containers" section

    local container
    for container in "${CONTAINERS[@]}"; do
        lxc_config_container "${container}"
    done
}
|
||||
|
||||
lxc_config_container() {

    # Configure container "$1": map the host user's uid/gid onto root inside
    # the container, then mount the repository working tree into it.

    info_msg "[${_BBlue}$1${_creset}] configure container ..."

    info_msg "[${_BBlue}$1${_creset}] map uid/gid from host to container"
    # Map HOST_USER_ID/HOST_GROUP_ID to uid/gid 0 (root) in the container so
    # files in the shared repo folder stay writable from both sides.
    # https://lxd.readthedocs.io/en/latest/userns-idmap/#custom-idmaps
    echo -e -n "uid $HOST_USER_ID 0\\ngid $HOST_GROUP_ID 0"\
        | lxc config set "$1" raw.idmap -

    info_msg "[${_BBlue}$1${_creset}] share ${REPO_ROOT} (repo_share) from HOST into container"
    # Errors are discarded (&>/dev/null) — presumably because the device may
    # already exist from an earlier run; TODO confirm.
    # https://lxd.readthedocs.io/en/latest/instances/#type-disk
    lxc config device add "$1" repo_share disk \
        source="${REPO_ROOT}" \
        path="${LXC_REPO_ROOT}" &>/dev/null
    # lxc config show "$1" && wait_key
}
|
||||
|
||||
lxc_boilerplate_all_containers() {
    rst_title "run LXC boilerplate scripts" section

    local boilerplate_script
    local image_name
    local container_name

    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do

        image_name="${LXC_SUITE[i+1]}"
        container_name="${LXC_HOST_PREFIX}-${image_name}"
        # Indirect expansion: the boilerplate shell code lives in a variable
        # named '<image_name>_boilerplate' (e.g. archlinux_boilerplate).
        boilerplate_script="${image_name}_boilerplate"
        boilerplate_script="${!boilerplate_script}"

        # Check *before* installing: the first version called
        # lxc_install_boilerplate first and referenced the (then unset)
        # container_name in the error message.
        if [[ -z "${boilerplate_script}" ]]; then
            err_msg "[${_BBlue}${container_name}${_creset}] no boilerplate for image '${image_name}'"
            continue
        fi

        lxc_install_boilerplate "${container_name}" "$boilerplate_script"
    done
}
|
||||
|
||||
lxc_install_boilerplate() {

    # usage: lxc_install_boilerplate <container-name> <string: shell commands ..>
    #
    # usage: lxc_install_boilerplate searx-archlinux "${archlinux_boilerplate}"
    #
    # Starts the container, checks its connectivity, installs /.lxcenv.mk
    # and finally pipes the boilerplate shell code into a bash inside the
    # container.

    local container_name="$1"
    local boilerplate_script="$2"

    info_msg "[${_BBlue}${container_name}${_creset}] init .."
    # 'lxc start' fails when the container is already running; that case is
    # silenced and the sleep skipped.
    if lxc start -q "${container_name}" &>/dev/null; then
        sleep 5 # guest needs some time to come up and get an IP
    fi
    if ! check_connectivity "${container_name}"; then
        die 42 "Container ${container_name} has no internet connectivity!"
    fi
    lxc_init_container_env "${container_name}"
    info_msg "[${_BBlue}${container_name}${_creset}] install /.lxcenv.mk .."
    # Symlink the suite's makefile into the container root; the heredoc runs
    # inside the container's bash.
    cat <<EOF | lxc exec "${container_name}" -- bash | prefix_stdout "[${_BBlue}${container_name}${_creset}] "
rm -f "/.lxcenv.mk"
ln -s "${LXC_REPO_ROOT}/utils/makefile.lxc" "/.lxcenv.mk"
ls -l "/.lxcenv.mk"
EOF

    info_msg "[${_BBlue}${container_name}${_creset}] run LXC boilerplate scripts .."
    if lxc start -q "${container_name}" &>/dev/null; then
        sleep 5 # guest needs some time to come up and get an IP
    fi
    if [[ -n "${boilerplate_script}" ]]; then
        echo "${boilerplate_script}" \
            | lxc exec "${container_name}" -- bash \
            | prefix_stdout "[${_BBlue}${container_name}${_creset}] "
    fi
}
|
||||
|
||||
check_connectivity() {

    # Ping a public IP from inside container "$1"; returns non-zero (and
    # prints a docker hint) when the container has no internet route.

    info_msg "check internet connectivity ..."
    if lxc exec "${1}" -- ping -c 1 8.8.8.8 &>/dev/null; then
        return 0
    fi

    err_msg "no internet connectivity!"
    info_msg "Most often the connectivity is blocked by a docker installation:"
    info_msg "Whenever docker is started (reboot) it sets the iptables policy "
    info_msg "for the FORWARD chain to DROP, see:"
    info_msg " https://docs.searxng.org/utils/lxc.sh.html#internet-connectivity-docker"
    iptables-save | grep ":FORWARD"
    return 1
}
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
main "$@"
|
||||
# ----------------------------------------------------------------------------
|
||||
Executable
+126
@@ -0,0 +1,126 @@
|
||||
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later

# Removal script for the morty service of a SearXNG installation (usage()
# below only offers 'remove' commands).

# shellcheck source=utils/lib.sh
source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"
# shellcheck source=utils/brand.env
source "${REPO_ROOT}/utils/brand.env"

# ----------------------------------------------------------------------------
# config
# ----------------------------------------------------------------------------

PUBLIC_URL="${PUBLIC_URL:-${SEARXNG_URL}}"

MORTY_LISTEN="${MORTY_LISTEN:-127.0.0.1:3000}"
PUBLIC_URL_PATH_MORTY="${PUBLIC_URL_PATH_MORTY:-/morty/}"
# default: strip everything after scheme://host[:port] from PUBLIC_URL and
# append the morty path
PUBLIC_URL_MORTY="${PUBLIC_URL_MORTY:-$(echo "$PUBLIC_URL" | sed -e's,^\(.*://[^/]*\).*,\1,g')${PUBLIC_URL_PATH_MORTY}}"

SERVICE_NAME="morty"
SERVICE_USER="${SERVICE_USER:-${SERVICE_NAME}}"
SERVICE_SYSTEMD_UNIT="${SYSTEMD_UNITS}/${SERVICE_NAME}.service"

# Apache Settings

APACHE_MORTY_SITE="morty.conf"
NGINX_MORTY_SITE="morty.conf"
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
# ----------------------------------------------------------------------------

    # Print the usage message; when $1 is given it is additionally reported
    # as an error message.

    # shellcheck disable=SC1117
    cat <<EOF
usage::
  $(basename "$0") remove all
  $(basename "$0") apache remove
  $(basename "$0") nginx remove

remove all : drop all components of the morty service
apache remove : drop apache site ${APACHE_MORTY_SITE}
nginx remove : drop nginx site ${NGINX_MORTY_SITE}

environment:
  PUBLIC_URL_MORTY : ${PUBLIC_URL_MORTY}
EOF

    [[ -n ${1} ]] && err_msg "$1"
}
|
||||
|
||||
main() {

    # Command line dispatcher; every destructive command requires root
    # (sudo_or_exit).  Unknown commands print usage and exit 42.

    local _usage="ERROR: unknown or missing $1 command $2"

    case $1 in
        -h|--help) usage; exit 0;;
        remove)
            sudo_or_exit
            case $2 in
                all) remove_all;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        apache)
            sudo_or_exit
            case $2 in
                remove) remove_apache_site ;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        nginx)
            sudo_or_exit
            case $2 in
                remove) remove_nginx_site ;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        *) usage "ERROR: unknown or missing command $1"; exit 42;;
    esac
}
|
||||
|
||||
|
||||
remove_all() {

    # Uninstall the morty service: remove the systemd unit; only when that
    # succeeds is the service account dropped as well.

    rst_title "De-Install $SERVICE_NAME (service)"
    rst_para "\
It goes without saying that this script can only be used to remove
installations that were installed with this script."

    if ! systemd_remove_service "${SERVICE_NAME}" "${SERVICE_SYSTEMD_UNIT}"; then
        return
    fi
    drop_service_account "${SERVICE_USER}"
}
|
||||
|
||||
|
||||
remove_apache_site() {

    # Drop the apache site of the morty service (after confirmation).

    rst_title "Remove Apache site $APACHE_MORTY_SITE"
    rst_para "\
This removes apache site ${APACHE_MORTY_SITE}."

    if ! apache_is_installed; then
        err_msg "Apache is not installed."
    fi

    if ask_yn "Do you really want to continue?" Yn; then
        apache_remove_site "$APACHE_MORTY_SITE"
    fi
}
|
||||
|
||||
remove_nginx_site() {

    # Drop the nginx site of the morty service (after confirmation).

    rst_title "Remove nginx site $NGINX_MORTY_SITE"
    rst_para "\
This removes nginx site ${NGINX_MORTY_SITE}."

    if ! nginx_is_installed; then
        err_msg "nginx is not installed."
    fi

    if ask_yn "Do you really want to continue?" Yn; then
        nginx_remove_app "$NGINX_MORTY_SITE"
    fi
}
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
main "$@"
|
||||
# ----------------------------------------------------------------------------
|
||||
Executable
+90
@@ -0,0 +1,90 @@
|
||||
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later
# shellcheck disable=SC2001

# Removal script for a searx installation (usage() below only offers
# 'remove all').

# shellcheck source=utils/lib.sh
source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"
# shellcheck source=utils/brand.env
source "${REPO_ROOT}/utils/brand.env"

# ----------------------------------------------------------------------------
# config
# ----------------------------------------------------------------------------

PUBLIC_URL="${PUBLIC_URL:-${SEARXNG_URL}}"

SERVICE_NAME="searx"
SERVICE_USER="${SERVICE_USER:-${SERVICE_NAME}}"
# NOTE(review): these reference the old /etc/searx paths — presumably on
# purpose, since this script removes the old 'searx' installation; confirm.
SEARXNG_SETTINGS_PATH="/etc/searx/settings.yml"
SEARXNG_UWSGI_APP="searx.ini"
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
# ----------------------------------------------------------------------------

    # Print the usage message; when $1 is given it is additionally reported
    # as an error message.

    # shellcheck disable=SC1117
    cat <<EOF
usage::
  $(basename "$0") remove all

remove all: complete uninstall of SearXNG service

environment:
  PUBLIC_URL : ${PUBLIC_URL}
EOF

    [[ -n ${1} ]] && err_msg "$1"
}
|
||||
|
||||
main() {

    # Command line dispatcher; 'remove all' requires root (sudo_or_exit).
    # Unknown commands print usage and exit 42.

    local _usage="unknown or missing $1 command $2"

    case $1 in
        remove)
            rst_title "SearXNG (remove)" part
            sudo_or_exit
            case $2 in
                all) remove_all;;
                *) usage "$_usage"; exit 42;;
            esac ;;
        *) usage "unknown or missing command $1"; exit 42;;
    esac
}
|
||||
|
||||
remove_all() {

    # Full uninstall: uWSGI app, service account and settings file; finally
    # warn when the public site still answers.

    rst_title "De-Install SearXNG (service)"
    rst_para "\
It goes without saying that this script can only be used to remove
installations that were installed with this script."

    ask_yn "Do you really want to deinstall SearXNG?" || return

    remove_searx_uwsgi
    drop_service_account "${SERVICE_USER}"
    remove_settings
    wait_key
    if service_is_available "${PUBLIC_URL}"; then
        MSG="** Don't forget to remove your public site! (${PUBLIC_URL}) **" wait_key 10
    fi
}
|
||||
|
||||
remove_settings() {

    # Delete the settings file (SEARXNG_SETTINGS_PATH) of the installation.

    rst_title "remove SearXNG settings" section
    echo
    info_msg "delete ${SEARXNG_SETTINGS_PATH}"
    rm -f "${SEARXNG_SETTINGS_PATH}"
}
|
||||
|
||||
remove_searx_uwsgi() {

    # Remove SearXNG's uWSGI application.  The section title is built from
    # SEARXNG_UWSGI_APP so it cannot drift from the app actually removed
    # (the first version hard-coded 'searxng.ini' while the app is
    # 'searx.ini').

    rst_title "Remove SearXNG's uWSGI app (${SEARXNG_UWSGI_APP})" section
    echo
    uWSGI_remove_app "$SEARXNG_UWSGI_APP"
}
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
main "$@"
|
||||
# ----------------------------------------------------------------------------
|
||||
Executable
+1029
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,33 @@
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Implement some checks in the active installation
"""

import os
import sys
import logging
import warnings

# verbose logging, so each check reports what it is doing
LOG_FORMAT_DEBUG = '%(levelname)-7s %(name)-30.30s: %(message)s'
logging.basicConfig(level=logging.getLevelName('DEBUG'), format=LOG_FORMAT_DEBUG)
# must be set *before* importing searx so debug mode is active on import
os.environ['SEARXNG_DEBUG'] = '1'

# from here on implement the checks of the installation

import searx

# old settings location; the warning below tells the admin to migrate
OLD_SETTING = '/etc/searx/settings.yml'

if os.path.isfile(OLD_SETTING):
    msg = (
        '%s is no longer valid, move setting to %s' % (
            OLD_SETTING,
            os.environ.get('SEARXNG_SETTINGS_PATH', '/etc/searxng/settings.yml')
        ))
    warnings.warn(msg, DeprecationWarning)

from searx import redisdb, get_setting

# without a reachable redis DB there is no bot protection (see warning text)
if not redisdb.initialize():
    warnings.warn("can't connect to redis DB at: %s" % get_setting('redis.url'), RuntimeWarning, stacklevel=2)
    warnings.warn("--> no bot protection without redis DB", RuntimeWarning, stacklevel=2)
|
||||
@@ -0,0 +1,71 @@
|
||||
# SearXNG settings
|
||||
|
||||
use_default_settings: true
|
||||
|
||||
general:
|
||||
debug: false
|
||||
instance_name: "SearXNG"
|
||||
|
||||
search:
|
||||
safe_search: 2
|
||||
autocomplete: 'duckduckgo'
|
||||
|
||||
server:
|
||||
# Is overwritten by ${SEARXNG_SECRET}
|
||||
secret_key: "ultrasecretkey"
|
||||
limiter: true
|
||||
image_proxy: true
|
||||
# public URL of the instance, to ensure correct inbound links. Is overwritten
|
||||
# by ${SEARXNG_URL}.
|
||||
# base_url: http://example.com/location
|
||||
|
||||
redis:
|
||||
# URL to connect redis database. Is overwritten by ${SEARXNG_REDIS_URL}.
|
||||
url: unix:///usr/local/searxng-redis/run/redis.sock?db=0
|
||||
|
||||
ui:
|
||||
static_use_hash: true
|
||||
|
||||
# preferences:
|
||||
# lock:
|
||||
# - autocomplete
|
||||
# - method
|
||||
|
||||
enabled_plugins:
|
||||
- 'Hash plugin'
|
||||
- 'Search on category select'
|
||||
- 'Self Informations'
|
||||
- 'Tracker URL remover'
|
||||
- 'Ahmia blacklist'
|
||||
# - 'Hostname replace' # see hostname_replace configuration below
|
||||
# - 'Infinite scroll'
|
||||
# - 'Open Access DOI rewrite'
|
||||
# - 'Vim-like hotkeys'
|
||||
|
||||
# plugins:
|
||||
# - only_show_green_results
|
||||
|
||||
# hostname_replace:
|
||||
#
|
||||
# # twitter --> nitter
|
||||
# '(www\.)?twitter\.com$': 'nitter.net'
|
||||
|
||||
engines:
|
||||
|
||||
# - name: fdroid
|
||||
# disabled: false
|
||||
#
|
||||
# - name: apk mirror
|
||||
# disabled: false
|
||||
#
|
||||
# - name: mediathekviewweb
|
||||
# categories: TV
|
||||
# disabled: false
|
||||
#
|
||||
# - name: invidious
|
||||
# disabled: false
|
||||
# base_url:
|
||||
# - https://invidious.snopyta.org
|
||||
# - https://invidious.tiekoetter.com
|
||||
# - https://invidio.xamh.de
|
||||
# - https://inv.riverside.rocks
|
||||
Reference in New Issue
Block a user