addons-server/docker-compose.yml

x-env-mapping: &env
  # https://docs.docker.com/compose/environment-variables/envvars-precedence/
  env_file:
    - .env
  environment:
    - CELERY_BROKER_URL=amqp://olympia:olympia@rabbitmq/olympia
    - CELERY_RESULT_BACKEND=redis://redis:6379/1
    - DATABASES_DEFAULT_URL=mysql://root:@mysqld/olympia
    - ELASTICSEARCH_LOCATION=elasticsearch:9200
    - MEMCACHE_LOCATION=memcached:11211
    - MYSQL_DATABASE=olympia
    - MYSQL_ROOT_PASSWORD=docker
    - OLYMPIA_SITE_URL=http://olympia.test
    - PYTHONDONTWRITEBYTECODE=1
    - PYTHONUNBUFFERED=1
    - PYTHONBREAKPOINT=ipdb.set_trace
    - TERM=xterm-256color
    - HISTFILE=/data/olympia/docker/artifacts/bash_history
    - HISTSIZE=50000
    - HISTIGNORE=ls:exit:"cd .."
    - HISTCONTROL=erasedups
    - CIRCLECI
    - HOST_UID
    - DEBUG
    - DATA_BACKUP_SKIP
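    # CIRCLECI, HOST_UID, DEBUG and DATA_BACKUP_SKIP are listed without a value,
    # so docker compose passes them through from the host environment when set.
    # Illustrative invocation only (the repo's Makefile normally sets this):
    #   HOST_UID=$(id -u) docker compose up -d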

x-olympia: &olympia
  <<: *env
  image: ${DOCKER_TAG:-}
  # We don't want docker compose to manage the image for us.
  # We sometimes build the image locally and sometimes pull from a registry,
  # but docker compose should always assume the image is available.
  pull_policy: never
  # We drop down to a different user through entrypoint.sh, but starting as
  # root allows us to fix the ownership of files generated at image build
  # time through the ./docker/entrypoint.sh script.
  user: root
  platform: linux/amd64
  entrypoint: ["/data/olympia/docker/entrypoint.sh"]
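
# Note: because pull_policy is "never", ${DOCKER_TAG} has to name an image that
# is already present locally. Illustrative example only; the exact tag depends
# on how the image was built or pulled:
#   DOCKER_TAG=mozilla/addons-server:latest docker compose up -d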

services:
  worker:
    <<: *olympia
    command: [
      "DJANGO_SETTINGS_MODULE=settings",
      "watchmedo",
      "auto-restart",
      "--directory=/data/olympia/src",
      "--pattern=*.py",
      "--recursive",
      "--",
      "celery -A olympia.amo.celery:app worker -E -c 2 --loglevel=INFO",
    ]
    volumes:
      - data_olympia:/data/olympia
      # Don't mount generated files. They only exist in the container
      # and would otherwise be deleted by mounting data_olympia.
      - /data/olympia/static-build
      - /data/olympia/site-static
      - storage:/data/olympia/storage
      - ./package.json:/deps/package.json
      - ./package-lock.json:/deps/package-lock.json
    extra_hosts:
- "olympia.test:127.0.0.1"
restart: on-failure:5
# entrypoint.sh takes some time
# we can wait for services to be up and running
healthcheck:
test: ["CMD-SHELL", "DJANGO_SETTINGS_MODULE=olympia celery -A olympia.amo.celery status"]
# The interval is 90s after the start period of 60s
interval: 90s
# 3 failed attempts result in container failure
retries: 3
# While starting, ping faster to get the container healthy as soon as possible
start_interval: 1s
# The start period is 60s
start_period: 120s
    depends_on:
      - mysqld
      - elasticsearch
      - redis
      - memcached
      - rabbitmq
      - autograph
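    # To watch the worker come up, standard docker tooling works; a sketch that
    # assumes the default compose project/container naming:
    #   docker compose ps
    #   docker inspect --format '{{json .State.Health}}' addons-server-worker-1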

  web:
    extends:
      service: worker
    healthcheck:
      test: ["CMD-SHELL", "curl --fail --show-error --include --location http://127.0.0.1:8002/__version__"]
      retries: 3
      interval: 90s
      start_interval: 1s
      start_period: 120s
    command:
      - uwsgi --ini /data/olympia/docker/uwsgi.ini

  nginx:
    image: nginx
    volumes:
      - ./docker/nginx/addons.conf:/etc/nginx/conf.d/addons.conf
      - ./static:/srv/static
      - storage:/srv/user-media
    ports:
      - "80:80"
    networks:
      default:
        aliases:
          - olympia.test
    depends_on:
      - web
      - addons-frontend
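  # nginx answers on the olympia.test alias inside the compose network; to reach
  # http://olympia.test from the host browser you typically also need a hosts
  # entry along these lines (illustrative):
  #   127.0.0.1 olympia.test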

  memcached:
    image: memcached:1.4
    # Remove this once we upgrade to a version that provides multi-platform images
    platform: linux/amd64

  mysqld:
    image: mysql:8.0
    environment:
      - MYSQL_ALLOW_EMPTY_PASSWORD=yes
      - MYSQL_DATABASE=olympia
    ports:
      - "3306:3306"
    volumes:
      - data_mysqld:/var/lib/mysql
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "--silent"]
      start_interval: 1s
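    # With MYSQL_ALLOW_EMPTY_PASSWORD=yes and port 3306 published, the database
    # can be inspected from the host for debugging; a sketch assuming a local
    # mysql client is installed:
    #   mysql -h 127.0.0.1 -u root olympia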

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.3
    environment:
      # Disable all xpack related features to avoid unrelated logging
      # in docker logs. https://github.com/mozilla/addons-server/issues/8887
      # This also means we don't need authentication for local development,
      # which simplifies the setup.
      - xpack.security.enabled=false
      - xpack.monitoring.enabled=false
      - xpack.graph.enabled=false
      - xpack.watcher.enabled=false
- "discovery.type=single-node"
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
mem_limit: 2g
volumes:
- data_elasticsearch:/usr/share/elasticsearch/data
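    # Elasticsearch is not published to the host, so check it from inside the
    # compose network; a sketch that relies on curl being present in the web
    # image (it is already used for that service's healthcheck):
    #   docker compose exec web curl -s "http://elasticsearch:9200/_cluster/health?pretty"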

  redis:
    image: redis:6.2
    volumes:
      - data_redis:/data

  rabbitmq:
    image: rabbitmq:3.12
    hostname: olympia
    expose:
      - "5672"
    environment:
      - RABBITMQ_DEFAULT_USER=olympia
      - RABBITMQ_DEFAULT_PASS=olympia
      - RABBITMQ_DEFAULT_VHOST=olympia
    volumes:
      - data_rabbitmq:/var/lib/rabbitmq

  autograph:
    image: mozilla/autograph:3.3.2
    platform: linux/amd64
    command: /go/bin/autograph -c /autograph_localdev_config.yaml
    volumes:
      - ./scripts/autograph_localdev_config.yaml:/autograph_localdev_config.yaml

  addons-frontend:
    <<: *env
    image: mozilla/addons-frontend:latest
    platform: linux/amd64
    environment:
      # We change the proxy port (which is the main entrypoint) as well as the
      # webpack port to avoid a conflict in case someone runs both addons-server
      # and addons-frontend locally, with the frontend configured to access
      # addons-server locally.
      - PROXY_PORT=7010
      - WEBPACK_SERVER_PORT=7011
    ports:
      # We need to expose this port so that statics can be fetched (they are
      # exposed using webpack and not by the node app server).
      - 7011:7011
    command: yarn amo:olympia

networks:
  default:

volumes:
  data_redis:
  data_elasticsearch:
  data_mysqld:
    # Keep this value in sync with Makefile-os
    # External volumes must be manually created/destroyed
    name: addons-server_data_mysqld
    external: true
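    # Because the volume is external, docker compose will not create it; it has
    # to exist before the first `up` (the Makefile may already handle this), e.g.:
    #   docker volume create addons-server_data_mysqld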
  data_rabbitmq:
  data_olympia:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ${PWD}
  storage:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./storage
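
# With the local driver and "o: bind", the bound host paths (${PWD} and
# ./storage) must already exist when the volumes are first used; an
# illustrative precaution:
#   mkdir -p ./storage && docker compose up -d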