We started with the docker-compose setup, which worked great. We're now extracting Kafka and Redis and running them on their own dedicated resources. However, we've run into an issue where the web container isn't starting up, but it also isn't logging any errors (we did make changes to sentry.conf.py; the full file is below).
nginx:
nginx_1 | 10.0.102.214 - - [22/Jul/2021:05:10:54 +0000] "POST /api/5/envelope/ HTTP/1.1" 200 41 "-" "sentry.php.laravel/2.6.0" "MY_IP"
nginx_1 | 2021/07/22 05:10:54 [error] 6#6: *246 connect() failed (111: Connection refused) while connecting to upstream, client: 10.0.101.222, server: , request: "GET /auth/login/sentry/ HTTP/1.1", upstream: "http://172.18.0.20:9000/auth/login/sentry/", host: "10.0.1.148:9000"
nginx_1 | 10.0.101.222 - - [22/Jul/2021:05:10:54 +0000] "GET /auth/login/sentry/ HTTP/1.1" 502 150 "-" "ELB-HealthChecker/2.0" "-"
nginx_1 | 2021/07/22 05:10:59 [error] 6#6: *248 connect() failed (111: Connection refused) while connecting to upstream, client: 10.0.102.214, server: , request: "GET /auth/login/sentry/ HTTP/1.1", upstream: "http://172.18.0.20:9000/auth/login/sentry/", host: "10.0.1.148:9000"
nginx_1 | 10.0.102.214 - - [22/Jul/2021:05:10:59 +0000] "GET /auth/login/sentry/ HTTP/1.1" 502 150 "-" "ELB-HealthChecker/2.0" "-"
web:
web_1 | 04:56:16 [INFO] sentry.plugins.github: apps-not-configured
...and nothing else after that.
sentry.conf.py:
# This file is just Python, with a touch of Django, which means
# you can inherit and tweak settings to your heart's content.
from sentry.conf.server import * # NOQA
# Generously adapted from pynetlinux: https://git.io/JJmga
def get_internal_network():
    import ctypes
    import fcntl
    import math
    import socket
    import struct

    iface = b"eth0"
    sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifreq = struct.pack(b"16sH14s", iface, socket.AF_INET, b"\x00" * 14)

    try:
        ip = struct.unpack(
            b"!I", struct.unpack(b"16sH2x4s8x", fcntl.ioctl(sockfd, 0x8915, ifreq))[2]
        )[0]
        netmask = socket.ntohl(
            struct.unpack(b"16sH2xI8x", fcntl.ioctl(sockfd, 0x891B, ifreq))[2]
        )
    except IOError:
        return ()

    base = socket.inet_ntoa(struct.pack(b"!I", ip & netmask))
    netmask_bits = 32 - int(round(math.log(ctypes.c_uint32(~netmask).value + 1, 2), 1))
    return "{0:s}/{1:d}".format(base, netmask_bits)
INTERNAL_SYSTEM_IPS = (get_internal_network(),)
DATABASES = {
    "default": {
        "ENGINE": "sentry.db.postgres",
        "NAME": "postgres",
        "USER": "postgres",
        "PASSWORD": "",
        "HOST": "postgres",
        "PORT": "",
    }
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
SENTRY_OPTIONS["system.event-retention-days"] = int(
env("SENTRY_EVENT_RETENTION_DAYS", "90")
)
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_OPTIONS["redis.clusters"] = {
"default": {
"hosts": {0: {"host": "MY_REDIS_HOST", "password": "", "port": "6379", "db": "0"}}
}
}
#########
# Queue #
#########
# See https://develop.sentry.dev/services/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
rabbitmq_host = None
if rabbitmq_host:
    BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format(
        username="guest", password="guest", host=rabbitmq_host, vhost="/"
    )
else:
    BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format(
        **SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0]
    )
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
        "LOCATION": ["memcached:11211"],
        "TIMEOUT": 3600,
    }
}
# A primary cache is required for things such as processing events
SENTRY_CACHE = "sentry.cache.redis.RedisCache"
DEFAULT_KAFKA_OPTIONS = {
    "bootstrap.servers": "MY_BOOTSTRAP_SERVERS",
    "message.max.bytes": 50000000,
    "socket.timeout.ms": 1000,
}
SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS}
KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
#########
# SNUBA #
#########
SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
SENTRY_SEARCH_OPTIONS = {}
SENTRY_TAGSTORE_OPTIONS = {}
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend"
##############
# Web Server #
##############
SENTRY_WEB_HOST = "0.0.0.0"
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
    "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT),
    "protocol": "uwsgi",
    # This is needed in order to prevent https://git.io/fj7Lw
    "uwsgi-socket": None,
    "so-keepalive": True,
    # Keep this between 15s-75s as that's what Relay supports
    "http-keepalive": 15,
    "http-chunked-input": True,
    # the number of web workers
    "workers": 3,
    "threads": 4,
    "memory-report": False,
    # Some stuff so uwsgi will cycle workers sensibly
    "max-requests": 100000,
    "max-requests-delta": 500,
    "max-worker-lifetime": 86400,
    # Duplicate options from sentry default just so we don't get
    # bit by sentry changing a default value that we depend on.
    "thunder-lock": True,
    "log-x-forwarded-for": False,
    "buffer-size": 32768,
    "limit-post": 209715200,
    "disable-logging": True,
    "reload-on-rss": 600,
    "ignore-sigpipe": True,
    "ignore-write-errors": True,
    "disable-write-exception": True,
}
###########
# SSL/TLS #
###########
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and enable the settings below
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
# End of SSL/TLS settings
############
# Features #
############
SENTRY_FEATURES["projects:sample-events"] = False
SENTRY_FEATURES.update(
    {
        feature: True
        for feature in (
            "organizations:discover",
            "organizations:events",
            "organizations:global-views",
            "organizations:incidents",
            "organizations:integrations-issue-basic",
            "organizations:integrations-issue-sync",
            "organizations:invite-members",
            "organizations:metric-alert-builder-aggregate",
            "organizations:sso-basic",
            "organizations:sso-rippling",
            "organizations:sso-saml2",
            "organizations:performance-view",
            "organizations:advanced-search",
            "projects:custom-inbound-filters",
            "projects:data-forwarding",
            "projects:discard-groups",
            "projects:plugins",
            "projects:rate-limits",
            "projects:servicehooks",
        )
    }
)
#######################
# MaxMind Integration #
#######################
GEOIP_PATH_MMDB = '/geoip/GeoLite2-City.mmdb'
#########################
# Bitbucket Integration #
#########################
# BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY'
# BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'
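In case it helps narrow things down, this is the kind of quick connectivity check we can run from a Python shell inside the web container against the external services. The host names are the same placeholders as in the config above, and we're assuming the redis and confluent_kafka client packages that ship with the Sentry image (Sentry itself depends on both), so this is a sketch rather than anything definitive:

import redis
from confluent_kafka.admin import AdminClient

# External Redis: ping() should return True if host/port/db are reachable.
r = redis.StrictRedis(host="MY_REDIS_HOST", port=6379, db=0)
print(r.ping())

# External Kafka: listing topics should succeed if bootstrap.servers is reachable;
# it raises after the timeout if the brokers can't be contacted.
admin = AdminClient({"bootstrap.servers": "MY_BOOTSTRAP_SERVERS"})
print(admin.list_topics(timeout=5).topics)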
Any ideas on what we can try to get more information about what's failing?