Redis uses a lot of memory

We have an on-premises setup for gateway version 2.3.6. It has been running for 2 months, and now we have found that the redis-server uses a lot of memory (10GB on a 16GB standalone instance).
my questions are

  1. why redis costs so much? is there something wrong with the tyk configuration?
  2. Is it safe to run FLUSHALL on Redis to release the memory?


following is my tyk.conf

“listen_port”: 443,
“node_secret”: “xxxxxxxxx”,
“secret”: “xxxxxxxxx”,
“template_path”: “/opt/tyk-gateway/templates”,
“tyk_js_path”: “/opt/tyk-gateway/js/tyk.js”,
“use_db_app_configs”: true,
“db_app_conf_options”: {
“connection_string”: “”,
“node_is_segmented”: false,
“tags”: []
“disable_dashboard_zeroconf”: false,
“app_path”: “/opt/tyk-gateway/apps”,
“middleware_path”: “/opt/tyk-gateway/middleware”,
“storage”: {
“type”: “redis”,
“host”: “”,
“port”: 6379,
“username”: “”,
“password”: “”,
“database”: 0,
“optimisation_max_idle”: 2000,
“optimisation_max_active”: 4000
“enable_analytics”: true,
“analytics_config”: {
“type”: “mongo”,
“pool_size”: 100,
“csv_dir”: “/tmp”,
“mongo_url”: “”,
“mongo_db_name”: “”,
“mongo_collection”: “”,
“purge_delay”: 100,
“ignored_ips”: [],
“enable_detailed_recording”: true,
“enable_geo_ip”: false,
“geo_ip_db_path”: “”,
“normalise_urls”: {
“enabled”: true,
“normalise_uuids”: true,
“normalise_numbers”: true,
“custom_patterns”: []
“health_check”: {
“enable_health_checks”: false,
“health_check_value_timeouts”: 60
“optimisations_use_async_session_write”: true,
“allow_master_keys”: false,
“policies”: {
“policy_source”: “service”,
“policy_connection_string”: “”,
“policy_record_name”: “tyk_policies”,
“allow_explicit_policy_id”: true
“hash_keys”: false,
“suppress_redis_signal_reload”: false,
“use_redis_log”: true,

"close_connections": true,
"enforce_org_quotas": false, 
"enforce_org_data_detail_logging": false,
"experimental_process_org_off_thread": true,
"enable_non_transactional_rate_limiter": true,
"enable_sentinel_rate_limiter": false,
"local_session_cache": {
    "disable_cached_session_state": false

“proxy_ssl_insecure_skip_verify”: true,
“http_server_options”: {
“enable_websockets”: true,
“ssl_insecure_skip_verify”: true,
“use_ssl”: true,
“certificates”: [
“domain_name”: “*”,
“cert_file”: “/root/server_cert.pem”,
“key_file” : “/root/server_key.pem”


“uptime_tests”: {
“disable”: false,
“config”: {
“enable_uptime_analytics”: true,
“failure_trigger_sample_size”: 3,
“time_wait”: 300,
“checker_pool_size”: 50
“hostname”: “”,
“enable_custom_domains”: true,
“enable_jsvm”: true,
“oauth_redirect_uri_separator”: “;”,
“coprocess_options”: {
“enable_coprocess”: false,
“coprocess_grpc_server”: “”
“pid_file_location”: “./”,
“allow_insecure_configs”: true,
“public_key_path”: “”,
“close_idle_connections”: false,
“allow_remote_config”: false,
“enable_bundle_downloader”: true,
“bundle_base_url”: “”,
“global_session_lifetime”: 100,
“force_global_session_lifetime”: false,
“max_idle_connections_per_host”: 100


Do you have pump running?

we do not configure pump right now

Then that is why your Redis is huge - the analytics recorded by Tyk are stored in Redis temporarily, and they require Tyk Pump to purge them.

There will be a tyk-analytics key in redis you can safely delete - FLUSHALL will be extremely destructive and break things, so don’t do that.

I would suggest just setting up Pump and sending the data to CSV.

Thanks a lot.
I'll try it and give feedback later.

hi Martin,
I set up Pump and the problem is resolved.
thanks again!


1 Like