From cebd39fd76f2957a9f702f62206f4c1f22828cf8 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:20:59 +0100 Subject: [PATCH 01/18] Add files via upload added the compose file and configurations for deployment --- conf_conductor/iotronic.conf | 102 ++ conf_keystone/keystone.conf | 2715 ++++++++++++++++++++++++++++++++++ conf_mysql/99-openstack.conf | 13 + conf_ui/local_settings.py | 916 ++++++++++++ conf_wagent/iotronic.conf | 96 ++ docker-compose.yml | 393 +++++ 6 files changed, 4235 insertions(+) create mode 100644 conf_conductor/iotronic.conf create mode 100644 conf_keystone/keystone.conf create mode 100644 conf_mysql/99-openstack.conf create mode 100644 conf_ui/local_settings.py create mode 100644 conf_wagent/iotronic.conf create mode 100644 docker-compose.yml diff --git a/conf_conductor/iotronic.conf b/conf_conductor/iotronic.conf new file mode 100644 index 0000000..30371a1 --- /dev/null +++ b/conf_conductor/iotronic.conf @@ -0,0 +1,102 @@ +[DEFAULT] +transport_url = rabbit://openstack:unime@rabbitmq + +debug=True +log_file = /var/log/iotronic/iotronic-conductor.log +proxy=nginx + + +# Authentication strategy used by iotronic-api: one of +# "keystone" or "noauth". "noauth" should not be used in a +# production environment because all authentication will be +# disabled. (string value) +auth_strategy=keystone + +# Enable pecan debug mode. WARNING: this is insecure and +# should not be used in a production environment. 
(boolean +# value) +#pecan_debug=false + + +[conductor] +service_port_min=50000 +service_port_max=50100 + +[wamp] +wamp_transport_url = ws://iotronic-wagent:8181/ +wamp_realm = s4t +#skip_cert_verify= False +register_agent = True + + + +[database] +connection = mysql+pymysql://iotronic:unime@iotronic-db/iotronic + +[keystone_authtoken] +www_authenticate_uri = http://keystone:5000 +auth_url = http://keystone:5000 +auth_plugin = password +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = iotronic +password = unime + + +[neutron] +auth_url = http://controller:5000 +url = http://controller:9696 +auth_strategy = password +auth_type = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = neutron +password = NEUTRON_PASS +retries = 3 +project_domain_id= default + + +[designate] +auth_url = http://controller:5000 +url = http://controller:9001 +auth_strategy = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = designate +password = password +retries = 3 +project_domain_id= default + + +[cors] +# Indicate whether this resource may be shared with the domain +# received in the requests "origin" header. Format: +# "://[:]", no trailing slash. Example: +# https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user +# credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. +# Defaults to HTTP Simple Headers. (list value) +#expose_headers = + +# Maximum cache age of CORS preflight requests. (integer +# value) +#max_age = 3600 + +# Indicate which methods can be used during the actual +# request. (list value) +#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH + +# Indicate which header field names may be used during the +# actual request. 
(list value) +#allow_headers = diff --git a/conf_keystone/keystone.conf b/conf_keystone/keystone.conf new file mode 100644 index 0000000..01e3d6f --- /dev/null +++ b/conf_keystone/keystone.conf @@ -0,0 +1,2715 @@ +[DEFAULT] +debug = True +#log_config = /etc/keystone/logging.conf +log_dir = /var/log/keystone + +# +# From keystone +# + +# Using this feature is *NOT* recommended. Instead, use the `keystone-manage +# bootstrap` command. The value of this option is treated as a "shared secret" +# that can be used to bootstrap Keystone through the API. This "token" does not +# represent a user (it has no identity), and carries no explicit authorization +# (it effectively bypasses most authorization checks). If set to `None`, the +# value is ignored and the `admin_token` middleware is effectively disabled. +# (string value) +#admin_token = + +# The base public endpoint URL for Keystone that is advertised to clients +# (NOTE: this does NOT affect how Keystone listens for connections). Defaults +# to the base host URL of the request. For example, if keystone receives a +# request to `http://server:5000/v3/users`, then this will option will be +# automatically treated as `http://server:5000`. You should only need to set +# option if either the value of the base URL contains a path that keystone does +# not automatically infer (`/prefix/v3`), or if the endpoint should be found on +# a different host. (uri value) +#public_endpoint = + +# DEPRECATED: The base admin endpoint URL for Keystone that is advertised to +# clients (NOTE: this does NOT affect how Keystone listens for connections). +# Defaults to the base host URL of the request. For example, if keystone +# receives a request to `http://server:35357/v3/users`, then this will option +# will be automatically treated as `http://server:35357`. 
You should only need +# to set option if either the value of the base URL contains a path that +# keystone does not automatically infer (`/prefix/v3`), or if the endpoint +# should be found on a different host. (uri value) +# This option is deprecated for removal since R. +# Its value may be silently ignored in the future. +# Reason: With the removal of the 2.0 API keystone does not distinguish between +# admin and public endpoints. +#admin_endpoint = + +# Maximum depth of the project hierarchy, excluding the project acting as a +# domain at the top of the hierarchy. WARNING: Setting it to a large value may +# adversely impact performance. (integer value) +#max_project_tree_depth = 5 + +# Limit the sizes of user & project ID/names. (integer value) +#max_param_size = 64 + +# Similar to `[DEFAULT] max_param_size`, but provides an exception for token +# values. With Fernet tokens, this can be set as low as 255. With UUID tokens, +# this should be set to 32). (integer value) +#max_token_size = 255 + +# The maximum number of entities that will be returned in a collection. This +# global limit may be then overridden for a specific driver, by specifying a +# list_limit in the appropriate section (for example, `[assignment]`). No limit +# is set by default. In larger deployments, it is recommended that you set this +# to a reasonable number to prevent operations like listing all users and +# projects from placing an unnecessary load on the system. (integer value) +#list_limit = + +# If set to true, strict password length checking is performed for password +# manipulation. If a password exceeds the maximum length, the operation will +# fail with an HTTP 403 Forbidden error. If set to false, passwords are +# automatically truncated to the maximum length. 
(boolean value) +#strict_password_check = false + +# If set to true, then the server will return information in HTTP responses +# that may allow an unauthenticated or authenticated user to get more +# information than normal, such as additional details about why authentication +# failed. This may be useful for debugging but is insecure. (boolean value) +#insecure_debug = false + +# Default `publisher_id` for outgoing notifications. If left undefined, +# Keystone will default to using the server's host name. (string value) +#default_publisher_id = + +# Define the notification format for identity service events. A `basic` +# notification only has information about the resource being operated on. A +# `cadf` notification has the same information, as well as information about +# the initiator of the event. The `cadf` option is entirely backwards +# compatible with the `basic` option, but is fully CADF-compliant, and is +# recommended for auditing use cases. (string value) +# Possible values: +# basic - +# cadf - +#notification_format = cadf + +# You can reduce the number of notifications keystone emits by explicitly +# opting out. Keystone will not emit notifications that match the patterns +# expressed in this list. Values are expected to be in the form of +# `identity..`. By default, all notifications related +# to authentication are automatically suppressed. This field can be set +# multiple times in order to opt-out of multiple notification topics. For +# example, the following suppresses notifications describing user creation or +# successful authentication events: notification_opt_out=identity.user.create +# notification_opt_out=identity.authenticate.success (multi valued) +#notification_opt_out = identity.authenticate.success +#notification_opt_out = identity.authenticate.pending +#notification_opt_out = identity.authenticate.failed + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of the default +# INFO level. 
(boolean value) +# Note: This option can be changed without restarting. +#debug = false + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. Note that when logging +# configuration files are used then all logging configuration is set in the +# configuration file and other logging configuration options are ignored (for +# example, log-date-format). (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default is set, +# logging will go to stderr as defined by use_stderr. This option is ignored if +# log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. This option +# is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is moved or +# removed this handler will open a new log file with specified path +# instantaneously. It makes sense only if log_file option is specified and +# Linux platform is used. This option is ignored if log_config_append is set. +# (boolean value) +#watch_log_file = false + +# Use syslog for logging. Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. This option is ignored if log_config_append +# is set. (boolean value) +#use_syslog = false + +# Enable journald for logging. If running in a systemd environment you may wish +# to enable journal support. 
Doing so will use the journal native protocol +# which includes structured metadata in addition to log messages.This option is +# ignored if log_config_append is set. (boolean value) +#use_journal = false + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +#syslog_log_facility = LOG_USER + +# Use JSON formatting for logging. This option is ignored if log_config_append +# is set. (boolean value) +#use_json = false + +# Log output to standard error. This option is ignored if log_config_append is +# set. (boolean value) +#use_stderr = false + +# Log output to Windows Event Log. (boolean value) +#use_eventlog = false + +# The amount of time before the log files are rotated. This option is ignored +# unless log_rotation_type is setto "interval". (integer value) +#log_rotate_interval = 1 + +# Rotation interval type. The time of the last file change (or the time when +# the service was started) is used when scheduling the next rotation. (string +# value) +# Possible values: +# Seconds - +# Minutes - +# Hours - +# Days - +# Weekday - +# Midnight - +#log_rotate_interval_type = days + +# Maximum number of rotated log files. (integer value) +#max_logfile_count = 30 + +# Log file maximum size in MB. This option is ignored if "log_rotation_type" is +# not set to "size". (integer value) +#max_logfile_size_mb = 200 + +# Log rotation type. (string value) +# Possible values: +# interval - Rotate logs at predefined time intervals. +# size - Rotate logs once they reach a predefined size. +# none - Do not rotate log files. +#log_rotation_type = none + +# Format string to use for log messages with context. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. 
Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the message +# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter +# (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. This option is ignored +# if log_config_append is set. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Interval, number of seconds, of log rate limiting. (integer value) +#rate_limit_interval = 0 + +# Maximum number of logged messages per rate_limit_interval. 
(integer value) +#rate_limit_burst = 0 + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG +# or empty string. Logs with level greater or equal to rate_limit_except_level +# are not filtered. An empty string means that all levels are filtered. (string +# value) +#rate_limit_except_level = CRITICAL + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size = 30 + +# The pool size limit for connections expiration policy (integer value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer value) +#conn_pool_ttl = 1200 + +# Size of executor thread pool when executor is threading or eventlet. (integer +# value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# The network address and optional user credentials for connecting to the +# messaging backend, in URL format. The expected format is: +# +# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query +# +# Example: rabbit://rabbitmq:password@127.0.0.1:5672// +# +# For full details on the fields in the URL see the documentation of +# oslo_messaging.TransportURL at +# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html +# (string value) +#transport_url = rabbit:// + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange = keystone + + +[access_rules_config] + +# +# From keystone +# + +# Entry point for the access rules config backend driver in the +# `keystone.access_rules_config` namespace. Keystone only provides a `json` +# driver, so there is no reason to change this unless you are providing a +# custom entry point. 
(string value) +#driver = json + +# Toggle for access rules caching. This has no effect unless global caching is +# enabled. (boolean value) +#caching = true + +# Time to cache access rule data in seconds. This has no effect unless global +# caching is enabled. (integer value) +#cache_time = + +# Path to access rules configuration. If not present, no access rule +# configuration will be loaded and application credential access rules will be +# unavailable. (string value) +#rules_file = /etc/keystone/access_rules.json + +# Toggles permissive mode for access rules. When enabled, application +# credentials can be created with any access rules regardless of operator's +# configuration. (boolean value) +#permissive = false + + +[application_credential] + +# +# From keystone +# + +# Entry point for the application credential backend driver in the +# `keystone.application_credential` namespace. Keystone only provides a `sql` +# driver, so there is no reason to change this unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Toggle for application credential caching. This has no effect unless global +# caching is enabled. (boolean value) +#caching = true + +# Time to cache application credential data in seconds. This has no effect +# unless global caching is enabled. (integer value) +#cache_time = + +# Maximum number of application credentials a user is permitted to create. A +# value of -1 means unlimited. If a limit is not set, users are permitted to +# create application credentials at will, which could lead to bloat in the +# keystone database or open keystone to a DoS attack. (integer value) +#user_limit = -1 + + +[assignment] + +# +# From keystone +# + +# Entry point for the assignment backend driver (where role assignments are +# stored) in the `keystone.assignment` namespace. Only a SQL driver is supplied +# by keystone itself. Unless you are writing proprietary drivers for keystone, +# you do not need to set this option. 
(string value) +#driver = sql + +# A list of role names which are prohibited from being an implied role. (list +# value) +#prohibited_implied_role = admin + + +[auth] + +# +# From keystone +# + +# Allowed authentication methods. Note: You should disable the `external` auth +# method if you are currently using federation. External auth and federation +# both use the REMOTE_USER variable. Since both the mapped and external plugin +# are being invoked to validate attributes in the request environment, it can +# cause conflicts. (list value) +#methods = external,password,token,oauth1,mapped,application_credential + +# Entry point for the password auth plugin module in the +# `keystone.auth.password` namespace. You do not need to set this unless you +# are overriding keystone's own password authentication plugin. (string value) +#password = + +# Entry point for the token auth plugin module in the `keystone.auth.token` +# namespace. You do not need to set this unless you are overriding keystone's +# own token authentication plugin. (string value) +#token = + +# Entry point for the external (`REMOTE_USER`) auth plugin module in the +# `keystone.auth.external` namespace. Supplied drivers are `DefaultDomain` and +# `Domain`. The default driver is `DefaultDomain`, which assumes that all users +# identified by the username specified to keystone in the `REMOTE_USER` +# variable exist within the context of the default domain. The `Domain` option +# expects an additional environment variable be presented to keystone, +# `REMOTE_DOMAIN`, containing the domain name of the `REMOTE_USER` (if +# `REMOTE_DOMAIN` is not set, then the default domain will be used instead). +# You do not need to set this unless you are taking advantage of "external +# authentication", where the application server (such as Apache) is handling +# authentication instead of keystone. (string value) +#external = + +# Entry point for the OAuth 1.0a auth plugin module in the +# `keystone.auth.oauth1` namespace. 
You do not need to set this unless you are +# overriding keystone's own `oauth1` authentication plugin. (string value) +#oauth1 = + +# Entry point for the mapped auth plugin module in the `keystone.auth.mapped` +# namespace. You do not need to set this unless you are overriding keystone's +# own `mapped` authentication plugin. (string value) +#mapped = + +# Entry point for the application_credential auth plugin module in the +# `keystone.auth.application_credential` namespace. You do not need to set this +# unless you are overriding keystone's own `application_credential` +# authentication plugin. (string value) +#application_credential = + + +[cache] + +# +# From oslo.cache +# + +# Prefix for building the configuration dictionary for the cache region. This +# should not need to be changed unless there is another dogpile.cache region +# with the same configuration name. (string value) +#config_prefix = cache.oslo + +# Default TTL, in seconds, for any cached item in the dogpile.cache region. +# This applies to any cached method that doesn't have an explicit cache +# expiration time defined for it. (integer value) +#expiration_time = 600 + +# Cache backend module. For eventlet-based or environments with hundreds of +# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is +# recommended. For environments with less than 100 threaded servers, Memcached +# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test +# environments with a single instance of the server can use the +# dogpile.cache.memory backend. (string value) +# Possible values: +# oslo_cache.memcache_pool - +# oslo_cache.dict - +# oslo_cache.mongo - +# oslo_cache.etcd3gw - +# dogpile.cache.memcached - +# dogpile.cache.pylibmc - +# dogpile.cache.bmemcached - +# dogpile.cache.dbm - +# dogpile.cache.redis - +# dogpile.cache.memory - +# dogpile.cache.memory_pickle - +# dogpile.cache.null - +#backend = dogpile.cache.null + +# Arguments supplied to the backend module. 
Specify this option once per +# argument to be passed to the dogpile.cache backend. Example format: +# ":". (multi valued) +#backend_argument = + +# Proxy classes to import that will affect the way the dogpile.cache backend +# functions. See the dogpile.cache documentation on changing-backend-behavior. +# (list value) +#proxies = + +# Global toggle for caching. (boolean value) +#enabled = true + +# Extra debugging from the cache backend (cache keys, get/set/delete/etc +# calls). This is only really useful if you need to see the specific cache- +# backend get/set/delete calls with the keys/values. Typically this should be +# left set to false. (boolean value) +#debug_cache_backend = false + +# Memcache servers in the format of "host:port". (dogpile.cache.memcache and +# oslo_cache.memcache_pool backends only). (list value) +#memcache_servers = localhost:11211 + +# Number of seconds memcached server is considered dead before it is tried +# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only). +# (integer value) +#memcache_dead_retry = 300 + +# Timeout in seconds for every call to a server. (dogpile.cache.memcache and +# oslo_cache.memcache_pool backends only). (floating point value) +#memcache_socket_timeout = 3.0 + +# Max total number of open connections to every memcached server. +# (oslo_cache.memcache_pool backend only). (integer value) +#memcache_pool_maxsize = 10 + +# Number of seconds a connection to memcached is held unused in the pool before +# it is closed. (oslo_cache.memcache_pool backend only). (integer value) +#memcache_pool_unused_timeout = 60 + +# Number of seconds that an operation will wait to get a memcache client +# connection. (integer value) +#memcache_pool_connection_get_timeout = 10 + + +[catalog] + +# +# From keystone +# + +# Absolute path to the file used for the templated catalog backend. This option +# is only used if the `[catalog] driver` is set to `templated`. 
(string value) +#template_file = default_catalog.templates + +# Entry point for the catalog driver in the `keystone.catalog` namespace. +# Keystone provides a `sql` option (which supports basic CRUD operations +# through SQL), a `templated` option (which loads the catalog from a templated +# catalog file on disk), and a `endpoint_filter.sql` option (which supports +# arbitrary service catalogs per project). (string value) +#driver = sql + +# Toggle for catalog caching. This has no effect unless global caching is +# enabled. In a typical deployment, there is no reason to disable this. +# (boolean value) +#caching = true + +# Time to cache catalog data (in seconds). This has no effect unless global and +# catalog caching are both enabled. Catalog data (services, endpoints, etc.) +# typically does not change frequently, and so a longer duration than the +# global default may be desirable. (integer value) +#cache_time = + +# Maximum number of entities that will be returned in a catalog collection. +# There is typically no reason to set this, as it would be unusual for a +# deployment to have enough services or endpoints to exceed a reasonable limit. +# (integer value) +#list_limit = + + +[cors] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. Format: "://[:]", no trailing +# slash. Example: https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers. (list value) +#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,Openstack-Auth-Receipt + +# Maximum cache age of CORS preflight requests. (integer value) +#max_age = 3600 + +# Indicate which methods can be used during the actual request. 
(list value) +#allow_methods = GET,PUT,POST,DELETE,PATCH + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name,Openstack-Auth-Receipt + + +[credential] + +# +# From keystone +# + +# Entry point for the credential backend driver in the `keystone.credential` +# namespace. Keystone only provides a `sql` driver, so there's no reason to +# change this unless you are providing a custom entry point. (string value) +#driver = sql + +# Entry point for credential encryption and decryption operations in the +# `keystone.credential.provider` namespace. Keystone only provides a `fernet` +# driver, so there's no reason to change this unless you are providing a custom +# entry point to encrypt and decrypt credentials. (string value) +#provider = fernet + +# Directory containing Fernet keys used to encrypt and decrypt credentials +# stored in the credential backend. Fernet keys used to encrypt credentials +# have no relationship to Fernet keys used to encrypt Fernet tokens. Both sets +# of keys should be managed separately and require different rotation policies. +# Do not share this repository with the repository used to manage keys for +# Fernet tokens. (string value) +#key_repository = /etc/keystone/credential-keys/ + + +[database] +connection = mysql+pymysql://keystone:unime@iotronic-db/keystone + +# +# From oslo.db +# + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. 
(string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# If True, transparently enables support for handling MySQL Cluster (NDB). +# (boolean value) +#mysql_enable_ndb = false + +# Connections which have been present in the connection pool longer than this +# number of seconds will be replaced with a new one the next time they are +# checked out from the pool. (integer value) +# Deprecated group/name - [DATABASE]/idle_timeout +# Deprecated group/name - [database]/idle_timeout +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#connection_recycle_time = 3600 + +# DEPRECATED: Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: The option to set the minimum pool size is not supported by +# sqlalchemy. +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. Setting a value of +# 0 indicates no limit. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = 5 + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. 
(integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = 50 + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Minimum value: 0 +# Maximum value: 100 +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. (integer value) +#db_max_retries = 20 + +# Optional URL parameters to append onto the connection URL at connect time; +# specify as param1=value1¶m2=value2&... 
(string value) +#connection_parameters = + + +[domain_config] + +# +# From keystone +# + +# Entry point for the domain-specific configuration driver in the +# `keystone.resource.domain_config` namespace. Only a `sql` option is provided +# by keystone, so there is no reason to set this unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Toggle for caching of the domain-specific configuration backend. This has no +# effect unless global caching is enabled. There is normally no reason to +# disable this. (boolean value) +#caching = true + +# Time-to-live (TTL, in seconds) to cache domain-specific configuration data. +# This has no effect unless `[domain_config] caching` is enabled. (integer +# value) +#cache_time = 300 + + +[endpoint_filter] + +# +# From keystone +# + +# Entry point for the endpoint filter driver in the `keystone.endpoint_filter` +# namespace. Only a `sql` option is provided by keystone, so there is no reason +# to set this unless you are providing a custom entry point. (string value) +#driver = sql + +# This controls keystone's behavior if the configured endpoint filters do not +# result in any endpoints for a user + project pair (and therefore a +# potentially empty service catalog). If set to true, keystone will return the +# entire service catalog. If set to false, keystone will return an empty +# service catalog. (boolean value) +#return_all_endpoints_if_no_filter = true + + +[endpoint_policy] + +# +# From keystone +# + +# Entry point for the endpoint policy driver in the `keystone.endpoint_policy` +# namespace. Only a `sql` driver is provided by keystone, so there is no reason +# to set this unless you are providing a custom entry point. (string value) +#driver = sql + + +[eventlet_server] + +# +# From keystone +# + +# DEPRECATED: The IP address of the network interface for the public service to +# listen on. 
(host address value) +# Deprecated group/name - [DEFAULT]/bind_host +# Deprecated group/name - [DEFAULT]/public_bind_host +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. +#public_bind_host = 0.0.0.0 + +# DEPRECATED: The port number for the public service to listen on. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# Deprecated group/name - [DEFAULT]/public_port +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. +#public_port = 5000 + +# DEPRECATED: The IP address of the network interface for the admin service to +# listen on. (host address value) +# Deprecated group/name - [DEFAULT]/bind_host +# Deprecated group/name - [DEFAULT]/admin_bind_host +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. +#admin_bind_host = 0.0.0.0 + +# DEPRECATED: The port number for the admin service to listen on. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# Deprecated group/name - [DEFAULT]/admin_port +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. 
+#admin_port = 35357 + + +[extra_headers] +Distribution = Ubuntu + +# +# From keystone +# + +# Specifies the distribution of the keystone server. (string value) +#Distribution = Ubuntu + + +[federation] + +# +# From keystone +# + +# Entry point for the federation backend driver in the `keystone.federation` +# namespace. Keystone only provides a `sql` driver, so there is no reason to +# set this option unless you are providing a custom entry point. (string value) +#driver = sql + +# Prefix to use when filtering environment variable names for federated +# assertions. Matched variables are passed into the federated mapping engine. +# (string value) +#assertion_prefix = + +# Value to be used to obtain the entity ID of the Identity Provider from the +# environment. For `mod_shib`, this would be `Shib-Identity-Provider`. For +# `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`. For `mod_auth_mellon`, +# this could be `MELLON_IDP`. (string value) +#remote_id_attribute = + +# An arbitrary domain name that is reserved to allow federated ephemeral users +# to have a domain concept. Note that an admin will not be able to create a +# domain with this name or update an existing domain to this name. You are not +# advised to change this value unless you really have to. (string value) +#federated_domain_name = Federated + +# A list of trusted dashboard hosts. Before accepting a Single Sign-On request +# to return a token, the origin host must be a member of this list. This +# configuration option may be repeated for multiple values. You must set this +# in order to use web-based SSO flows. For example: +# trusted_dashboard=https://acme.example.com/auth/websso +# trusted_dashboard=https://beta.example.com/auth/websso (multi valued) +#trusted_dashboard = + +# Absolute path to an HTML file used as a Single Sign-On callback handler. This +# page is expected to redirect the user from keystone back to a trusted +# dashboard host, by form encoding a token in a POST request. 
Keystone's +# default value should be sufficient for most deployments. (string value) +#sso_callback_template = /etc/keystone/sso_callback_template.html + +# Toggle for federation caching. This has no effect unless global caching is +# enabled. There is typically no reason to disable this. (boolean value) +#caching = true + + +[fernet_receipts] + +# +# From keystone +# + +# Directory containing Fernet receipt keys. This directory must exist before +# using `keystone-manage fernet_setup` for the first time, must be writable by +# the user running `keystone-manage fernet_setup` or `keystone-manage +# fernet_rotate`, and of course must be readable by keystone's server process. +# The repository may contain keys in one of three states: a single staged key +# (always index 0) used for receipt validation, a single primary key (always +# the highest index) used for receipt creation and validation, and any number +# of secondary keys (all other index values) used for receipt validation. With +# multiple keystone nodes, each node must share the same key repository +# contents, with the exception of the staged key (index 0). It is safe to run +# `keystone-manage fernet_rotate` once on any one node to promote a staged key +# (index 0) to be the new primary (incremented from the previous highest +# index), and produce a new staged key (a new key with index 0); the resulting +# repository can then be atomically replicated to other nodes without any risk +# of race conditions (for example, it is safe to run `keystone-manage +# fernet_rotate` on host A, wait any amount of time, create a tarball of the +# directory on host A, unpack it on host B to a temporary location, and +# atomically move (`mv`) the directory into place on host B). Running +# `keystone-manage fernet_rotate` *twice* on a key repository without syncing +# other nodes will result in receipts that can not be validated by all nodes. 
+# (string value) +#key_repository = /etc/keystone/fernet-keys/ + +# This controls how many keys are held in rotation by `keystone-manage +# fernet_rotate` before they are discarded. The default value of 3 means that +# keystone will maintain one staged key (always index 0), one primary key (the +# highest numerical index), and one secondary key (every other index). +# Increasing this value means that additional secondary keys will be kept in +# the rotation. (integer value) +# Minimum value: 1 +#max_active_keys = 3 + + +[fernet_tokens] + +# +# From keystone +# + +# Directory containing Fernet token keys. This directory must exist before +# using `keystone-manage fernet_setup` for the first time, must be writable by +# the user running `keystone-manage fernet_setup` or `keystone-manage +# fernet_rotate`, and of course must be readable by keystone's server process. +# The repository may contain keys in one of three states: a single staged key +# (always index 0) used for token validation, a single primary key (always the +# highest index) used for token creation and validation, and any number of +# secondary keys (all other index values) used for token validation. With +# multiple keystone nodes, each node must share the same key repository +# contents, with the exception of the staged key (index 0). It is safe to run +# `keystone-manage fernet_rotate` once on any one node to promote a staged key +# (index 0) to be the new primary (incremented from the previous highest +# index), and produce a new staged key (a new key with index 0); the resulting +# repository can then be atomically replicated to other nodes without any risk +# of race conditions (for example, it is safe to run `keystone-manage +# fernet_rotate` on host A, wait any amount of time, create a tarball of the +# directory on host A, unpack it on host B to a temporary location, and +# atomically move (`mv`) the directory into place on host B). 
Running +# `keystone-manage fernet_rotate` *twice* on a key repository without syncing +# other nodes will result in tokens that can not be validated by all nodes. +# (string value) +#key_repository = /etc/keystone/fernet-keys/ + +# This controls how many keys are held in rotation by `keystone-manage +# fernet_rotate` before they are discarded. The default value of 3 means that +# keystone will maintain one staged key (always index 0), one primary key (the +# highest numerical index), and one secondary key (every other index). +# Increasing this value means that additional secondary keys will be kept in +# the rotation. (integer value) +# Minimum value: 1 +#max_active_keys = 3 + + +[healthcheck] + +# +# From oslo.middleware +# + +# DEPRECATED: The path to respond to healthcheck requests on. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#path = /healthcheck + +# Show more detailed information as part of the response. Security note: +# Enabling this option may expose sensitive details about the service being +# monitored. Be sure to verify that it will not violate your security policies. +# (boolean value) +#detailed = false + +# Additional backends that can perform health checks and report that +# information back as part of a request. (list value) +#backends = + +# Check the presence of a file to determine if an application is running on a +# port. Used by DisableByFileHealthcheck plugin. (string value) +#disable_by_file_path = + +# Check the presence of a file based on a port to determine if an application +# is running on a port. Expects a "port:path" list of strings. Used by +# DisableByFilesPortsHealthcheck plugin. (list value) +#disable_by_file_paths = + + +[identity] + +# +# From keystone +# + +# This references the domain to use for all Identity API v2 requests (which are +# not aware of domains). A domain with this ID can optionally be created for +# you by `keystone-manage bootstrap`. 
The domain referenced by this ID cannot +# be deleted on the v3 API, to prevent accidentally breaking the v2 API. There +# is nothing special about this domain, other than the fact that it must exist +# to order to maintain support for your v2 clients. There is typically no +# reason to change this value. (string value) +#default_domain_id = default + +# A subset (or all) of domains can have their own identity driver, each with +# their own partial configuration options, stored in either the resource +# backend or in a file in a domain configuration directory (depending on the +# setting of `[identity] domain_configurations_from_database`). Only values +# specific to the domain need to be specified in this manner. This feature is +# disabled by default, but may be enabled by default in a future release; set +# to true to enable. (boolean value) +#domain_specific_drivers_enabled = false + +# By default, domain-specific configuration data is read from files in the +# directory identified by `[identity] domain_config_dir`. Enabling this +# configuration option allows you to instead manage domain-specific +# configurations through the API, which are then persisted in the backend +# (typically, a SQL database), rather than using configuration files on disk. +# (boolean value) +#domain_configurations_from_database = false + +# Absolute path where keystone should locate domain-specific `[identity]` +# configuration files. This option has no effect unless `[identity] +# domain_specific_drivers_enabled` is set to true. There is typically no reason +# to change this value. (string value) +#domain_config_dir = /etc/keystone/domains + +# Entry point for the identity backend driver in the `keystone.identity` +# namespace. Keystone provides a `sql` and `ldap` driver. 
This option is also +# used as the default driver selection (along with the other configuration +# variables in this section) in the event that `[identity] +# domain_specific_drivers_enabled` is enabled, but no applicable domain- +# specific configuration is defined for the domain in question. Unless your +# deployment primarily relies on `ldap` AND is not using domain-specific +# configuration, you should typically leave this set to `sql`. (string value) +#driver = sql + +# Toggle for identity caching. This has no effect unless global caching is +# enabled. There is typically no reason to disable this. (boolean value) +#caching = true + +# Time to cache identity data (in seconds). This has no effect unless global +# and identity caching are enabled. (integer value) +#cache_time = 600 + +# Maximum allowed length for user passwords. Decrease this value to improve +# performance. Changing this value does not effect existing passwords. (integer +# value) +# Maximum value: 4096 +#max_password_length = 4096 + +# Maximum number of entities that will be returned in an identity collection. +# (integer value) +#list_limit = + +# The password hashing algorithm to use for passwords stored within keystone. +# (string value) +# Possible values: +# bcrypt - +# scrypt - +# pbkdf2_sha512 - +#password_hash_algorithm = bcrypt + +# This option represents a trade off between security and performance. Higher +# values lead to slower performance, but higher security. Changing this option +# will only affect newly created passwords as existing password hashes already +# have a fixed number of rounds applied, so it is safe to tune this option in a +# running cluster. The default for bcrypt is 12, must be between 4 and 31, +# inclusive. The default for scrypt is 16, must be within `range(1,32)`. The +# default for pbkdf_sha512 is 60000, must be within `range(1,1<<32)` WARNING: +# If using scrypt, increasing this value increases BOTH time AND memory +# requirements to hash a password. 
(integer value) +#password_hash_rounds = + +# Optional block size to pass to scrypt hash function (the `r` parameter). +# Useful for tuning scrypt to optimal performance for your CPU architecture. +# This option is only used when the `password_hash_algorithm` option is set to +# `scrypt`. Defaults to 8. (integer value) +#scrypt_block_size = + +# Optional parallelism to pass to scrypt hash function (the `p` parameter). +# This option is only used when the `password_hash_algorithm` option is set to +# `scrypt`. Defaults to 1. (integer value) +#scrypt_parallelism = + +# Number of bytes to use in scrypt and pbkdf2_sha512 hashing salt. Default for +# scrypt is 16 bytes. Default for pbkdf2_sha512 is 16 bytes. Limited to a +# maximum of 96 bytes due to the size of the column used to store password +# hashes. (integer value) +# Minimum value: 0 +# Maximum value: 96 +#salt_bytesize = + + +[identity_mapping] + +# +# From keystone +# + +# Entry point for the identity mapping backend driver in the +# `keystone.identity.id_mapping` namespace. Keystone only provides a `sql` +# driver, so there is no reason to change this unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Entry point for the public ID generator for user and group entities in the +# `keystone.identity.id_generator` namespace. The Keystone identity mapper only +# supports generators that produce 64 bytes or less. Keystone only provides a +# `sha256` entry point, so there is no reason to change this value unless +# you're providing a custom entry point. (string value) +#generator = sha256 + +# The format of user and group IDs changed in Juno for backends that do not +# generate UUIDs (for example, LDAP), with keystone providing a hash mapping to +# the underlying attribute in LDAP. By default this mapping is disabled, which +# ensures that existing IDs will not change. 
Even when the mapping is enabled +# by using domain-specific drivers (`[identity] +# domain_specific_drivers_enabled`), any users and groups from the default +# domain being handled by LDAP will still not be mapped to ensure their IDs +# remain backward compatible. Setting this value to false will enable the new +# mapping for all backends, including the default LDAP driver. It is only +# guaranteed to be safe to enable this option if you do not already have +# assignments for users and groups from the default LDAP domain, and you +# consider it to be acceptable for Keystone to provide the different IDs to +# clients than it did previously (existing IDs in the API will suddenly +# change). Typically this means that the only time you can set this value to +# false is when configuring a fresh installation, although that is the +# recommended value. (boolean value) +#backward_compatible_ids = true + + +[jwt_tokens] + +# +# From keystone +# + +# Directory containing public keys for validating JWS token signatures. This +# directory must exist in order for keystone's server process to start. It must +# also be readable by keystone's server process. It must contain at least one +# public key that corresponds to a private key in `keystone.conf [jwt_tokens] +# jws_private_key_repository`. This option is only applicable in deployments +# issuing JWS tokens and setting `keystone.conf [tokens] provider = jws`. +# (string value) +#jws_public_key_repository = /etc/keystone/jws-keys/public + +# Directory containing private keys for signing JWS tokens. This directory must +# exist in order for keystone's server process to start. It must also be +# readable by keystone's server process. It must contain at least one private +# key that corresponds to a public key in `keystone.conf [jwt_tokens] +# jws_public_key_repository`. In the event there are multiple private keys in +# this directory, keystone will use a key named `private.pem` to sign tokens. 
+# In the future, keystone may support the ability to sign tokens with multiple +# private keys. For now, only a key named `private.pem` within this directory +# is required to issue JWS tokens. This option is only applicable in +# deployments issuing JWS tokens and setting `keystone.conf [tokens] provider = +# jws`. (string value) +#jws_private_key_repository = /etc/keystone/jws-keys/private + + +[ldap] + +# +# From keystone +# + +# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified +# as a comma separated string. The first URL to successfully bind is used for +# the connection. (string value) +#url = ldap://localhost + +# The user name of the administrator bind DN to use when querying the LDAP +# server, if your LDAP server requires it. (string value) +#user = + +# The password of the administrator bind DN to use when querying the LDAP +# server, if your LDAP server requires it. (string value) +#password = + +# The default LDAP server suffix to use, if a DN is not defined via either +# `[ldap] user_tree_dn` or `[ldap] group_tree_dn`. (string value) +#suffix = cn=example,cn=com + +# The search scope which defines how deep to search within the search base. A +# value of `one` (representing `oneLevel` or `singleLevel`) indicates a search +# of objects immediately below to the base object, but does not include the +# base object itself. A value of `sub` (representing `subtree` or +# `wholeSubtree`) indicates a search of both the base object itself and the +# entire subtree below it. (string value) +# Possible values: +# one - +# sub - +#query_scope = one + +# Defines the maximum number of results per page that keystone should request +# from the LDAP server when listing objects. A value of zero (`0`) disables +# paging. (integer value) +# Minimum value: 0 +#page_size = 0 + +# The LDAP dereferencing option to use for queries involving aliases. 
A value +# of `default` falls back to using default dereferencing behavior configured by +# your `ldap.conf`. A value of `never` prevents aliases from being dereferenced +# at all. A value of `searching` dereferences aliases only after name +# resolution. A value of `finding` dereferences aliases only during name +# resolution. A value of `always` dereferences aliases in all cases. (string +# value) +# Possible values: +# never - +# searching - +# always - +# finding - +# default - +#alias_dereferencing = default + +# Sets the LDAP debugging level for LDAP calls. A value of 0 means that +# debugging is not enabled. This value is a bitmask, consult your LDAP +# documentation for possible values. (integer value) +# Minimum value: -1 +#debug_level = + +# Sets keystone's referral chasing behavior across directory partitions. If +# left unset, the system's default behavior will be used. (boolean value) +#chase_referrals = + +# The search base to use for users. Defaults to the `[ldap] suffix` value. +# (string value) +#user_tree_dn = + +# The LDAP search filter to use for users. (string value) +#user_filter = + +# The LDAP object class to use for users. (string value) +#user_objectclass = inetOrgPerson + +# The LDAP attribute mapped to user IDs in keystone. This must NOT be a +# multivalued attribute. User IDs are expected to be globally unique across +# keystone domains and URL-safe. (string value) +#user_id_attribute = cn + +# The LDAP attribute mapped to user names in keystone. User names are expected +# to be unique only within a keystone domain and are not expected to be URL- +# safe. (string value) +#user_name_attribute = sn + +# The LDAP attribute mapped to user descriptions in keystone. (string value) +#user_description_attribute = description + +# The LDAP attribute mapped to user emails in keystone. (string value) +#user_mail_attribute = mail + +# The LDAP attribute mapped to user passwords in keystone. 
(string value) +#user_pass_attribute = userPassword + +# The LDAP attribute mapped to the user enabled attribute in keystone. If +# setting this option to `userAccountControl`, then you may be interested in +# setting `[ldap] user_enabled_mask` and `[ldap] user_enabled_default` as well. +# (string value) +#user_enabled_attribute = enabled + +# Logically negate the boolean value of the enabled attribute obtained from the +# LDAP server. Some LDAP servers use a boolean lock attribute where "true" +# means an account is disabled. Setting `[ldap] user_enabled_invert = true` +# will allow these lock attributes to be used. This option will have no effect +# if either the `[ldap] user_enabled_mask` or `[ldap] user_enabled_emulation` +# options are in use. (boolean value) +#user_enabled_invert = false + +# Bitmask integer to select which bit indicates the enabled value if the LDAP +# server represents "enabled" as a bit on an integer rather than as a discrete +# boolean. A value of `0` indicates that the mask is not used. If this is not +# set to `0` the typical value is `2`. This is typically used when `[ldap] +# user_enabled_attribute = userAccountControl`. Setting this option causes +# keystone to ignore the value of `[ldap] user_enabled_invert`. (integer value) +# Minimum value: 0 +#user_enabled_mask = 0 + +# The default value to enable users. This should match an appropriate integer +# value if the LDAP server uses non-boolean (bitmask) values to indicate if a +# user is enabled or disabled. If this is not set to `True`, then the typical +# value is `512`. This is typically used when `[ldap] user_enabled_attribute = +# userAccountControl`. (string value) +#user_enabled_default = True + +# List of user attributes to ignore on create and update, or whether a specific +# user attribute should be filtered for list or show user. (list value) +#user_attribute_ignore = default_project_id + +# The LDAP attribute mapped to a user's default_project_id in keystone. 
This is +# most commonly used when keystone has write access to LDAP. (string value) +#user_default_project_id_attribute = + +# If enabled, keystone uses an alternative method to determine if a user is +# enabled or not by checking if they are a member of the group defined by the +# `[ldap] user_enabled_emulation_dn` option. Enabling this option causes +# keystone to ignore the value of `[ldap] user_enabled_invert`. (boolean value) +#user_enabled_emulation = false + +# DN of the group entry to hold enabled users when using enabled emulation. +# Setting this option has no effect unless `[ldap] user_enabled_emulation` is +# also enabled. (string value) +#user_enabled_emulation_dn = + +# Use the `[ldap] group_member_attribute` and `[ldap] group_objectclass` +# settings to determine membership in the emulated enabled group. Enabling this +# option has no effect unless `[ldap] user_enabled_emulation` is also enabled. +# (boolean value) +#user_enabled_emulation_use_group_config = false + +# A list of LDAP attribute to keystone user attribute pairs used for mapping +# additional attributes to users in keystone. The expected format is +# `:`, where `ldap_attr` is the attribute in the LDAP +# object and `user_attr` is the attribute which should appear in the identity +# API. (list value) +#user_additional_attribute_mapping = + +# The search base to use for groups. Defaults to the `[ldap] suffix` value. +# (string value) +#group_tree_dn = + +# The LDAP search filter to use for groups. (string value) +#group_filter = + +# The LDAP object class to use for groups. If setting this option to +# `posixGroup`, you may also be interested in enabling the `[ldap] +# group_members_are_ids` option. (string value) +#group_objectclass = groupOfNames + +# The LDAP attribute mapped to group IDs in keystone. This must NOT be a +# multivalued attribute. Group IDs are expected to be globally unique across +# keystone domains and URL-safe. 
(string value) +#group_id_attribute = cn + +# The LDAP attribute mapped to group names in keystone. Group names are +# expected to be unique only within a keystone domain and are not expected to +# be URL-safe. (string value) +#group_name_attribute = ou + +# The LDAP attribute used to indicate that a user is a member of the group. +# (string value) +#group_member_attribute = member + +# Enable this option if the members of the group object class are keystone user +# IDs rather than LDAP DNs. This is the case when using `posixGroup` as the +# group object class in Open Directory. (boolean value) +#group_members_are_ids = false + +# The LDAP attribute mapped to group descriptions in keystone. (string value) +#group_desc_attribute = description + +# List of group attributes to ignore on create and update. or whether a +# specific group attribute should be filtered for list or show group. (list +# value) +#group_attribute_ignore = + +# A list of LDAP attribute to keystone group attribute pairs used for mapping +# additional attributes to groups in keystone. The expected format is +# `:`, where `ldap_attr` is the attribute in the LDAP +# object and `group_attr` is the attribute which should appear in the identity +# API. (list value) +#group_additional_attribute_mapping = + +# If enabled, group queries will use Active Directory specific filters for +# nested groups. (boolean value) +#group_ad_nesting = false + +# An absolute path to a CA certificate file to use when communicating with LDAP +# servers. This option will take precedence over `[ldap] tls_cacertdir`, so +# there is no reason to set both. (string value) +#tls_cacertfile = + +# An absolute path to a CA certificate directory to use when communicating with +# LDAP servers. There is no reason to set this option if you've also set +# `[ldap] tls_cacertfile`. (string value) +#tls_cacertdir = + +# Enable TLS when communicating with LDAP servers. 
You should also set the +# `[ldap] tls_cacertfile` and `[ldap] tls_cacertdir` options when using this +# option. Do not set this option if you are using LDAP over SSL (LDAPS) instead +# of TLS. (boolean value) +#use_tls = false + +# Specifies which checks to perform against client certificates on incoming TLS +# sessions. If set to `demand`, then a certificate will always be requested and +# required from the LDAP server. If set to `allow`, then a certificate will +# always be requested but not required from the LDAP server. If set to `never`, +# then a certificate will never be requested. (string value) +# Possible values: +# demand - +# never - +# allow - +#tls_req_cert = demand + +# The connection timeout to use with the LDAP server. A value of `-1` means +# that connections will never timeout. (integer value) +# Minimum value: -1 +#connection_timeout = -1 + +# Enable LDAP connection pooling for queries to the LDAP server. There is +# typically no reason to disable this. (boolean value) +#use_pool = true + +# The size of the LDAP connection pool. This option has no effect unless +# `[ldap] use_pool` is also enabled. (integer value) +# Minimum value: 1 +#pool_size = 10 + +# The maximum number of times to attempt reconnecting to the LDAP server before +# aborting. A value of zero prevents retries. This option has no effect unless +# `[ldap] use_pool` is also enabled. (integer value) +# Minimum value: 0 +#pool_retry_max = 3 + +# The number of seconds to wait before attempting to reconnect to the LDAP +# server. This option has no effect unless `[ldap] use_pool` is also enabled. +# (floating point value) +#pool_retry_delay = 0.1 + +# The connection timeout to use when pooling LDAP connections. A value of `-1` +# means that connections will never timeout. This option has no effect unless +# `[ldap] use_pool` is also enabled. (integer value) +# Minimum value: -1 +#pool_connection_timeout = -1 + +# The maximum connection lifetime to the LDAP server in seconds. 
When this +# lifetime is exceeded, the connection will be unbound and removed from the +# connection pool. This option has no effect unless `[ldap] use_pool` is also +# enabled. (integer value) +# Minimum value: 1 +#pool_connection_lifetime = 600 + +# Enable LDAP connection pooling for end user authentication. There is +# typically no reason to disable this. (boolean value) +#use_auth_pool = true + +# The size of the connection pool to use for end user authentication. This +# option has no effect unless `[ldap] use_auth_pool` is also enabled. (integer +# value) +# Minimum value: 1 +#auth_pool_size = 100 + +# The maximum end user authentication connection lifetime to the LDAP server in +# seconds. When this lifetime is exceeded, the connection will be unbound and +# removed from the connection pool. This option has no effect unless `[ldap] +# use_auth_pool` is also enabled. (integer value) +# Minimum value: 1 +#auth_pool_connection_lifetime = 60 + + +[memcache] + +# +# From keystone +# + +# Number of seconds memcached server is considered dead before it is tried +# again. This is used by the key value store system. (integer value) +#dead_retry = 300 + +# Timeout in seconds for every call to a server. This is used by the key value +# store system. (integer value) +#socket_timeout = 3 + +# Max total number of open connections to every memcached server. This is used +# by the key value store system. (integer value) +#pool_maxsize = 10 + +# Number of seconds a connection to memcached is held unused in the pool before +# it is closed. This is used by the key value store system. (integer value) +#pool_unused_timeout = 60 + +# Number of seconds that an operation will wait to get a memcache client +# connection. This is used by the key value store system. (integer value) +#pool_connection_get_timeout = 10 + + +[oauth1] + +# +# From keystone +# + +# Entry point for the OAuth backend driver in the `keystone.oauth1` namespace. 
+# Typically, there is no reason to set this option unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Number of seconds for the OAuth Request Token to remain valid after being +# created. This is the amount of time the user has to authorize the token. +# Setting this option to zero means that request tokens will last forever. +# (integer value) +# Minimum value: 0 +#request_token_duration = 28800 + +# Number of seconds for the OAuth Access Token to remain valid after being +# created. This is the amount of time the consumer has to interact with the +# service provider (which is typically keystone). Setting this option to zero +# means that access tokens will last forever. (integer value) +# Minimum value: 0 +#access_token_duration = 86400 + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Name for the AMQP container. must be globally unique. Defaults to a generated +# UUID (string value) +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace = false + +# Attempt to connect via SSL. If no other ssl-related parameters are given, it +# will use the system's CA-bundle to verify the server's certificate. (boolean +# value) +#ssl = false + +# CA certificate PEM file used to verify the server's certificate (string +# value) +#ssl_ca_file = + +# Self-identifying certificate PEM file for client authentication (string +# value) +#ssl_cert_file = + +# Private key PEM file used to sign ssl_cert_file certificate (optional) +# (string value) +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +#ssl_key_password = + +# By default SSL checks that the name in the server's certificate matches the +# hostname in the transport_url. 
In some configurations it may be preferable to +# use the virtual hostname instead, for example if the server uses the Server +# Name Indication TLS extension (rfc6066) to provide a certificate per virtual +# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the +# virtual host name instead of the DNS name. (boolean value) +#ssl_verify_vhost = false + +# Space separated list of acceptable SASL mechanisms (string value) +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string value) +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +#sasl_config_name = + +# SASL realm to use if no realm present in username (string value) +#sasl_default_realm = + +# Seconds to pause before attempting to re-connect. (integer value) +# Minimum value: 1 +#connection_retry_interval = 1 + +# Increase the connection_retry_interval by this many seconds after each +# unsuccessful failover attempt. (integer value) +# Minimum value: 0 +#connection_retry_backoff = 2 + +# Maximum limit for connection_retry_interval + connection_retry_backoff +# (integer value) +# Minimum value: 1 +#connection_retry_interval_max = 30 + +# Time to pause between re-connecting an AMQP 1.0 link that failed due to a +# recoverable error. (integer value) +# Minimum value: 1 +#link_retry_delay = 10 + +# The maximum number of attempts to re-send a reply message which failed due to +# a recoverable error. (integer value) +# Minimum value: -1 +#default_reply_retry = 0 + +# The deadline for an rpc reply message delivery. (integer value) +# Minimum value: 5 +#default_reply_timeout = 30 + +# The deadline for an rpc cast or call message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_send_timeout = 30 + +# The deadline for a sent notification message delivery. Only used when caller +# does not provide a timeout expiry. 
(integer value) +# Minimum value: 5 +#default_notify_timeout = 30 + +# The duration to schedule a purge of idle sender links. Detach link after +# expiry. (integer value) +# Minimum value: 1 +#default_sender_link_timeout = 600 + +# Indicates the addressing mode used by the driver. +# Permitted values: +# 'legacy' - use legacy non-routable addressing +# 'routable' - use routable addresses +# 'dynamic' - use legacy addresses if the message bus does not support routing +# otherwise use routable addressing (string value) +#addressing_mode = dynamic + +# Enable virtual host support for those message buses that do not natively +# support virtual hosting (such as qpidd). When set to true the virtual host +# name will be added to all message bus addresses, effectively creating a +# private 'subnet' per virtual host. Set to False if the message bus supports +# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative +# as the name of the virtual host. (boolean value) +#pseudo_vhost = true + +# address prefix used when sending to a specific server (string value) +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +#group_request_prefix = unicast + +# Address prefix for all generated RPC addresses (string value) +#rpc_address_prefix = openstack.org/om/rpc + +# Address prefix for all generated Notification addresses (string value) +#notify_address_prefix = openstack.org/om/notify + +# Appended to the address prefix when sending a fanout message. Used by the +# message bus to identify fanout messages. (string value) +#multicast_address = multicast + +# Appended to the address prefix when sending to a particular RPC/Notification +# server. Used by the message bus to identify messages sent to a single +# destination. 
(string value) +#unicast_address = unicast + +# Appended to the address prefix when sending to a group of consumers. Used by +# the message bus to identify messages that should be delivered in a round- +# robin fashion across consumers. (string value) +#anycast_address = anycast + +# Exchange name used in notification addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_notification_exchange if set +# else control_exchange if set +# else 'notify' (string value) +#default_notification_exchange = + +# Exchange name used in RPC addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_rpc_exchange if set +# else control_exchange if set +# else 'rpc' (string value) +#default_rpc_exchange = + +# Window size for incoming RPC Reply messages. (integer value) +# Minimum value: 1 +#reply_link_credit = 200 + +# Window size for incoming RPC Request messages (integer value) +# Minimum value: 1 +#rpc_server_credit = 100 + +# Window size for incoming Notification messages (integer value) +# Minimum value: 1 +#notify_server_credit = 100 + +# Send messages of this type pre-settled. +# Pre-settled messages will not receive acknowledgement +# from the peer. Note well: pre-settled messages may be +# silently discarded if the delivery fails. +# Permitted values: +# 'rpc-call' - send RPC Calls pre-settled +# 'rpc-reply'- send RPC Replies pre-settled +# 'rpc-cast' - Send RPC Casts pre-settled +# 'notify' - Send Notifications pre-settled +# (multi valued) +#pre_settled = rpc-cast +#pre_settled = rpc-reply + + +[oslo_messaging_kafka] + +# +# From oslo.messaging +# + +# Max fetch bytes of Kafka consumer (integer value) +#kafka_max_fetch_bytes = 1048576 + +# Default timeout(s) for Kafka consumers (floating point value) +#kafka_consumer_timeout = 1.0 + +# DEPRECATED: Pool Size for Kafka Consumers (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. 
+# Reason: Driver no longer uses connection pool. +#pool_size = 10 + +# DEPRECATED: The pool size limit for connections expiration policy (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_min_size = 2 + +# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_ttl = 1200 + +# Group id for Kafka consumer. Consumers in one group will coordinate message +# consumption (string value) +#consumer_group = oslo_messaging_consumer + +# Upper bound on the delay for KafkaProducer batching in seconds (floating +# point value) +#producer_batch_timeout = 0.0 + +# Size of batch for the producer async send (integer value) +#producer_batch_size = 16384 + +# Enable asynchronous consumer commits (boolean value) +#enable_auto_commit = false + +# The maximum number of records returned in a poll call (integer value) +#max_poll_records = 500 + +# Protocol used to communicate with brokers (string value) +# Possible values: +# PLAINTEXT - +# SASL_PLAINTEXT - +# SSL - +# SASL_SSL - +#security_protocol = PLAINTEXT + +# Mechanism when security protocol is SASL (string value) +#sasl_mechanism = PLAIN + +# CA certificate PEM file used to verify the server certificate (string value) +#ssl_cafile = + + +[oslo_messaging_notifications] + +# +# From oslo.messaging +# + +# The Drivers(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +# Deprecated group/name - [DEFAULT]/notification_driver +#driver = + +# A URL representing the messaging driver to use for notifications. If not set, +# we fall back to the same configuration used for RPC. 
(string value) +# Deprecated group/name - [DEFAULT]/notification_transport_url +#transport_url = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +# Deprecated group/name - [DEFAULT]/notification_topics +#topics = notifications + +# The maximum number of attempts to re-send a notification message which failed +# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite +# (integer value) +#retry = -1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +#amqp_auto_delete = false + +# Connect over SSL. (boolean value) +# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl +#ssl = false + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version +#ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile +#ssl_key_file = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile +#ssl_cert_file = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs +#ssl_ca_file = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +#kombu_reconnect_delay = 1.0 + +# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not +# be used. This option may not be available in future versions. (string value) +#kombu_compression = + +# How long to wait a missing client before abandoning to send it its replies. 
+# This value should not be longer than rpc_response_timeout. (integer value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout +#kombu_missing_consumer_retry_timeout = 60 + +# Determines how the next RabbitMQ node is chosen in case the one we are +# currently connected to becomes unavailable. Takes effect only if more than +# one RabbitMQ node is provided in config. (string value) +# Possible values: +# round-robin - +# shuffle - +#kombu_failover_strategy = round-robin + +# The RabbitMQ login method. (string value) +# Possible values: +# PLAIN - +# AMQPLAIN - +# RABBIT-CR-DEMO - +#rabbit_login_method = AMQPLAIN + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. +# (integer value) +#rabbit_interval_max = 30 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this +# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring +# is no longer controlled by the x-ha-policy argument when declaring a queue. +# If you just want to make sure that all queues (except those with auto- +# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy +# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL (x-expires). +# Queues which are unused for the duration of the TTL are automatically +# deleted. The parameter affects only reply and fanout queues. (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. 
(integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer +# value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + + +[oslo_middleware] + +# +# From oslo.middleware +# + +# The maximum body size for each request, in bytes. (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +# Deprecated group/name - [DEFAULT]/max_request_body_size +#max_request_body_size = 114688 + +# DEPRECATED: The HTTP Header that will be used to determine what the original +# request protocol scheme was, even if it was hidden by a SSL termination +# proxy. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#secure_proxy_ssl_header = X-Forwarded-Proto + +# Whether the application is behind a proxy or not. This determines if the +# middleware should parse the headers or not. (boolean value) +#enable_proxy_headers_parsing = false + + +[oslo_policy] + +# +# From oslo.policy +# + +# This option controls whether or not to enforce scope when evaluating +# policies. If ``True``, the scope of the token used in the request is compared +# to the ``scope_types`` of the policy being enforced. If the scopes do not +# match, an ``InvalidScope`` exception will be raised. If ``False``, a message +# will be logged informing operators that policies are being invoked with +# mismatching scope. (boolean value) +#enforce_scope = false + +# The file that defines policies. (string value) +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. (string value) +#policy_default_rule = default + +# Directories where policy configuration files are stored. 
They can be relative +# to any directory in the search path defined by the config_dir option, or +# absolute paths. The file defined by policy_file must exist for these +# directories to be searched. Missing or empty directories are ignored. (multi +# valued) +#policy_dirs = policy.d + +# Content Type to send and receive data for REST based policy check (string +# value) +# Possible values: +# application/x-www-form-urlencoded - +# application/json - +#remote_content_type = application/x-www-form-urlencoded + +# server identity verification for REST based policy check (boolean value) +#remote_ssl_verify_server_crt = false + +# Absolute path to ca cert file for REST based policy check (string value) +#remote_ssl_ca_crt_file = + +# Absolute path to client cert for REST based policy check (string value) +#remote_ssl_client_crt_file = + +# Absolute path client key file REST based policy check (string value) +#remote_ssl_client_key_file = + + +[policy] + +# +# From keystone +# + +# Entry point for the policy backend driver in the `keystone.policy` namespace. +# Supplied drivers are `rules` (which does not support any CRUD operations for +# the v3 policy API) and `sql`. Typically, there is no reason to set this +# option unless you are providing a custom entry point. (string value) +#driver = sql + +# Maximum number of entities that will be returned in a policy collection. +# (integer value) +#list_limit = + + +[profiler] + +# +# From osprofiler +# + +# +# Enable the profiling for all services on this node. +# +# Default value is False (fully disable the profiling feature). +# +# Possible values: +# +# * True: Enables the feature +# * False: Disables the feature. The profiling cannot be started via this +# project +# operations. If the profiling is triggered by another project, this project +# part will be empty. +# (boolean value) +# Deprecated group/name - [profiler]/profiler_enabled +#enabled = false + +# +# Enable SQL requests profiling in services. 
+
#
+# Default value is False (SQL requests won't be traced).
+#
+# Possible values:
+#
+# * True: Enables SQL requests profiling. Each SQL query will be part of the
+# trace and can then be analyzed by how much time was spent for that.
+# * False: Disables SQL requests profiling. The spent time is only shown on a
+# higher level of operations. Single SQL queries cannot be analyzed this way.
+# (boolean value)
+#trace_sqlalchemy = false
+
+#
+# Secret key(s) to use for encrypting context data for performance profiling.
+#
+# This string value should have the following format:
+# <key1>[,<key2>,...<keyn>],
+# where each key is some random string. A user who triggers the profiling via
+# the REST API has to set one of these keys in the headers of the REST API call
+# to include profiling results of this node for this particular project.
+#
+# Both "enabled" flag and "hmac_keys" config options should be set to enable
+# profiling. Also, to generate correct profiling information across all
+# services
+# at least one key needs to be consistent between OpenStack projects. This
+# ensures it can be used from client side to generate the trace, containing
+# information from all possible resources.
+# (string value)
+#hmac_keys = SECRET_KEY
+
+#
+# Connection string for a notifier backend.
+#
+# Default value is ``messaging://`` which sets the notifier to oslo_messaging.
+#
+# Examples of possible values:
+#
+# * ``messaging://`` - use oslo_messaging driver for sending spans.
+# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans.
+# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans.
+# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending
+# spans.
+# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending
+# spans.
+# (string value)
+#connection_string = messaging://
+
+#
+# Document type for notification indexing in elasticsearch. 
+
# (string value)
+#es_doc_type = notification
+
+#
+# This parameter is a time value parameter (for example: es_scroll_time=2m),
+# indicating for how long the nodes that participate in the search will
+# maintain
+# relevant resources in order to continue and support it.
+# (string value)
+#es_scroll_time = 2m
+
+#
+# Elasticsearch splits large requests in batches. This parameter defines
+# maximum size of each batch (for example: es_scroll_size=10000).
+# (integer value)
+#es_scroll_size = 10000
+
+#
+# Redissentinel provides a timeout option on the connections.
+# This parameter defines that timeout (for example: socket_timeout=0.1).
+# (floating point value)
+#socket_timeout = 0.1
+
+#
+# Redissentinel uses a service name to identify a master redis service.
+# This parameter defines the name (for example:
+# ``sentinel_service_name=mymaster``).
+# (string value)
+#sentinel_service_name = mymaster
+
+#
+# Enable filter traces that contain error/exception to a separated place.
+#
+# Default value is set to False.
+#
+# Possible values:
+#
+# * True: Enable filter traces that contain error/exception.
+# * False: Disable the filter.
+# (boolean value)
+#filter_error_trace = false
+
+
+[receipt]
+
+#
+# From keystone
+#
+
+# The amount of time that a receipt should remain valid (in seconds). This
+# value should always be very short, as it represents how long a user has to
+# reattempt auth with the missing auth methods. (integer value)
+# Minimum value: 0
+# Maximum value: 86400
+#expiration = 300
+
+# Entry point for the receipt provider in the `keystone.receipt.provider`
+# namespace. The receipt provider controls the receipt construction and
+# validation operations. Keystone includes just the `fernet` receipt provider
+# for now. `fernet` receipts do not need to be persisted at all, but require
+# that you run `keystone-manage fernet_setup` (also see the `keystone-manage
+# fernet_rotate` command). 
(string value) +#provider = fernet + +# Toggle for caching receipt creation and validation data. This has no effect +# unless global caching is enabled, or if cache_on_issue is disabled as we only +# cache receipts on issue. (boolean value) +#caching = true + +# The number of seconds to cache receipt creation and validation data. This has +# no effect unless both global and `[receipt] caching` are enabled. (integer +# value) +# Minimum value: 0 +#cache_time = 300 + +# Enable storing issued receipt data to receipt validation cache so that first +# receipt validation doesn't actually cause full validation cycle. This option +# has no effect unless global caching and receipt caching are enabled. (boolean +# value) +#cache_on_issue = true + + +[resource] + +# +# From keystone +# + +# DEPRECATED: Entry point for the resource driver in the `keystone.resource` +# namespace. Only a `sql` driver is supplied by keystone. Unless you are +# writing proprietary drivers for keystone, you do not need to set this option. +# (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: Non-SQL resource cannot be used with SQL Identity and has been unable +# to be used since Ocata. SQL Resource backend is a requirement as of Pike. +# Setting this option no longer has an effect on how Keystone operates. +#driver = sql + +# Toggle for resource caching. This has no effect unless global caching is +# enabled. (boolean value) +# Deprecated group/name - [assignment]/caching +#caching = true + +# Time to cache resource data in seconds. This has no effect unless global +# caching is enabled. (integer value) +# Deprecated group/name - [assignment]/cache_time +#cache_time = + +# Maximum number of entities that will be returned in a resource collection. +# (integer value) +# Deprecated group/name - [assignment]/list_limit +#list_limit = + +# Name of the domain that owns the `admin_project_name`. 
If left unset, then +# there is no admin project. `[resource] admin_project_name` must also be set +# to use this option. (string value) +#admin_project_domain_name = + +# This is a special project which represents cloud-level administrator +# privileges across services. Tokens scoped to this project will contain a true +# `is_admin_project` attribute to indicate to policy systems that the role +# assignments on that specific project should apply equally across every +# project. If left unset, then there is no admin project, and thus no explicit +# means of cross-project role assignments. `[resource] +# admin_project_domain_name` must also be set to use this option. (string +# value) +#admin_project_name = + +# This controls whether the names of projects are restricted from containing +# URL-reserved characters. If set to `new`, attempts to create or update a +# project with a URL-unsafe name will fail. If set to `strict`, attempts to +# scope a token with a URL-unsafe project name will fail, thereby forcing all +# project names to be updated to be URL-safe. (string value) +# Possible values: +# off - +# new - +# strict - +#project_name_url_safe = off + +# This controls whether the names of domains are restricted from containing +# URL-reserved characters. If set to `new`, attempts to create or update a +# domain with a URL-unsafe name will fail. If set to `strict`, attempts to +# scope a token with a URL-unsafe domain name will fail, thereby forcing all +# domain names to be updated to be URL-safe. (string value) +# Possible values: +# off - +# new - +# strict - +#domain_name_url_safe = off + + +[revoke] + +# +# From keystone +# + +# Entry point for the token revocation backend driver in the `keystone.revoke` +# namespace. Keystone only provides a `sql` driver, so there is no reason to +# set this option unless you are providing a custom entry point. 
(string value) +#driver = sql + +# The number of seconds after a token has expired before a corresponding +# revocation event may be purged from the backend. (integer value) +# Minimum value: 0 +#expiration_buffer = 1800 + +# Toggle for revocation event caching. This has no effect unless global caching +# is enabled. (boolean value) +#caching = true + +# Time to cache the revocation list and the revocation events (in seconds). +# This has no effect unless global and `[revoke] caching` are both enabled. +# (integer value) +# Deprecated group/name - [token]/revocation_cache_time +#cache_time = 3600 + + +[role] + +# +# From keystone +# + +# Entry point for the role backend driver in the `keystone.role` namespace. +# Keystone only provides a `sql` driver, so there's no reason to change this +# unless you are providing a custom entry point. (string value) +#driver = + +# Toggle for role caching. This has no effect unless global caching is enabled. +# In a typical deployment, there is no reason to disable this. (boolean value) +#caching = true + +# Time to cache role data, in seconds. This has no effect unless both global +# caching and `[role] caching` are enabled. (integer value) +#cache_time = + +# Maximum number of entities that will be returned in a role collection. This +# may be useful to tune if you have a large number of discrete roles in your +# deployment. (integer value) +#list_limit = + + +[saml] + +# +# From keystone +# + +# Determines the lifetime for any SAML assertions generated by keystone, using +# `NotOnOrAfter` attributes. (integer value) +#assertion_expiration_time = 3600 + +# Name of, or absolute path to, the binary to be used for XML signing. Although +# only the XML Security Library (`xmlsec1`) is supported, it may have a non- +# standard name or path on your system. 
If keystone cannot find the binary +# itself, you may need to install the appropriate package, use this option to +# specify an absolute path, or adjust keystone's PATH environment variable. +# (string value) +#xmlsec1_binary = xmlsec1 + +# Absolute path to the public certificate file to use for SAML signing. The +# value cannot contain a comma (`,`). (string value) +#certfile = /etc/keystone/ssl/certs/signing_cert.pem + +# Absolute path to the private key file to use for SAML signing. The value +# cannot contain a comma (`,`). (string value) +#keyfile = /etc/keystone/ssl/private/signing_key.pem + +# This is the unique entity identifier of the identity provider (keystone) to +# use when generating SAML assertions. This value is required to generate +# identity provider metadata and must be a URI (a URL is recommended). For +# example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/idp`. (uri +# value) +#idp_entity_id = + +# This is the single sign-on (SSO) service location of the identity provider +# which accepts HTTP POST requests. A value is required to generate identity +# provider metadata. For example: `https://keystone.example.com/v3/OS- +# FEDERATION/saml2/sso`. (uri value) +#idp_sso_endpoint = + +# This is the language used by the identity provider's organization. (string +# value) +#idp_lang = en + +# This is the name of the identity provider's organization. (string value) +#idp_organization_name = SAML Identity Provider + +# This is the name of the identity provider's organization to be displayed. +# (string value) +#idp_organization_display_name = OpenStack SAML Identity Provider + +# This is the URL of the identity provider's organization. The URL referenced +# here should be useful to humans. (uri value) +#idp_organization_url = https://example.com/ + +# This is the company name of the identity provider's contact person. (string +# value) +#idp_contact_company = Example, Inc. + +# This is the given name of the identity provider's contact person. 
(string +# value) +#idp_contact_name = SAML Identity Provider Support + +# This is the surname of the identity provider's contact person. (string value) +#idp_contact_surname = Support + +# This is the email address of the identity provider's contact person. (string +# value) +#idp_contact_email = support@example.com + +# This is the telephone number of the identity provider's contact person. +# (string value) +#idp_contact_telephone = +1 800 555 0100 + +# This is the type of contact that best describes the identity provider's +# contact person. (string value) +# Possible values: +# technical - +# support - +# administrative - +# billing - +# other - +#idp_contact_type = other + +# Absolute path to the identity provider metadata file. This file should be +# generated with the `keystone-manage saml_idp_metadata` command. There is +# typically no reason to change this value. (string value) +#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml + +# The prefix of the RelayState SAML attribute to use when generating enhanced +# client and proxy (ECP) assertions. In a typical deployment, there is no +# reason to change this value. (string value) +#relay_state_prefix = ss:mem: + + +[security_compliance] + +# +# From keystone +# + +# The maximum number of days a user can go without authenticating before being +# considered "inactive" and automatically disabled (locked). This feature is +# disabled by default; set any value to enable it. This feature depends on the +# `sql` backend for the `[identity] driver`. When a user exceeds this threshold +# and is considered "inactive", the user's `enabled` attribute in the HTTP API +# may not match the value of the user's `enabled` column in the user table. +# (integer value) +# Minimum value: 1 +#disable_user_account_days_inactive = + +# The maximum number of times that a user can fail to authenticate before the +# user account is locked for the number of seconds specified by +# `[security_compliance] lockout_duration`. 
This feature is disabled by +# default. If this feature is enabled and `[security_compliance] +# lockout_duration` is not set, then users may be locked out indefinitely until +# the user is explicitly enabled via the API. This feature depends on the `sql` +# backend for the `[identity] driver`. (integer value) +# Minimum value: 1 +#lockout_failure_attempts = + +# The number of seconds a user account will be locked when the maximum number +# of failed authentication attempts (as specified by `[security_compliance] +# lockout_failure_attempts`) is exceeded. Setting this option will have no +# effect unless you also set `[security_compliance] lockout_failure_attempts` +# to a non-zero value. This feature depends on the `sql` backend for the +# `[identity] driver`. (integer value) +# Minimum value: 1 +#lockout_duration = 1800 + +# The number of days for which a password will be considered valid before +# requiring it to be changed. This feature is disabled by default. If enabled, +# new password changes will have an expiration date, however existing passwords +# would not be impacted. This feature depends on the `sql` backend for the +# `[identity] driver`. (integer value) +# Minimum value: 1 +#password_expires_days = + +# This controls the number of previous user password iterations to keep in +# history, in order to enforce that newly created passwords are unique. The +# total number which includes the new password should not be greater or equal +# to this value. Setting the value to zero (the default) disables this feature. +# Thus, to enable this feature, values must be greater than 0. This feature +# depends on the `sql` backend for the `[identity] driver`. (integer value) +# Minimum value: 0 +#unique_last_password_count = 0 + +# The number of days that a password must be used before the user can change +# it. This prevents users from changing their passwords immediately in order to +# wipe out their password history and reuse an old password. 
This feature does +# not prevent administrators from manually resetting passwords. It is disabled +# by default and allows for immediate password changes. This feature depends on +# the `sql` backend for the `[identity] driver`. Note: If +# `[security_compliance] password_expires_days` is set, then the value for this +# option should be less than the `password_expires_days`. (integer value) +# Minimum value: 0 +#minimum_password_age = 0 + +# The regular expression used to validate password strength requirements. By +# default, the regular expression will match any password. The following is an +# example of a pattern which requires at least 1 letter, 1 digit, and have a +# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature +# depends on the `sql` backend for the `[identity] driver`. (string value) +#password_regex = + +# Describe your password regular expression here in language for humans. If a +# password fails to match the regular expression, the contents of this +# configuration variable will be returned to users to explain why their +# requested password was insufficient. (string value) +#password_regex_description = + +# Enabling this option requires users to change their password when the user is +# created, or upon administrative reset. Before accessing any services, +# affected users will have to change their password. To ignore this requirement +# for specific users, such as service users, set the `options` attribute +# `ignore_change_password_upon_first_use` to `True` for the desired user via +# the update user API. This feature is disabled by default. This feature is +# only applicable with the `sql` backend for the `[identity] driver`. (boolean +# value) +#change_password_upon_first_use = false + + +[shadow_users] + +# +# From keystone +# + +# Entry point for the shadow users backend driver in the +# `keystone.identity.shadow_users` namespace. 
This driver is used for +# persisting local user references to externally-managed identities (via +# federation, LDAP, etc). Keystone only provides a `sql` driver, so there is no +# reason to change this option unless you are providing a custom entry point. +# (string value) +#driver = sql + + +[signing] + +# +# From keystone +# + +# DEPRECATED: Absolute path to the public certificate file to use for signing +# responses to revocation lists requests. Set this together with `[signing] +# keyfile`. For non-production environments, you may be interested in using +# `keystone-manage pki_setup` to generate self-signed certificates. (string +# value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#certfile = /etc/keystone/ssl/certs/signing_cert.pem + +# DEPRECATED: Absolute path to the private key file to use for signing +# responses to revocation lists requests. Set this together with `[signing] +# certfile`. (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#keyfile = /etc/keystone/ssl/private/signing_key.pem + +# DEPRECATED: Absolute path to the public certificate authority (CA) file to +# use when creating self-signed certificates with `keystone-manage pki_setup`. +# Set this together with `[signing] ca_key`. There is no reason to set this +# option unless you are requesting revocation lists in a non-production +# environment. Use a `[signing] certfile` issued from a trusted certificate +# authority instead. (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. 
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#ca_certs = /etc/keystone/ssl/certs/ca.pem + +# DEPRECATED: Absolute path to the private certificate authority (CA) key file +# to use when creating self-signed certificates with `keystone-manage +# pki_setup`. Set this together with `[signing] ca_certs`. There is no reason +# to set this option unless you are requesting revocation lists in a non- +# production environment. Use a `[signing] certfile` issued from a trusted +# certificate authority instead. (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#ca_key = /etc/keystone/ssl/private/cakey.pem + +# DEPRECATED: Key size (in bits) to use when generating a self-signed token +# signing certificate. There is no reason to set this option unless you are +# requesting revocation lists in a non-production environment. Use a `[signing] +# certfile` issued from a trusted certificate authority instead. (integer +# value) +# Minimum value: 1024 +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#key_size = 2048 + +# DEPRECATED: The validity period (in days) to use when generating a self- +# signed token signing certificate. There is no reason to set this option +# unless you are requesting revocation lists in a non-production environment. +# Use a `[signing] certfile` issued from a trusted certificate authority +# instead. (integer value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. 
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#valid_days = 3650 + +# DEPRECATED: The certificate subject to use when generating a self-signed +# token signing certificate. There is no reason to set this option unless you +# are requesting revocation lists in a non-production environment. Use a +# `[signing] certfile` issued from a trusted certificate authority instead. +# (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[token] + +provider = fernet + +# +# From keystone +# + +# The amount of time that a token should remain valid (in seconds). Drastically +# reducing this value may break "long-running" operations that involve multiple +# services to coordinate together, and will force users to authenticate with +# keystone more frequently. Drastically increasing this value will increase the +# number of tokens that will be simultaneously valid. Keystone tokens are also +# bearer tokens, so a shorter duration will also reduce the potential security +# impact of a compromised token. (integer value) +# Minimum value: 0 +# Maximum value: 9223372036854775807 +#expiration = 3600 + +# Entry point for the token provider in the `keystone.token.provider` +# namespace. The token provider controls the token construction, validation, +# and revocation operations. Supported upstream providers are `fernet` and +# `jws`. Neither `fernet` or `jws` tokens require persistence and both require +# additional setup. If using `fernet`, you're required to run `keystone-manage +# fernet_setup`, which creates symmetric keys used to encrypt tokens. 
If using +# `jws`, you're required to generate an ECDSA keypair using a SHA-256 hash +# algorithm for signing and validating token, which can be done with `keystone- +# manage create_jws_keypair`. Note that `fernet` tokens are encrypted and `jws` +# tokens are only signed. Please be sure to consider this if your deployment +# has security requirements regarding payload contents used to generate token +# IDs. (string value) +#provider = fernet + +# Toggle for caching token creation and validation data. This has no effect +# unless global caching is enabled. (boolean value) +#caching = true + +# The number of seconds to cache token creation and validation data. This has +# no effect unless both global and `[token] caching` are enabled. (integer +# value) +# Minimum value: 0 +# Maximum value: 9223372036854775807 +#cache_time = + +# This toggles support for revoking individual tokens by the token identifier +# and thus various token enumeration operations (such as listing all tokens +# issued to a specific user). These operations are used to determine the list +# of tokens to consider revoked. Do not disable this option if you're using the +# `kvs` `[revoke] driver`. (boolean value) +#revoke_by_id = true + +# This toggles whether scoped tokens may be re-scoped to a new project or +# domain, thereby preventing users from exchanging a scoped token (including +# those with a default project scope) for any other token. This forces users to +# either authenticate for unscoped tokens (and later exchange that unscoped +# token for tokens with a more specific scope) or to provide their credentials +# in every request for a scoped token to avoid re-scoping altogether. (boolean +# value) +#allow_rescope_scoped_token = true + +# DEPRECATED: This controls whether roles should be included with tokens that +# are not directly assigned to the token's scope, but are instead linked +# implicitly to other role assignments. 
(boolean value) +# This option is deprecated for removal since R. +# Its value may be silently ignored in the future. +# Reason: Default roles depend on a chain of implied role assignments. Ex: an +# admin user will also have the reader and member role. By ensuring that all +# these roles will always appear on the token validation response, we can +# improve the simplicity and readability of policy files. +#infer_roles = true + +# DEPRECATED: Enable storing issued token data to token validation cache so +# that first token validation doesn't actually cause full validation cycle. +# This option has no effect unless global caching is enabled and will still +# cache tokens even if `[token] caching = False`. (boolean value) +# This option is deprecated for removal since S. +# Its value may be silently ignored in the future. +# Reason: Keystone already exposes a configuration option for caching tokens. +# Having a separate configuration option to cache tokens when they are issued +# is redundant, unnecessarily complicated, and is misleading if token caching +# is disabled because tokens will still be pre-cached by default when they are +# issued. The ability to pre-cache tokens when they are issued is going to rely +# exclusively on the ``keystone.conf [token] caching`` option in the future. +#cache_on_issue = true + +# This controls the number of seconds that a token can be retrieved for beyond +# the built-in expiry time. This allows long running operations to succeed. +# Defaults to two days. (integer value) +#allow_expired_window = 172800 + + +[tokenless_auth] + +# +# From keystone +# + +# The list of distinguished names which identify trusted issuers of client +# certificates allowed to use X.509 tokenless authorization. If the option is +# absent then no certificates will be allowed. The format for the values of a +# distinguished name (DN) must be separated by a comma and contain no spaces. 
+# Furthermore, because an individual DN may contain commas, this configuration +# option may be repeated multiple times to represent multiple values. For +# example, keystone.conf would include two consecutive lines in order to trust +# two different DNs, such as `trusted_issuer = CN=john,OU=keystone,O=openstack` +# and `trusted_issuer = CN=mary,OU=eng,O=abc`. (multi valued) +#trusted_issuer = + +# The federated protocol ID used to represent X.509 tokenless authorization. +# This is used in combination with the value of `[tokenless_auth] +# issuer_attribute` to find a corresponding federated mapping. In a typical +# deployment, there is no reason to change this value. (string value) +#protocol = x509 + +# The name of the WSGI environment variable used to pass the issuer of the +# client certificate to keystone. This attribute is used as an identity +# provider ID for the X.509 tokenless authorization along with the protocol to +# look up its corresponding mapping. In a typical deployment, there is no +# reason to change this value. (string value) +#issuer_attribute = SSL_CLIENT_I_DN + + +[trust] + +# +# From keystone +# + +# Allows authorization to be redelegated from one user to another, effectively +# chaining trusts together. When disabled, the `remaining_uses` attribute of a +# trust is constrained to be zero. (boolean value) +#allow_redelegation = false + +# Maximum number of times that authorization can be redelegated from one user +# to another in a chain of trusts. This number may be reduced further for a +# specific trust. (integer value) +#max_redelegation_count = 3 + +# Entry point for the trust backend driver in the `keystone.trust` namespace. +# Keystone only provides a `sql` driver, so there is no reason to change this +# unless you are providing a custom entry point. (string value) +#driver = sql + + +[unified_limit] + +# +# From keystone +# + +# Entry point for the unified limit backend driver in the +# `keystone.unified_limit` namespace. 
Keystone only provides a `sql` driver, so +# there's no reason to change this unless you are providing a custom entry +# point. (string value) +#driver = sql + +# Toggle for unified limit caching. This has no effect unless global caching is +# enabled. In a typical deployment, there is no reason to disable this. +# (boolean value) +#caching = true + +# Time to cache unified limit data, in seconds. This has no effect unless both +# global caching and `[unified_limit] caching` are enabled. (integer value) +#cache_time = + +# Maximum number of entities that will be returned in a role collection. This +# may be useful to tune if you have a large number of unified limits in your +# deployment. (integer value) +#list_limit = + +# The enforcement model to use when validating limits associated to projects. +# Enforcement models will behave differently depending on the existing limits, +# which may result in backwards incompatible changes if a model is switched in +# a running deployment. (string value) +# Possible values: +# flat - +# strict_two_level - +#enforcement_model = flat + + +[wsgi] + +# +# From keystone +# + +# If set to true, this enables the oslo debug middleware in Keystone. This +# Middleware prints a lot of information about the request and the response. It +# is useful for getting information about the data on the wire (decoded) and +# passed to the WSGI application pipeline. This middleware has no effect on the +# "debug" setting in the [DEFAULT] section of the config file or setting +# Keystone's log-level to "DEBUG"; it is specific to debugging the WSGI data as +# it enters and leaves Keystone (specific request-related data). This option is +# used for introspection on the request and response data between the web +# server (apache, nginx, etc) and Keystone. This middleware is inserted as the +# first element in the middleware chain and will show the data closest to the +# wire. WARNING: NOT INTENDED FOR USE IN PRODUCTION. 
THIS MIDDLEWARE CAN AND +# WILL EMIT SENSITIVE/PRIVILEGED DATA. (boolean value) +#debug_middleware = false diff --git a/conf_mysql/99-openstack.conf b/conf_mysql/99-openstack.conf new file mode 100644 index 0000000..4cd6688 --- /dev/null +++ b/conf_mysql/99-openstack.conf @@ -0,0 +1,13 @@ +[mysqld] +bind-address = 0.0.0.0 + +default-storage-engine = innodb +innodb_file_per_table = on +max_connections = 4096 +collation-server = utf8_general_ci +character-set-server = utf8 +wait_timeout = 600 +interactive_timeout = 600 +net_read_timeout = 600 +net_write_timeout = 600 + diff --git a/conf_ui/local_settings.py b/conf_ui/local_settings.py new file mode 100644 index 0000000..042b682 --- /dev/null +++ b/conf_ui/local_settings.py @@ -0,0 +1,916 @@ +# -*- coding: utf-8 -*- + +import os + +from django.utils.translation import ugettext_lazy as _ + +from horizon.utils import secret_key + +from openstack_dashboard.settings import HORIZON_CONFIG + +DEBUG = False + +# This setting controls whether or not compression is enabled. Disabling +# compression makes Horizon considerably slower, but makes it much easier +# to debug JS and CSS changes +#COMPRESS_ENABLED = not DEBUG + +# This setting controls whether compression happens on the fly, or offline +# with `python manage.py compress` +# See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression +# for more information +#COMPRESS_OFFLINE = not DEBUG + +# WEBROOT is the location relative to Webserver root +# should end with a slash. +WEBROOT = '/' +#LOGIN_URL = WEBROOT + 'auth/login/' +#LOGOUT_URL = WEBROOT + 'auth/logout/' +# +# LOGIN_REDIRECT_URL can be used as an alternative for +# HORIZON_CONFIG.user_home, if user_home is not set. +# Do not set it to '/home/', as this will cause circular redirect loop +#LOGIN_REDIRECT_URL = WEBROOT + +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. 
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+ALLOWED_HOSTS = ['*', ]
+
+# Set SSL proxy settings:
+# Pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
+#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# The absolute path to the directory where message files are collected.
+# The message file must have a .json file extension. When the user logs in to
+# horizon, the message files collected are processed and displayed to the user.
+#MESSAGES_PATH=None
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# Versions specified here should be integers or floats, not strings.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, the identity service APIs have inconsistent
+# use of the decimal point, so valid options would be 2.0 or 3.
+# Minimum compute version to get the instance locked status is 2.9.
+#OPENSTACK_API_VERSIONS = {
+#    "data-processing": 1.1,
+#    "identity": 3,
+#    "image": 2,
+#    "volume": 2,
+#    "compute": 2,
+#}
+
+# Set this to True if running on a multi-domain model. When this is enabled, it
+# will require the user to enter the Domain name in addition to the username
+# for login.
+#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Set this to True if you want available domains displayed as a dropdown menu
+# on the login screen. It is strongly advised NOT to enable this for public
+# clouds, as advertising enabled domains to unauthenticated customers
+# irresponsibly exposes private information. This should only be used for
+# private clouds where the dashboard sits behind a corporate firewall.
+#OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = False
+
+# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to
+# set the available domains to choose from. This is a list of pairs whose first
+# value is the domain name and the second is the display name.
+#OPENSTACK_KEYSTONE_DOMAIN_CHOICES = (
+#  ('Default', 'Default'),
+#)
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# NOTE: This value must be the name of the default domain, NOT the ID.
+# Also, you will most likely have a value in the keystone policy file like this
+#    "cloud_admin": "rule:admin_required and domain_id:<your domain id>"
+# This value must be the name of the domain whose ID is specified there.
+#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set this to True to enable panels that provide the ability for users to
+# manage Identity Providers (IdPs) and establish a set of rules to map
+# federation protocol attributes to Identity API attributes.
+# This extension requires v3.0+ of the Identity API.
+#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False
+
+# Set Console type:
+# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL", "MKS"
+# or None. Set to None explicitly if you want to deactivate the console.
+#CONSOLE_TYPE = "AUTO"
+
+# Toggle showing the openrc file for Keystone V2.
+# If set to false the link will be removed from the user dropdown menu
+# and the API Access page
+#SHOW_KEYSTONE_V2_RC = True
+
+# If provided, a "Report Bug" link will be displayed in the site header
+# which links to the value of this setting (ideally a URL containing
+# information on how to report issues).
+#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"
+
+# Show backdrop element outside the modal, do not close the modal
+# after clicking on backdrop.
+#HORIZON_CONFIG["modal_backdrop"] = "static"
+
+# Specify a regular expression to validate user passwords.
+#HORIZON_CONFIG["password_validator"] = {
+#    "regex": '.*',
+#    "help_text": _("Your password does not meet the requirements."),
+#}
+
+# Turn off browser autocompletion for forms including the login form and
+# the database creation workflow if so desired.
+#HORIZON_CONFIG["password_autocomplete"] = "off"
+
+# Setting this to True will disable the reveal button for password fields,
+# including on the login form.
+#HORIZON_CONFIG["disable_password_reveal"] = False
+
+LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, i.e. regardless of the
+# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However,
+# there may be situations where you would want to set this explicitly, e.g.
+# when multiple dashboard instances are distributed on different machines
+# (usually behind a load-balancer). Either you have to make sure that a session
+# gets all requests routed to the same dashboard instance or you set the same
+# SECRET_KEY for all of them.
+SECRET_KEY = secret_key.generate_or_read_from_file('/var/lib/openstack-dashboard/secret_key')
+
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to log in again. To use
+# memcached set CACHES to something like
+
+CACHES = {
+    'default': {
+        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
+        'LOCATION': 'iotronic-ui:11211',
+    },
+}
+
+#CACHES = {
+#    'default': {
+#        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+#    }
+#}
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+#EMAIL_HOST = 'smtp.my-company.com'
+#EMAIL_PORT = 25
+#EMAIL_HOST_USER = 'djangomail'
+#EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+#AVAILABLE_REGIONS = [
+#    ('http://cluster1.example.com:5000/v3', 'cluster1'),
+#    ('http://cluster2.example.com:5000/v3', 'cluster2'),
+#]
+
+OPENSTACK_HOST = "keystone"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin"
+
+# For setting the default service region on a per-endpoint basis. Note that the
+# default value for this setting is {}, and below is just an example of how it
+# should be specified.
+# A key of '*' is an optional global default if no other key matches.
+#DEFAULT_SERVICE_REGIONS = {
+#    '*': 'RegionOne'
+#    OPENSTACK_KEYSTONE_URL: 'RegionTwo'
+#}
+
+# Enables keystone web single-sign-on if set to True.
+#WEBSSO_ENABLED = False
+
+# Authentication mechanism to be selected as default.
+# The value must be a key from WEBSSO_CHOICES.
+#WEBSSO_INITIAL_CHOICE = "credentials"
+
+# The list of authentication mechanisms which include keystone
+# federation protocols and identity provider/federation protocol
+# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol
+# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID
+# Connect respectively.
+# Do not remove the mandatory credentials mechanism.
+# Note: The last two tuples are sample mapping keys to a identity provider +# and federation protocol combination (WEBSSO_IDP_MAPPING). +#WEBSSO_CHOICES = ( +# ("credentials", _("Keystone Credentials")), +# ("oidc", _("OpenID Connect")), +# ("saml2", _("Security Assertion Markup Language")), +# ("acme_oidc", "ACME - OpenID Connect"), +# ("acme_saml2", "ACME - SAML2"), +#) + +# A dictionary of specific identity provider and federation protocol +# combinations. From the selected authentication mechanism, the value +# will be looked up as keys in the dictionary. If a match is found, +# it will redirect the user to a identity provider and federation protocol +# specific WebSSO endpoint in keystone, otherwise it will use the value +# as the protocol_id when redirecting to the WebSSO by protocol endpoint. +# NOTE: The value is expected to be a tuple formatted as: (, ). +#WEBSSO_IDP_MAPPING = { +# "acme_oidc": ("acme", "oidc"), +# "acme_saml2": ("acme", "saml2"), +#} + +# If set this URL will be used for web single-sign-on authentication +# instead of OPENSTACK_KEYSTONE_URL. This is needed in the deployment +# scenarios where network segmentation is used per security requirement. +# In this case, the controllers are not reachable from public network. +# Therefore, user's browser will not be able to access OPENSTACK_KEYSTONE_URL +# if it is set to the internal endpoint. +#WEBSSO_KEYSTONE_URL = "http://keystone-public.example.com/v3" + +# The Keystone Provider drop down uses Keystone to Keystone federation +# to switch between Keystone service providers. +# Set display name for Identity Provider (dropdown display name) +#KEYSTONE_PROVIDER_IDP_NAME = "Local Keystone" +# This id is used for only for comparison with the service provider IDs. This ID +# should not match any service provider IDs. 
+#KEYSTONE_PROVIDER_IDP_ID = "localkeystone" + +# Disable SSL certificate checks (useful for self-signed certificates): +#OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True, +} + +# Setting this to True, will add a new "Retrieve Password" action on instance, +# allowing Admin session password retrieval/decryption. +#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False + +# The Launch Instance user experience has been significantly enhanced. +# You can choose whether to enable the new launch instance experience, +# the legacy experience, or both. The legacy experience will be removed +# in a future release, but is available as a temporary backup setting to ensure +# compatibility with existing deployments. Further development will not be +# done on the legacy experience. Please report any problems with the new +# experience via the Launchpad tracking system. +# +# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to +# determine the experience to enable. Set them both to true to enable +# both. +#LAUNCH_INSTANCE_LEGACY_ENABLED = True +#LAUNCH_INSTANCE_NG_ENABLED = False + +# A dictionary of settings which can be used to provide the default values for +# properties found in the Launch Instance modal. 
+#LAUNCH_INSTANCE_DEFAULTS = { +# 'config_drive': False, +# 'enable_scheduler_hints': True, +# 'disable_image': False, +# 'disable_instance_snapshot': False, +# 'disable_volume': False, +# 'disable_volume_snapshot': False, +# 'create_volume': True, +#} + +# The Xen Hypervisor has the ability to set the mount point for volumes +# attached to instances (other Hypervisors currently do not). Setting +# can_set_mount_point to True will add the option to set the mount point +# from the UI. +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': False, + 'can_set_password': False, + 'requires_keypair': False, + 'enable_quotas': True +} + +# This settings controls whether IP addresses of servers are retrieved from +# neutron in the project instance table. Setting this to ``False`` may mitigate +# a performance issue in the project instance table in large deployments. +#OPENSTACK_INSTANCE_RETRIEVE_IP_ADDRESSES = True + +# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional +# services provided by cinder that is not exposed by its extension API. +OPENSTACK_CINDER_FEATURES = { + 'enable_backup': False, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currently available are load +# balancer service, security groups, quotas, VPN service. +OPENSTACK_NEUTRON_NETWORK = { + 'enable_router': True, + 'enable_quotas': True, + 'enable_ipv6': True, + 'enable_distributed_router': False, + 'enable_ha_router': False, + 'enable_fip_topology_check': True, + + # Default dns servers you would like to use when a subnet is + # created. This is only a default, users can still choose a different + # list of dns servers when creating a new subnet. + # The entries below are examples only, and are not appropriate for + # real deployments + # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"], + + # Set which provider network types are supported. 
Only the network types + # in this list will be available to choose from when creating a network. + # Network types include local, flat, vlan, gre, vxlan and geneve. + # 'supported_provider_types': ['*'], + + # You can configure available segmentation ID range per network type + # in your deployment. + # 'segmentation_id_range': { + # 'vlan': [1024, 2048], + # 'vxlan': [4094, 65536], + # }, + + # You can define additional provider network types here. + # 'extra_provider_types': { + # 'awesome_type': { + # 'display_name': 'Awesome New Type', + # 'require_physical_network': False, + # 'require_segmentation_id': True, + # } + # }, + + # Set which VNIC types are supported for port binding. Only the VNIC + # types in this list will be available to choose from when creating a + # port. + # VNIC types include 'normal', 'direct', 'direct-physical', 'macvtap', + # 'baremetal' and 'virtio-forwarder' + # Set to empty list or None to disable VNIC type selection. + 'supported_vnic_types': ['*'], + + # Set list of available physical networks to be selected in the physical + # network field on the admin create network modal. If it's set to an empty + # list, the field will be a regular input field. + # e.g. ['default', 'test'] + 'physical_networks': [], + +} + +# The OPENSTACK_HEAT_STACK settings can be used to disable password +# field required while launching the stack. +OPENSTACK_HEAT_STACK = { + 'enable_user_pass': True, +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. 
+#OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', _('Select format')), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('docker', _('Docker')), +# ('iso', _('ISO - Optical Disk Image')), +# ('ova', _('OVA - Open Virtual Appliance')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI - Virtual Disk Image')), +# ('vhd', _('VHD - Virtual Hard Disk')), +# ('vhdx', _('VHDX - Large Virtual Hard Disk')), +# ('vmdk', _('VMDK - Virtual Machine Disk')), +# ], +#} + +# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for +# image custom property attributes that appear on image detail pages. +IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type"), +} + +# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image +# custom properties should not be displayed in the Image Custom Properties +# table. +IMAGE_RESERVED_CUSTOM_PROPERTIES = [] + +# Set to 'legacy' or 'direct' to allow users to upload images to glance via +# Horizon server. When enabled, a file form field will appear on the create +# image form. If set to 'off', there will be no file form field on the create +# image form. See documentation for deployment considerations. +#HORIZON_IMAGES_UPLOAD_MODE = 'legacy' + +# Allow a location to be set when creating or updating Glance images. +# If using Glance V2, this value should be False unless the Glance +# configuration and policies allow setting locations. +#IMAGES_ALLOW_LOCATION = False + +# A dictionary of default settings for create image modal. 
+#CREATE_IMAGE_DEFAULTS = { +# 'image_visibility': "public", +#} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = None + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The size of chunk in bytes for downloading objects from Swift +SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024 + +# The default number of lines displayed for instance console log. +INSTANCE_LOG_LENGTH = 35 + +# Specify a maximum number of items to display in a dropdown. +DROPDOWN_MAX_ITEMS = 30 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. If you would like a different sort order, +# you can provide another flavor attribute as sorting key. Alternatively, you +# can provide a custom callback method to use for sorting. You can also provide +# a flag for reverse sort. 
For more info, see +# http://docs.python.org/2/library/functions.html#sorted +#CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': 'name', +# # or +# 'key': my_awesome_callback_method, +# 'reverse': False, +#} + +# Set this to True to display an 'Admin Password' field on the Change Password +# form to verify that it is indeed the admin logged-in who wants to change +# the password. +#ENFORCE_PASSWORD_CHECK = False + +# Modules that provide /auth routes that can be used to handle different types +# of user authentication. Add auth plugins that require extra route handling to +# this list. +#AUTHENTICATION_URLS = [ +# 'openstack_auth.urls', +#] + +# The Horizon Policy Enforcement engine uses these values to load per service +# policy rule files. The content of these files should match the files the +# OpenStack services are using to determine role based access control in the +# target installation. + +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") + +# Map of local copy of service policy files. +# Please insure that your identity policy file matches the one being used on +# your keystone servers. There is an alternate policy file that may be used +# in the Keystone v3 multi-domain case, policy.v3cloudsample.json. +# This file is not included in the Horizon repository by default but can be +# found at +# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \ +# policy.v3cloudsample.json +# Having matching policy files on the Horizon and Keystone servers is essential +# for normal operation. This holds true for all services and their policy files. 
+#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +# 'network': 'neutron_policy.json', +#} + +# Change this patch to the appropriate list of tuples containing +# a key, label and static directory containing two files: +# _variables.scss and _styles.scss +AVAILABLE_THEMES = [ + ('default', 'Default', 'themes/default'), +# ('material', 'Material', 'themes/material'), +] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + # If apache2 mod_wsgi is used to deploy OpenStack dashboard + # timestamp is output by mod_wsgi. If WSGI framework you use does not + # output timestamp for logging, add %(asctime)s in the following + # format definitions. + 'formatters': { + 'console': { + 'format': '%(levelname)s %(name)s %(message)s' + }, + 'operation': { + # The format of "%(message)s" is defined by + # OPERATION_LOG_OPTIONS['format'] + 'format': '%(message)s' + }, + }, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'logging.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. 
+ 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'console', + }, + 'operation': { + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'operation', + }, + }, + 'loggers': { + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'horizon.operation_log': { + 'handlers': ['operation'], + 'level': 'INFO', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneauth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'oslo_policy': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'urllib3': { + 'handlers': ['null'], + 'propagate': False, + }, + 'chardet.charsetprober': { + 'handlers': ['null'], + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + 'scss': { + 'handlers': ['null'], + 'propagate': False, + }, + }, +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. +SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': _('All TCP'), + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': _('All UDP'), + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': _('All ICMP'), + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { 
+ 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +# Deprecation Notice: +# +# The setting FLAVOR_EXTRA_KEYS has been deprecated. +# Please load extra spec metadata into the Glance Metadata Definition Catalog. +# +# The sample quota definitions can be found in: +# /etc/metadefs/compute-quota.json +# +# The metadata definition catalog supports CLI and API: +# $glance --os-image-api-version 2 help md-namespace-import +# $glance-manage db_load_metadefs +# +# See Metadata Definitions on: +# https://docs.openstack.org/glance/latest/user/glancemetadefcatalogapi.html + +# The hash algorithm to use for authentication tokens. This must +# match the hash algorithm that the identity server and the +# auth_token middleware are using. Allowed values are the +# algorithms supported by Python's hashlib library. +#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' + +# AngularJS requires some settings to be made available to +# the client side. Some settings are required by in-tree / built-in horizon +# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the +# form of ['SETTING_1','SETTING_2'], etc. +# +# You may remove settings from this list for security purposes, but do so at +# the risk of breaking a built-in horizon feature. These settings are required +# for horizon to function properly. Only remove them if you know what you +# are doing. These settings may in the future be moved to be defined within +# the enabled panel configuration. +# You should not add settings to this list for out of tree extensions. 
+# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI +REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES', + 'LAUNCH_INSTANCE_DEFAULTS', + 'OPENSTACK_IMAGE_FORMATS', + 'OPENSTACK_KEYSTONE_BACKEND', + 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN', + 'CREATE_IMAGE_DEFAULTS', + 'ENFORCE_PASSWORD_CHECK'] + +# Additional settings can be made available to the client side for +# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS +# !! Please use extreme caution as the settings are transferred via HTTP/S +# and are not encrypted on the browser. This is an experimental API and +# may be deprecated in the future without notice. +#REST_API_ADDITIONAL_SETTINGS = [] + +############################################################################### +# Ubuntu Settings +############################################################################### + + # The default theme if no cookie is present +DEFAULT_THEME = 'default' + +# Default Ubuntu apache configuration uses /horizon as the application root. +WEBROOT='/horizon/' + +# By default, validation of the HTTP Host header is disabled. Production +# installations should have this set accordingly. For more information +# see https://docs.djangoproject.com/en/dev/ref/settings/. +ALLOWED_HOSTS = '*' + +# Compress all assets offline as part of packaging installation +COMPRESS_OFFLINE = True + +# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded +# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame +# Scripting (XFS) vulnerability, so this option allows extra security hardening +# where iframes are not used in deployment. Default setting is True. +# For more information see: +# http://tinyurl.com/anticlickjack +#DISALLOW_IFRAME_EMBED = True + +# Help URL can be made available for the client. To provide a help URL, edit the +# following attribute to the URL of your choice. 
+#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" + +# Settings for OperationLogMiddleware +# OPERATION_LOG_ENABLED is flag to use the function to log an operation on +# Horizon. +# mask_targets is arrangement for appointing a target to mask. +# method_targets is arrangement of HTTP method to output log. +# format is the log contents. +#OPERATION_LOG_ENABLED = False +#OPERATION_LOG_OPTIONS = { +# 'mask_fields': ['password'], +# 'target_methods': ['POST'], +# 'ignored_urls': ['/js/', '/static/', '^/api/'], +# 'format': ("[%(client_ip)s] [%(domain_name)s]" +# " [%(domain_id)s] [%(project_name)s]" +# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]" +# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]" +# " [%(http_status)s] [%(param)s]"), +#} + +# The default date range in the Overview panel meters - either minus N +# days (if the value is integer N), or from the beginning of the current month +# until today (if set to None). This setting should be used to limit the amount +# of data fetched by default when rendering the Overview panel. +#OVERVIEW_DAYS_RANGE = 1 + +# To allow operators to require users provide a search criteria first +# before loading any data into the views, set the following dict +# attributes to True in each one of the panels you want to enable this feature. +# Follow the convention . +#FILTER_DATA_FIRST = { +# 'admin.instances': False, +# 'admin.images': False, +# 'admin.networks': False, +# 'admin.routers': False, +# 'admin.volumes': False, +# 'identity.users': False, +# 'identity.projects': False, +# 'identity.groups': False, +# 'identity.roles': False +#} + +# Dict used to restrict user private subnet cidr range. +# An empty list means that user input will not be restricted +# for a corresponding IP version. By default, there is +# no restriction for IPv4 or IPv6. 
To restrict +# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR +# to something like +#ALLOWED_PRIVATE_SUBNET_CIDR = { +# 'ipv4': ['10.0.0.0/8', '192.168.0.0/16'], +# 'ipv6': ['fc00::/7'] +#} +ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []} + +# Projects and users can have extra attributes as defined by keystone v3. +# Horizon has the ability to display these extra attributes via this setting. +# If you'd like to display extra data in the project or user tables, set the +# corresponding dict key to the attribute name, followed by the display name. +# For more information, see horizon's customization +# (https://docs.openstack.org/horizon/latest/configuration/customizing.html#horizon-customization-module-overrides) +#PROJECT_TABLE_EXTRA_INFO = { +# 'phone_num': _('Phone Number'), +#} +#USER_TABLE_EXTRA_INFO = { +# 'phone_num': _('Phone Number'), +#} + +# Password will have an expiration date when using keystone v3 and enabling the +# feature. +# This setting allows you to set the number of days that the user will be alerted +# prior to the password expiration. +# Once the password expires keystone will deny the access and users must +# contact an admin to change their password. +#PASSWORD_EXPIRES_WARNING_THRESHOLD_DAYS = 0 diff --git a/conf_wagent/iotronic.conf b/conf_wagent/iotronic.conf new file mode 100644 index 0000000..c096c3c --- /dev/null +++ b/conf_wagent/iotronic.conf @@ -0,0 +1,96 @@ +[DEFAULT] +transport_url = rabbit://openstack:unime@rabbitmq + +debug=True +proxy=nginx +log_file = /var/log/iotronic/iotronic-wagent.log + +# Authentication strategy used by iotronic-api: one of +# "keystone" or "noauth". "noauth" should not be used in a +# production environment because all authentication will be +# disabled. (string value) +auth_strategy=keystone + +# Enable pecan debug mode. WARNING: this is insecure and +# should not be used in a production environment. 
(boolean +# value) +#pecan_debug=false + + +[wamp] +wamp_transport_url = wss://iotronic-wagent:8181/ +wamp_realm = s4t +skip_cert_verify= True +register_agent = True + + + +[database] +connection = mysql+pymysql://iotronic:unime@iotronic-db/iotronic + +[keystone_authtoken] +www_authenticate_uri = http://keystone:5000 +auth_url = http://keystone:5000 +auth_plugin = password +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = iotronic +password = unime + + +[neutron] +auth_url = http://controller:5000 +url = http://controller:9696 +auth_strategy = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = neutron +password = netrn_pwd +retries = 3 +project_domain_id= default + + +[designate] +auth_url = http://controller:35357 +url = http://controller:9001 +auth_strategy = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = designate +password = password +retries = 3 +project_domain_id= default + + +[cors] +# Indicate whether this resource may be shared with the domain +# received in the requests "origin" header. Format: +# "://[:]", no trailing slash. Example: +# https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user +# credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. +# Defaults to HTTP Simple Headers. (list value) +#expose_headers = + +# Maximum cache age of CORS preflight requests. (integer +# value) +#max_age = 3600 + +# Indicate which methods can be used during the actual +# request. (list value) +#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH + +# Indicate which header field names may be used during the +# actual request. 
(list value) +#allow_headers = diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..b807a03 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,393 @@ +version: "3.8" + +services: + ca_service: + image: debian:buster + container_name: ca_service + networks: + - s4t + volumes: + - iotronic_ssl:/etc/ssl/iotronic # Condiviso con iotronic-wstun + entrypoint: ["/bin/bash", "-c"] + command: + - | + echo "[INFO] Installazione di OpenSSL..." + apt-get update && apt-get install -y openssl && + + echo "[INFO] Generazione della Root CA..." + mkdir -p /etc/ssl/iotronic && + cd /etc/ssl/iotronic && + + openssl genrsa -out iotronic_CA.key 2048 && + openssl req -x509 -new -nodes -key iotronic_CA.key -sha256 -days 18250 \ + -subj "/C=IT/O=iotronic" -out iotronic_CA.pem && + + echo "[INFO] Generazione della chiave privata e del certificato per Crossbar..." + openssl genrsa -out crossbar.key 2048 && + openssl req -new -key crossbar.key -subj "/C=IT/O=iotronic/CN=crossbar" -out crossbar.csr && + openssl x509 -req -in crossbar.csr -CA iotronic_CA.pem -CAkey iotronic_CA.key -CAcreateserial -out crossbar.pem -days 18250 -sha256 && + + echo "[INFO] Impostazione permessi certificati..." + chmod 644 iotronic_CA.key iotronic_CA.pem crossbar.key crossbar.pem + chmod 755 /etc/ssl/iotronic + + echo "[INFO] Certificati generati con successo." + tail -f /dev/null + + crossbar: + image: crossbario/crossbar + container_name: crossbar + restart: unless-stopped + networks: + - s4t + volumes: + - iotronic_ssl:/node/.crossbar/ssl # Condiviso con iotronic-wstun + - crossbar_data:/node/.crossbar + ports: + - "8181:8181" + entrypoint: ["/bin/sh", "-c"] + command: + - | + echo "[INFO] Attesa dei certificati..." + while [ ! -f /node/.crossbar/ssl/crossbar.pem ] || [ ! -f /node/.crossbar/ssl/crossbar.key ]; do + sleep 2 + done + echo "[INFO] Certificati trovati!" + + echo "[INFO] Scrittura configurazione Crossbar..." 
+ cat < /node/.crossbar/config.json + { + "version": 2, + "controller": {}, + "workers": [ + { + "type": "router", + "realms": [ + { + "name": "s4t", + "roles": [ + { + "name": "anonymous", + "permissions": [ + { + "uri": "*", + "allow": { + "publish": true, + "subscribe": true, + "call": true, + "register": true + } + } + ] + } + ] + } + ], + "transports": [ + { + "type": "websocket", + "endpoint": { + "type": "tcp", + "port": 8181, + "tls": { + "chain_certificates": ["/node/.crossbar/ssl/iotronic_CA.pem"], + "key": "/node/.crossbar/ssl/crossbar.key", + "certificate": "/node/.crossbar/ssl/crossbar.pem" + } + }, + "options":{ + "enable_webstatus": true, + "fail_by_drop": true, + "open_handshake_timeout": 2500, + "close_handshake_timeout": 1000, + "auto_ping_interval": 30000, + "auto_ping_timeout": 5000, + "auto_ping_size": 13 + } + } + ] + } + ] + } + EOF + + echo "[INFO] Avvio di Crossbar..." + crossbar start + + iotronic-wstun: + image: lucadagati/iotronic-wstun:latest + container_name: iotronic-wstun + restart: unless-stopped + networks: + - s4t + ports: + - "8080:8080" + - "50000-50100:50000-50100" + volumes: + - iotronic_ssl:/var/lib/iotronic/ssl + entrypoint: ["/bin/sh", "-c"] + command: + - | + set -x # DEBUG: Mostra i comandi eseguiti + echo "[INFO] Verifica permessi certificati..." + ls -l /var/lib/iotronic/ssl + while [ ! -e /var/lib/iotronic/ssl/iotronic_CA.pem ] || [ ! -e /var/lib/iotronic/ssl/crossbar.key ]; do + echo "[DEBUG] Certificati mancanti:" + ls -l /var/lib/iotronic/ssl + sleep 2 + done + + echo "[INFO] Certificati SSL trovati!" + ls -l /var/lib/iotronic/ssl + + echo "[INFO] Avvio di iotronic-wstun..." 
+ exec node /usr/local/lib/node_modules/@mdslab/wstun/bin/wstun.js -r -s 8080 --ssl=true --key=/var/lib/iotronic/ssl/iotronic_CA.key --cert=/var/lib/iotronic/ssl/iotronic_CA.pem + + iotronic-db: + image: mariadb:bionic + container_name: iotronic-db + restart: unless-stopped + networks: + - s4t + environment: + MYSQL_ROOT_PASSWORD: "s4t" + MYSQL_DATABASE: "unime" + MYSQL_USER: "admin" + MYSQL_PASSWORD: "s4t" + command: > + mysqld --bind-address=0.0.0.0 + --default-storage-engine=innodb + --innodb-file-per-table=on + --max-connections=4096 + --collation-server=utf8_general_ci + --character-set-server=utf8 + --max_allowed_packet=128M + --connect_timeout=120 + --wait_timeout=48800 + --interactive_timeout=48800 + ports: + - "3306:3306" + volumes: + - ./conf_mysql:/etc/mysql + - db_data:/var/lib/mysql + - ./init-db.sql:/docker-entrypoint-initdb.d/init-db.sql + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-ps4t"] + interval: 10s + retries: 5 + start_period: 20s + timeout: 5s + + rabbitmq: + image: rabbitmq:3 + container_name: rabbitmq + restart: unless-stopped + networks: + - s4t + ports: + - "5672:5672" + environment: + RABBIT_PASS: "unime" + healthcheck: + test: ["CMD", "rabbitmqctl", "status"] + interval: 10s + retries: 5 + start_period: 20s + timeout: 5s + entrypoint: ["/bin/bash", "-c"] + command: | + rabbitmq-server & + sleep 30 && + rabbitmqctl add_user openstack unime && + rabbitmqctl set_permissions openstack ".*" ".*" ".*" && + wait -n + + keystone: + image: lucadagati/iotronic-keystone + container_name: keystone + restart: unless-stopped + depends_on: + iotronic-db: + condition: service_healthy + rabbitmq: + condition: service_healthy + networks: + - s4t + environment: + # Credenziali admin e impostazioni Keystone + ADMIN_PASS: "s4t" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + OS_AUTH_URL: "http://keystone:5000/v3" + 
OS_IDENTITY_API_VERSION: "3" + + KEYSTONE_DB_NAME: "keystone" + KEYSTONE_DB_USER: "keystone" + KEYSTONE_DBPASS: "unime" + + DB_HOST: "iotronic-db" + RABBIT_PASS: "unime" + REGION_NAME: "RegionOne" + ports: + - "5000:5000" + volumes: + - ./conf_keystone:/etc/keystone + - keystone_data:/var/lib/keystone + - /var/log/keystone:/var/log/keystone + - /var/log/keystone-api:/var/log/apache2 + command: > + /bin/bash -c " + echo '[INFO] Attesa del database Keystone...'; + until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do + echo '[INFO] Database non ancora pronto, riprovo...'; + sleep 5; + done; + echo '[INFO] Database pronto!'; + mysql -u root -ps4t -h iotronic-db -e \"CREATE DATABASE IF NOT EXISTS keystone; + CREATE DATABASE IF NOT EXISTS iotronic; + DROP USER IF EXISTS 'keystone'@'localhost'; + DROP USER IF EXISTS 'keystone'@'%'; + CREATE USER 'keystone'@'localhost' IDENTIFIED BY 'unime'; + CREATE USER 'keystone'@'%' IDENTIFIED BY 'unime'; + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'; + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'; + DROP USER IF EXISTS 'iotronic'@'localhost'; + DROP USER IF EXISTS 'iotronic'@'%'; + CREATE USER 'iotronic'@'localhost' IDENTIFIED BY 'unime'; + CREATE USER 'iotronic'@'%' IDENTIFIED BY 'unime'; + GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'localhost'; + GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'%'; + FLUSH PRIVILEGES;\"; + echo '[INFO] Creazione delle cartelle per le chiavi Fernet e credenziali...'; + mkdir -p /etc/keystone/fernet-keys; + mkdir -p /etc/keystone/credential-keys; + chown -R keystone:keystone /etc/keystone; + + echo '[INFO] Verifica delle chiavi Fernet...'; + if [ ! 
-f /etc/keystone/fernet-keys/0 ]; then + echo '[INFO] Nessuna chiave Fernet trovata, eseguo fernet_setup...'; + su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; + else + echo '[INFO] Chiavi Fernet già presenti.'; + fi + + echo '[INFO] Verifica delle credenziali crittografate...'; + if [ ! -f /etc/keystone/credential-keys/0 ]; then + echo '[INFO] Nessuna chiave di credenziali trovata, eseguo credential_setup...'; + su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + else + echo '[INFO] Chiavi di credenziali già presenti.'; + fi + + echo '[INFO] Sincronizzazione delle tabelle di Keystone...'; + su -s /bin/sh -c 'keystone-manage db_sync' keystone; + echo '[INFO] Configurazione dei token Fernet...'; + su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Configurazione delle credenziali crittografate...'; + su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Bootstrap di Keystone...'; + su -s /bin/sh -c 'keystone-manage bootstrap --bootstrap-password s4t --bootstrap-admin-url http://keystone:5000/v3 --bootstrap-internal-url http://keystone:5000/v3 --bootstrap-public-url http://keystone:5000/v3 --bootstrap-region-id RegionOne' keystone; + exec apache2ctl -D FOREGROUND" + + iotronic-conductor: + image: lucadagati/iotronic-conductor:latest + container_name: iotronic-conductor + restart: unless-stopped + networks: + - s4t + environment: + # Credenziali DB Iotronic + MYSQL_ROOT_PASSWORD: "s4t" + DB_HOST: "iotronic-db" + IOTRONIC_DB_NAME: "iotronic" + IOTRONIC_DB_USER: "iotronic" + IOTRONIC_DBPASS: "unime" + + # Credenziali OpenStack + OS_AUTH_URL: "http://keystone:5000/v3" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" 
+ + # Stringa di connessione + DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@iotronic-db/iotronic" + ports: + - "8812:8812" + volumes: + - ./conf_conductor:/etc/iotronic + - iotronic_logs:/var/log/iotronic + command: > + /bin/bash -c " + echo '[INFO] Attesa del database MySQL...'; + until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do + echo '[INFO] Database non ancora pronto, riprovo...'; + sleep 5; + done; + iotronic-dbsync; + echo '[INFO] Configurazione dei permessi sui log...'; + chown -R iotronic:iotronic /var/log/iotronic; + echo '[INFO] Avvio di Iotronic Conductor...'; + iotronic-conductor" + + wagent: + image: lucadagati/iotronic-wagent:latest + container_name: iotronic-wagent + restart: unless-stopped + networks: + - s4t + environment: + # DB info + MYSQL_ROOT_PASSWORD: "s4t" + DB_HOST: "iotronic-db" + + # Stringa di connessione + DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@iotronic-db/iotronic" + + # Credenziali OpenStack + OS_AUTH_URL: "http://keystone:5000/v3" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + volumes: + - ./conf_wagent:/etc/iotronic + - iotronic_logs:/var/log/iotronic + command: > + /bin/bash -c " + echo '[INFO] Configurazione dei permessi sui log...'; + chown -R iotronic:iotronic /var/log/iotronic; + echo '[INFO] Avvio del Wagent...'; + exec /usr/local/bin/iotronic-wamp-agent --config-file /etc/iotronic/iotronic.conf" + + iotronic-ui: + image: lucadagati/iotronic-ui:latest + container_name: iotronic-ui + restart: unless-stopped + networks: + - s4t + ports: + - "8585:80" + volumes: + - iotronic-ui_config:/etc/openstack-dashboard + - iotronic-ui_logs:/var/log/apache2 + - ./conf_ui:/etc/openstack-dashboard # <--- Monta tutta la cartella + +networks: + s4t: + driver: bridge + +volumes: + db_data: + keystone_data: + iotronic_logs: + iotronic-ui_logs: + iotronic-ui_config: + crossbar_data: + ca_data: + 
iotronic_ssl: From 64602a14c609cfb455ed0328beddfc03816943f8 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:21:39 +0100 Subject: [PATCH 02/18] Delete docker-compose.yml --- docker-compose.yml | 393 --------------------------------------------- 1 file changed, 393 deletions(-) delete mode 100644 docker-compose.yml diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index b807a03..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,393 +0,0 @@ -version: "3.8" - -services: - ca_service: - image: debian:buster - container_name: ca_service - networks: - - s4t - volumes: - - iotronic_ssl:/etc/ssl/iotronic # Condiviso con iotronic-wstun - entrypoint: ["/bin/bash", "-c"] - command: - - | - echo "[INFO] Installazione di OpenSSL..." - apt-get update && apt-get install -y openssl && - - echo "[INFO] Generazione della Root CA..." - mkdir -p /etc/ssl/iotronic && - cd /etc/ssl/iotronic && - - openssl genrsa -out iotronic_CA.key 2048 && - openssl req -x509 -new -nodes -key iotronic_CA.key -sha256 -days 18250 \ - -subj "/C=IT/O=iotronic" -out iotronic_CA.pem && - - echo "[INFO] Generazione della chiave privata e del certificato per Crossbar..." - openssl genrsa -out crossbar.key 2048 && - openssl req -new -key crossbar.key -subj "/C=IT/O=iotronic/CN=crossbar" -out crossbar.csr && - openssl x509 -req -in crossbar.csr -CA iotronic_CA.pem -CAkey iotronic_CA.key -CAcreateserial -out crossbar.pem -days 18250 -sha256 && - - echo "[INFO] Impostazione permessi certificati..." - chmod 644 iotronic_CA.key iotronic_CA.pem crossbar.key crossbar.pem - chmod 755 /etc/ssl/iotronic - - echo "[INFO] Certificati generati con successo." 
- tail -f /dev/null - - crossbar: - image: crossbario/crossbar - container_name: crossbar - restart: unless-stopped - networks: - - s4t - volumes: - - iotronic_ssl:/node/.crossbar/ssl # Condiviso con iotronic-wstun - - crossbar_data:/node/.crossbar - ports: - - "8181:8181" - entrypoint: ["/bin/sh", "-c"] - command: - - | - echo "[INFO] Attesa dei certificati..." - while [ ! -f /node/.crossbar/ssl/crossbar.pem ] || [ ! -f /node/.crossbar/ssl/crossbar.key ]; do - sleep 2 - done - echo "[INFO] Certificati trovati!" - - echo "[INFO] Scrittura configurazione Crossbar..." - cat < /node/.crossbar/config.json - { - "version": 2, - "controller": {}, - "workers": [ - { - "type": "router", - "realms": [ - { - "name": "s4t", - "roles": [ - { - "name": "anonymous", - "permissions": [ - { - "uri": "*", - "allow": { - "publish": true, - "subscribe": true, - "call": true, - "register": true - } - } - ] - } - ] - } - ], - "transports": [ - { - "type": "websocket", - "endpoint": { - "type": "tcp", - "port": 8181, - "tls": { - "chain_certificates": ["/node/.crossbar/ssl/iotronic_CA.pem"], - "key": "/node/.crossbar/ssl/crossbar.key", - "certificate": "/node/.crossbar/ssl/crossbar.pem" - } - }, - "options":{ - "enable_webstatus": true, - "fail_by_drop": true, - "open_handshake_timeout": 2500, - "close_handshake_timeout": 1000, - "auto_ping_interval": 30000, - "auto_ping_timeout": 5000, - "auto_ping_size": 13 - } - } - ] - } - ] - } - EOF - - echo "[INFO] Avvio di Crossbar..." - crossbar start - - iotronic-wstun: - image: lucadagati/iotronic-wstun:latest - container_name: iotronic-wstun - restart: unless-stopped - networks: - - s4t - ports: - - "8080:8080" - - "50000-50100:50000-50100" - volumes: - - iotronic_ssl:/var/lib/iotronic/ssl - entrypoint: ["/bin/sh", "-c"] - command: - - | - set -x # DEBUG: Mostra i comandi eseguiti - echo "[INFO] Verifica permessi certificati..." - ls -l /var/lib/iotronic/ssl - while [ ! -e /var/lib/iotronic/ssl/iotronic_CA.pem ] || [ ! 
-e /var/lib/iotronic/ssl/crossbar.key ]; do - echo "[DEBUG] Certificati mancanti:" - ls -l /var/lib/iotronic/ssl - sleep 2 - done - - echo "[INFO] Certificati SSL trovati!" - ls -l /var/lib/iotronic/ssl - - echo "[INFO] Avvio di iotronic-wstun..." - exec node /usr/local/lib/node_modules/@mdslab/wstun/bin/wstun.js -r -s 8080 --ssl=true --key=/var/lib/iotronic/ssl/iotronic_CA.key --cert=/var/lib/iotronic/ssl/iotronic_CA.pem - - iotronic-db: - image: mariadb:bionic - container_name: iotronic-db - restart: unless-stopped - networks: - - s4t - environment: - MYSQL_ROOT_PASSWORD: "s4t" - MYSQL_DATABASE: "unime" - MYSQL_USER: "admin" - MYSQL_PASSWORD: "s4t" - command: > - mysqld --bind-address=0.0.0.0 - --default-storage-engine=innodb - --innodb-file-per-table=on - --max-connections=4096 - --collation-server=utf8_general_ci - --character-set-server=utf8 - --max_allowed_packet=128M - --connect_timeout=120 - --wait_timeout=48800 - --interactive_timeout=48800 - ports: - - "3306:3306" - volumes: - - ./conf_mysql:/etc/mysql - - db_data:/var/lib/mysql - - ./init-db.sql:/docker-entrypoint-initdb.d/init-db.sql - healthcheck: - test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-ps4t"] - interval: 10s - retries: 5 - start_period: 20s - timeout: 5s - - rabbitmq: - image: rabbitmq:3 - container_name: rabbitmq - restart: unless-stopped - networks: - - s4t - ports: - - "5672:5672" - environment: - RABBIT_PASS: "unime" - healthcheck: - test: ["CMD", "rabbitmqctl", "status"] - interval: 10s - retries: 5 - start_period: 20s - timeout: 5s - entrypoint: ["/bin/bash", "-c"] - command: | - rabbitmq-server & - sleep 30 && - rabbitmqctl add_user openstack unime && - rabbitmqctl set_permissions openstack ".*" ".*" ".*" && - wait -n - - keystone: - image: lucadagati/iotronic-keystone - container_name: keystone - restart: unless-stopped - depends_on: - iotronic-db: - condition: service_healthy - rabbitmq: - condition: service_healthy - networks: - - s4t - environment: - # 
Credenziali admin e impostazioni Keystone - ADMIN_PASS: "s4t" - OS_USERNAME: "admin" - OS_PASSWORD: "s4t" - OS_PROJECT_NAME: "admin" - OS_USER_DOMAIN_NAME: "Default" - OS_PROJECT_DOMAIN_NAME: "Default" - OS_AUTH_URL: "http://keystone:5000/v3" - OS_IDENTITY_API_VERSION: "3" - - KEYSTONE_DB_NAME: "keystone" - KEYSTONE_DB_USER: "keystone" - KEYSTONE_DBPASS: "unime" - - DB_HOST: "iotronic-db" - RABBIT_PASS: "unime" - REGION_NAME: "RegionOne" - ports: - - "5000:5000" - volumes: - - ./conf_keystone:/etc/keystone - - keystone_data:/var/lib/keystone - - /var/log/keystone:/var/log/keystone - - /var/log/keystone-api:/var/log/apache2 - command: > - /bin/bash -c " - echo '[INFO] Attesa del database Keystone...'; - until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do - echo '[INFO] Database non ancora pronto, riprovo...'; - sleep 5; - done; - echo '[INFO] Database pronto!'; - mysql -u root -ps4t -h iotronic-db -e \"CREATE DATABASE IF NOT EXISTS keystone; - CREATE DATABASE IF NOT EXISTS iotronic; - DROP USER IF EXISTS 'keystone'@'localhost'; - DROP USER IF EXISTS 'keystone'@'%'; - CREATE USER 'keystone'@'localhost' IDENTIFIED BY 'unime'; - CREATE USER 'keystone'@'%' IDENTIFIED BY 'unime'; - GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'; - GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'; - DROP USER IF EXISTS 'iotronic'@'localhost'; - DROP USER IF EXISTS 'iotronic'@'%'; - CREATE USER 'iotronic'@'localhost' IDENTIFIED BY 'unime'; - CREATE USER 'iotronic'@'%' IDENTIFIED BY 'unime'; - GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'localhost'; - GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'%'; - FLUSH PRIVILEGES;\"; - echo '[INFO] Creazione delle cartelle per le chiavi Fernet e credenziali...'; - mkdir -p /etc/keystone/fernet-keys; - mkdir -p /etc/keystone/credential-keys; - chown -R keystone:keystone /etc/keystone; - - echo '[INFO] Verifica delle chiavi Fernet...'; - if [ ! 
-f /etc/keystone/fernet-keys/0 ]; then - echo '[INFO] Nessuna chiave Fernet trovata, eseguo fernet_setup...'; - su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; - else - echo '[INFO] Chiavi Fernet già presenti.'; - fi - - echo '[INFO] Verifica delle credenziali crittografate...'; - if [ ! -f /etc/keystone/credential-keys/0 ]; then - echo '[INFO] Nessuna chiave di credenziali trovata, eseguo credential_setup...'; - su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; - else - echo '[INFO] Chiavi di credenziali già presenti.'; - fi - - echo '[INFO] Sincronizzazione delle tabelle di Keystone...'; - su -s /bin/sh -c 'keystone-manage db_sync' keystone; - echo '[INFO] Configurazione dei token Fernet...'; - su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; - echo '[INFO] Configurazione delle credenziali crittografate...'; - su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; - echo '[INFO] Bootstrap di Keystone...'; - su -s /bin/sh -c 'keystone-manage bootstrap --bootstrap-password s4t --bootstrap-admin-url http://keystone:5000/v3 --bootstrap-internal-url http://keystone:5000/v3 --bootstrap-public-url http://keystone:5000/v3 --bootstrap-region-id RegionOne' keystone; - exec apache2ctl -D FOREGROUND" - - iotronic-conductor: - image: lucadagati/iotronic-conductor:latest - container_name: iotronic-conductor - restart: unless-stopped - networks: - - s4t - environment: - # Credenziali DB Iotronic - MYSQL_ROOT_PASSWORD: "s4t" - DB_HOST: "iotronic-db" - IOTRONIC_DB_NAME: "iotronic" - IOTRONIC_DB_USER: "iotronic" - IOTRONIC_DBPASS: "unime" - - # Credenziali OpenStack - OS_AUTH_URL: "http://keystone:5000/v3" - OS_USERNAME: "admin" - OS_PASSWORD: "s4t" - OS_PROJECT_NAME: "admin" - OS_USER_DOMAIN_NAME: "Default" - OS_PROJECT_DOMAIN_NAME: "Default" 
- - # Stringa di connessione - DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@iotronic-db/iotronic" - ports: - - "8812:8812" - volumes: - - ./conf_conductor:/etc/iotronic - - iotronic_logs:/var/log/iotronic - command: > - /bin/bash -c " - echo '[INFO] Attesa del database MySQL...'; - until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do - echo '[INFO] Database non ancora pronto, riprovo...'; - sleep 5; - done; - iotronic-dbsync; - echo '[INFO] Configurazione dei permessi sui log...'; - chown -R iotronic:iotronic /var/log/iotronic; - echo '[INFO] Avvio di Iotronic Conductor...'; - iotronic-conductor" - - wagent: - image: lucadagati/iotronic-wagent:latest - container_name: iotronic-wagent - restart: unless-stopped - networks: - - s4t - environment: - # DB info - MYSQL_ROOT_PASSWORD: "s4t" - DB_HOST: "iotronic-db" - - # Stringa di connessione - DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@iotronic-db/iotronic" - - # Credenziali OpenStack - OS_AUTH_URL: "http://keystone:5000/v3" - OS_USERNAME: "admin" - OS_PASSWORD: "s4t" - OS_PROJECT_NAME: "admin" - OS_USER_DOMAIN_NAME: "Default" - OS_PROJECT_DOMAIN_NAME: "Default" - volumes: - - ./conf_wagent:/etc/iotronic - - iotronic_logs:/var/log/iotronic - command: > - /bin/bash -c " - echo '[INFO] Configurazione dei permessi sui log...'; - chown -R iotronic:iotronic /var/log/iotronic; - echo '[INFO] Avvio del Wagent...'; - exec /usr/local/bin/iotronic-wamp-agent --config-file /etc/iotronic/iotronic.conf" - - iotronic-ui: - image: lucadagati/iotronic-ui:latest - container_name: iotronic-ui - restart: unless-stopped - networks: - - s4t - ports: - - "8585:80" - volumes: - - iotronic-ui_config:/etc/openstack-dashboard - - iotronic-ui_logs:/var/log/apache2 - - ./conf_ui:/etc/openstack-dashboard # <--- Monta tutta la cartella - -networks: - s4t: - driver: bridge - -volumes: - db_data: - keystone_data: - iotronic_logs: - iotronic-ui_logs: - iotronic-ui_config: - crossbar_data: - ca_data: - 
iotronic_ssl: From 1170dafd155e9ec79b5bb1c9bf21231d95308eba Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:21:49 +0100 Subject: [PATCH 03/18] Delete conf_wagent directory --- conf_wagent/iotronic.conf | 96 --------------------------------------- 1 file changed, 96 deletions(-) delete mode 100644 conf_wagent/iotronic.conf diff --git a/conf_wagent/iotronic.conf b/conf_wagent/iotronic.conf deleted file mode 100644 index c096c3c..0000000 --- a/conf_wagent/iotronic.conf +++ /dev/null @@ -1,96 +0,0 @@ -[DEFAULT] -transport_url = rabbit://openstack:unime@rabbitmq - -debug=True -proxy=nginx -log_file = /var/log/iotronic/iotronic-wagent.log - -# Authentication strategy used by iotronic-api: one of -# "keystone" or "noauth". "noauth" should not be used in a -# production environment because all authentication will be -# disabled. (string value) -auth_strategy=keystone - -# Enable pecan debug mode. WARNING: this is insecure and -# should not be used in a production environment. 
(boolean -# value) -#pecan_debug=false - - -[wamp] -wamp_transport_url = wss://iotronic-wagent:8181/ -wamp_realm = s4t -skip_cert_verify= True -register_agent = True - - - -[database] -connection = mysql+pymysql://iotronic:unime@iotronic-db/iotronic - -[keystone_authtoken] -www_authenticate_uri = http://keystone:5000 -auth_url = http://keystone:5000 -auth_plugin = password -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = iotronic -password = unime - - -[neutron] -auth_url = http://controller:5000 -url = http://controller:9696 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = neutron -password = netrn_pwd -retries = 3 -project_domain_id= default - - -[designate] -auth_url = http://controller:35357 -url = http://controller:9001 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = designate -password = password -retries = 3 -project_domain_id= default - - -[cors] -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user -# credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. -# Defaults to HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer -# value) -#max_age = 3600 - -# Indicate which methods can be used during the actual -# request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the -# actual request. 
(list value) -#allow_headers = From 545692da4bb73e5a33815e61531953c3bf8c6960 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:21:58 +0100 Subject: [PATCH 04/18] Delete conf_ui directory --- conf_ui/local_settings.py | 916 -------------------------------------- 1 file changed, 916 deletions(-) delete mode 100644 conf_ui/local_settings.py diff --git a/conf_ui/local_settings.py b/conf_ui/local_settings.py deleted file mode 100644 index 042b682..0000000 --- a/conf_ui/local_settings.py +++ /dev/null @@ -1,916 +0,0 @@ -# -*- coding: utf-8 -*- - -import os - -from django.utils.translation import ugettext_lazy as _ - -from horizon.utils import secret_key - -from openstack_dashboard.settings import HORIZON_CONFIG - -DEBUG = False - -# This setting controls whether or not compression is enabled. Disabling -# compression makes Horizon considerably slower, but makes it much easier -# to debug JS and CSS changes -#COMPRESS_ENABLED = not DEBUG - -# This setting controls whether compression happens on the fly, or offline -# with `python manage.py compress` -# See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression -# for more information -#COMPRESS_OFFLINE = not DEBUG - -# WEBROOT is the location relative to Webserver root -# should end with a slash. -WEBROOT = '/' -#LOGIN_URL = WEBROOT + 'auth/login/' -#LOGOUT_URL = WEBROOT + 'auth/logout/' -# -# LOGIN_REDIRECT_URL can be used as an alternative for -# HORIZON_CONFIG.user_home, if user_home is not set. -# Do not set it to '/home/', as this will cause circular redirect loop -#LOGIN_REDIRECT_URL = WEBROOT - -# If horizon is running in production (DEBUG is False), set this -# with the list of host/domain names that the application can serve. 
-# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts -ALLOWED_HOSTS = ['*', ] - -# Set SSL proxy settings: -# Pass this header from the proxy after terminating the SSL, -# and don't forget to strip it from the client's request. -# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header -#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') - -# If Horizon is being served through SSL, then uncomment the following two -# settings to better secure the cookies from security exploits -#CSRF_COOKIE_SECURE = True -#SESSION_COOKIE_SECURE = True - -# The absolute path to the directory where message files are collected. -# The message file must have a .json file extension. When the user logins to -# horizon, the message files collected are processed and displayed to the user. -#MESSAGES_PATH=None - -# Overrides for OpenStack API versions. Use this setting to force the -# OpenStack dashboard to use a specific API version for a given service API. -# Versions specified here should be integers or floats, not strings. -# NOTE: The version should be formatted as it appears in the URL for the -# service API. For example, The identity service APIs have inconsistent -# use of the decimal point, so valid options would be 2.0 or 3. -# Minimum compute version to get the instance locked status is 2.9. -#OPENSTACK_API_VERSIONS = { -# "data-processing": 1.1, -# "identity": 3, -# "image": 2, -# "volume": 2, -# "compute": 2, -#} - -# Set this to True if running on a multi-domain model. When this is enabled, it -# will require the user to enter the Domain name in addition to the username -# for login. -#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False - -# Set this to True if you want available domains displayed as a dropdown menu -# on the login screen. 
It is strongly advised NOT to enable this for public -# clouds, as advertising enabled domains to unauthenticated customers -# irresponsibly exposes private information. This should only be used for -# private clouds where the dashboard sits behind a corporate firewall. -#OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = False - -# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to -# set the available domains to choose from. This is a list of pairs whose first -# value is the domain name and the second is the display name. -#OPENSTACK_KEYSTONE_DOMAIN_CHOICES = ( -# ('Default', 'Default'), -#) - -# Overrides the default domain used when running on single-domain model -# with Keystone V3. All entities will be created in the default domain. -# NOTE: This value must be the name of the default domain, NOT the ID. -# Also, you will most likely have a value in the keystone policy file like this -# "cloud_admin": "rule:admin_required and domain_id:" -# This value must be the name of the domain whose ID is specified there. -#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' - -# Set this to True to enable panels that provide the ability for users to -# manage Identity Providers (IdPs) and establish a set of rules to map -# federation protocol attributes to Identity API attributes. -# This extension requires v3.0+ of the Identity API. -#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False - -# Set Console type: -# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL", "MKS" -# or None. Set to None explicitly if you want to deactivate the console. -#CONSOLE_TYPE = "AUTO" - -# Toggle showing the openrc file for Keystone V2. -# If set to false the link will be removed from the user dropdown menu -# and the API Access page -#SHOW_KEYSTONE_V2_RC = True - -# If provided, a "Report Bug" link will be displayed in the site header -# which links to the value of this setting (ideally a URL containing -# information on how to report issues). 
-#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com" - -# Show backdrop element outside the modal, do not close the modal -# after clicking on backdrop. -#HORIZON_CONFIG["modal_backdrop"] = "static" - -# Specify a regular expression to validate user passwords. -#HORIZON_CONFIG["password_validator"] = { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements."), -#} - -# Turn off browser autocompletion for forms including the login form and -# the database creation workflow if so desired. -#HORIZON_CONFIG["password_autocomplete"] = "off" - -# Setting this to True will disable the reveal button for password fields, -# including on the login form. -#HORIZON_CONFIG["disable_password_reveal"] = False - -LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -# Set custom secret key: -# You can either set it to a specific value or you can let horizon generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, -# there may be situations where you would want to set this explicitly, e.g. -# when multiple dashboard instances are distributed on different machines -# (usually behind a load-balancer). Either you have to make sure that a session -# gets all requests routed to the same dashboard instance or you set the same -# SECRET_KEY for all of them. -SECRET_KEY = secret_key.generate_or_read_from_file('/var/lib/openstack-dashboard/secret_key') - -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. 
To use -# memcached set CACHES to something like - -CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION': 'iotronic-ui:11211', - }, -} - -#CACHES = { -# 'default': { -# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', -# } -#} - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# Configure these for your outgoing email host -#EMAIL_HOST = 'smtp.my-company.com' -#EMAIL_PORT = 25 -#EMAIL_HOST_USER = 'djangomail' -#EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). -#AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v3', 'cluster1'), -# ('http://cluster2.example.com:5000/v3', 'cluster2'), -#] - -OPENSTACK_HOST = "keystone" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin" - -# For setting the default service region on a per-endpoint basis. Note that the -# default value for this setting is {}, and below is just an example of how it -# should be specified. -# A key of '*' is an optional global default if no other key matches. -#DEFAULT_SERVICE_REGIONS = { -# '*': 'RegionOne' -# OPENSTACK_KEYSTONE_URL: 'RegionTwo' -#} - -# Enables keystone web single-sign-on if set to True. -#WEBSSO_ENABLED = False - -# Authentication mechanism to be selected as default. -# The value must be a key from WEBSSO_CHOICES. -#WEBSSO_INITIAL_CHOICE = "credentials" - -# The list of authentication mechanisms which include keystone -# federation protocols and identity provider/federation protocol -# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol -# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID -# Connect respectively. -# Do not remove the mandatory credentials mechanism. 
-# Note: The last two tuples are sample mapping keys to a identity provider -# and federation protocol combination (WEBSSO_IDP_MAPPING). -#WEBSSO_CHOICES = ( -# ("credentials", _("Keystone Credentials")), -# ("oidc", _("OpenID Connect")), -# ("saml2", _("Security Assertion Markup Language")), -# ("acme_oidc", "ACME - OpenID Connect"), -# ("acme_saml2", "ACME - SAML2"), -#) - -# A dictionary of specific identity provider and federation protocol -# combinations. From the selected authentication mechanism, the value -# will be looked up as keys in the dictionary. If a match is found, -# it will redirect the user to a identity provider and federation protocol -# specific WebSSO endpoint in keystone, otherwise it will use the value -# as the protocol_id when redirecting to the WebSSO by protocol endpoint. -# NOTE: The value is expected to be a tuple formatted as: (, ). -#WEBSSO_IDP_MAPPING = { -# "acme_oidc": ("acme", "oidc"), -# "acme_saml2": ("acme", "saml2"), -#} - -# If set this URL will be used for web single-sign-on authentication -# instead of OPENSTACK_KEYSTONE_URL. This is needed in the deployment -# scenarios where network segmentation is used per security requirement. -# In this case, the controllers are not reachable from public network. -# Therefore, user's browser will not be able to access OPENSTACK_KEYSTONE_URL -# if it is set to the internal endpoint. -#WEBSSO_KEYSTONE_URL = "http://keystone-public.example.com/v3" - -# The Keystone Provider drop down uses Keystone to Keystone federation -# to switch between Keystone service providers. -# Set display name for Identity Provider (dropdown display name) -#KEYSTONE_PROVIDER_IDP_NAME = "Local Keystone" -# This id is used for only for comparison with the service provider IDs. This ID -# should not match any service provider IDs. 
-#KEYSTONE_PROVIDER_IDP_ID = "localkeystone" - -# Disable SSL certificate checks (useful for self-signed certificates): -#OPENSTACK_SSL_NO_VERIFY = True - -# The CA certificate to use to verify SSL connections -#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. -# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. -OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True, - 'can_edit_group': True, - 'can_edit_project': True, - 'can_edit_domain': True, - 'can_edit_role': True, -} - -# Setting this to True, will add a new "Retrieve Password" action on instance, -# allowing Admin session password retrieval/decryption. -#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False - -# The Launch Instance user experience has been significantly enhanced. -# You can choose whether to enable the new launch instance experience, -# the legacy experience, or both. The legacy experience will be removed -# in a future release, but is available as a temporary backup setting to ensure -# compatibility with existing deployments. Further development will not be -# done on the legacy experience. Please report any problems with the new -# experience via the Launchpad tracking system. -# -# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to -# determine the experience to enable. Set them both to true to enable -# both. -#LAUNCH_INSTANCE_LEGACY_ENABLED = True -#LAUNCH_INSTANCE_NG_ENABLED = False - -# A dictionary of settings which can be used to provide the default values for -# properties found in the Launch Instance modal. 
-#LAUNCH_INSTANCE_DEFAULTS = { -# 'config_drive': False, -# 'enable_scheduler_hints': True, -# 'disable_image': False, -# 'disable_instance_snapshot': False, -# 'disable_volume': False, -# 'disable_volume_snapshot': False, -# 'create_volume': True, -#} - -# The Xen Hypervisor has the ability to set the mount point for volumes -# attached to instances (other Hypervisors currently do not). Setting -# can_set_mount_point to True will add the option to set the mount point -# from the UI. -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': False, - 'can_set_password': False, - 'requires_keypair': False, - 'enable_quotas': True -} - -# This settings controls whether IP addresses of servers are retrieved from -# neutron in the project instance table. Setting this to ``False`` may mitigate -# a performance issue in the project instance table in large deployments. -#OPENSTACK_INSTANCE_RETRIEVE_IP_ADDRESSES = True - -# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional -# services provided by cinder that is not exposed by its extension API. -OPENSTACK_CINDER_FEATURES = { - 'enable_backup': False, -} - -# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional -# services provided by neutron. Options currently available are load -# balancer service, security groups, quotas, VPN service. -OPENSTACK_NEUTRON_NETWORK = { - 'enable_router': True, - 'enable_quotas': True, - 'enable_ipv6': True, - 'enable_distributed_router': False, - 'enable_ha_router': False, - 'enable_fip_topology_check': True, - - # Default dns servers you would like to use when a subnet is - # created. This is only a default, users can still choose a different - # list of dns servers when creating a new subnet. - # The entries below are examples only, and are not appropriate for - # real deployments - # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"], - - # Set which provider network types are supported. 
Only the network types - # in this list will be available to choose from when creating a network. - # Network types include local, flat, vlan, gre, vxlan and geneve. - # 'supported_provider_types': ['*'], - - # You can configure available segmentation ID range per network type - # in your deployment. - # 'segmentation_id_range': { - # 'vlan': [1024, 2048], - # 'vxlan': [4094, 65536], - # }, - - # You can define additional provider network types here. - # 'extra_provider_types': { - # 'awesome_type': { - # 'display_name': 'Awesome New Type', - # 'require_physical_network': False, - # 'require_segmentation_id': True, - # } - # }, - - # Set which VNIC types are supported for port binding. Only the VNIC - # types in this list will be available to choose from when creating a - # port. - # VNIC types include 'normal', 'direct', 'direct-physical', 'macvtap', - # 'baremetal' and 'virtio-forwarder' - # Set to empty list or None to disable VNIC type selection. - 'supported_vnic_types': ['*'], - - # Set list of available physical networks to be selected in the physical - # network field on the admin create network modal. If it's set to an empty - # list, the field will be a regular input field. - # e.g. ['default', 'test'] - 'physical_networks': [], - -} - -# The OPENSTACK_HEAT_STACK settings can be used to disable password -# field required while launching the stack. -OPENSTACK_HEAT_STACK = { - 'enable_user_pass': True, -} - -# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features -# in the OpenStack Dashboard related to the Image service, such as the list -# of supported image formats. 
-#OPENSTACK_IMAGE_BACKEND = { -# 'image_formats': [ -# ('', _('Select format')), -# ('aki', _('AKI - Amazon Kernel Image')), -# ('ami', _('AMI - Amazon Machine Image')), -# ('ari', _('ARI - Amazon Ramdisk Image')), -# ('docker', _('Docker')), -# ('iso', _('ISO - Optical Disk Image')), -# ('ova', _('OVA - Open Virtual Appliance')), -# ('qcow2', _('QCOW2 - QEMU Emulator')), -# ('raw', _('Raw')), -# ('vdi', _('VDI - Virtual Disk Image')), -# ('vhd', _('VHD - Virtual Hard Disk')), -# ('vhdx', _('VHDX - Large Virtual Hard Disk')), -# ('vmdk', _('VMDK - Virtual Machine Disk')), -# ], -#} - -# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for -# image custom property attributes that appear on image detail pages. -IMAGE_CUSTOM_PROPERTY_TITLES = { - "architecture": _("Architecture"), - "kernel_id": _("Kernel ID"), - "ramdisk_id": _("Ramdisk ID"), - "image_state": _("Euca2ools state"), - "project_id": _("Project ID"), - "image_type": _("Image Type"), -} - -# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image -# custom properties should not be displayed in the Image Custom Properties -# table. -IMAGE_RESERVED_CUSTOM_PROPERTIES = [] - -# Set to 'legacy' or 'direct' to allow users to upload images to glance via -# Horizon server. When enabled, a file form field will appear on the create -# image form. If set to 'off', there will be no file form field on the create -# image form. See documentation for deployment considerations. -#HORIZON_IMAGES_UPLOAD_MODE = 'legacy' - -# Allow a location to be set when creating or updating Glance images. -# If using Glance V2, this value should be False unless the Glance -# configuration and policies allow setting locations. -#IMAGES_ALLOW_LOCATION = False - -# A dictionary of default settings for create image modal. 
-#CREATE_IMAGE_DEFAULTS = { -# 'image_visibility': "public", -#} - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'publicURL'. -#OPENSTACK_ENDPOINT_TYPE = "publicURL" - -# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the -# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is None. This -# value should differ from OPENSTACK_ENDPOINT_TYPE if used. -#SECONDARY_ENDPOINT_TYPE = None - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -# The size of chunk in bytes for downloading objects from Swift -SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024 - -# The default number of lines displayed for instance console log. -INSTANCE_LOG_LENGTH = 35 - -# Specify a maximum number of items to display in a dropdown. -DROPDOWN_MAX_ITEMS = 30 - -# The timezone of the server. This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -# When launching an instance, the menu of available flavors is -# sorted by RAM usage, ascending. If you would like a different sort order, -# you can provide another flavor attribute as sorting key. Alternatively, you -# can provide a custom callback method to use for sorting. You can also provide -# a flag for reverse sort. 
For more info, see -# http://docs.python.org/2/library/functions.html#sorted -#CREATE_INSTANCE_FLAVOR_SORT = { -# 'key': 'name', -# # or -# 'key': my_awesome_callback_method, -# 'reverse': False, -#} - -# Set this to True to display an 'Admin Password' field on the Change Password -# form to verify that it is indeed the admin logged-in who wants to change -# the password. -#ENFORCE_PASSWORD_CHECK = False - -# Modules that provide /auth routes that can be used to handle different types -# of user authentication. Add auth plugins that require extra route handling to -# this list. -#AUTHENTICATION_URLS = [ -# 'openstack_auth.urls', -#] - -# The Horizon Policy Enforcement engine uses these values to load per service -# policy rule files. The content of these files should match the files the -# OpenStack services are using to determine role based access control in the -# target installation. - -# Path to directory containing policy.json files -#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") - -# Map of local copy of service policy files. -# Please insure that your identity policy file matches the one being used on -# your keystone servers. There is an alternate policy file that may be used -# in the Keystone v3 multi-domain case, policy.v3cloudsample.json. -# This file is not included in the Horizon repository by default but can be -# found at -# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \ -# policy.v3cloudsample.json -# Having matching policy files on the Horizon and Keystone servers is essential -# for normal operation. This holds true for all services and their policy files. 
-#POLICY_FILES = { -# 'identity': 'keystone_policy.json', -# 'compute': 'nova_policy.json', -# 'volume': 'cinder_policy.json', -# 'image': 'glance_policy.json', -# 'network': 'neutron_policy.json', -#} - -# Change this patch to the appropriate list of tuples containing -# a key, label and static directory containing two files: -# _variables.scss and _styles.scss -AVAILABLE_THEMES = [ - ('default', 'Default', 'themes/default'), -# ('material', 'Material', 'themes/material'), -] - -LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. - 'disable_existing_loggers': False, - # If apache2 mod_wsgi is used to deploy OpenStack dashboard - # timestamp is output by mod_wsgi. If WSGI framework you use does not - # output timestamp for logging, add %(asctime)s in the following - # format definitions. - 'formatters': { - 'console': { - 'format': '%(levelname)s %(name)s %(message)s' - }, - 'operation': { - # The format of "%(message)s" is defined by - # OPERATION_LOG_OPTIONS['format'] - 'format': '%(message)s' - }, - }, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'logging.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. 
- 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'console', - }, - 'operation': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'operation', - }, - }, - 'loggers': { - 'horizon': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'horizon.operation_log': { - 'handlers': ['operation'], - 'level': 'INFO', - 'propagate': False, - }, - 'openstack_dashboard': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'cinderclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneauth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'glanceclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'neutronclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'swiftclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'oslo_policy': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_auth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'django': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - # Logging from django.db.backends is VERY verbose, send to null - # by default. 
- 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'requests': { - 'handlers': ['null'], - 'propagate': False, - }, - 'urllib3': { - 'handlers': ['null'], - 'propagate': False, - }, - 'chardet.charsetprober': { - 'handlers': ['null'], - 'propagate': False, - }, - 'iso8601': { - 'handlers': ['null'], - 'propagate': False, - }, - 'scss': { - 'handlers': ['null'], - 'propagate': False, - }, - }, -} - -# 'direction' should not be specified for all_tcp/udp/icmp. -# It is specified in the form. -SECURITY_GROUP_RULES = { - 'all_tcp': { - 'name': _('All TCP'), - 'ip_protocol': 'tcp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_udp': { - 'name': _('All UDP'), - 'ip_protocol': 'udp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_icmp': { - 'name': _('All ICMP'), - 'ip_protocol': 'icmp', - 'from_port': '-1', - 'to_port': '-1', - }, - 'ssh': { - 'name': 'SSH', - 'ip_protocol': 'tcp', - 'from_port': '22', - 'to_port': '22', - }, - 'smtp': { - 'name': 'SMTP', - 'ip_protocol': 'tcp', - 'from_port': '25', - 'to_port': '25', - }, - 'dns': { - 'name': 'DNS', - 'ip_protocol': 'tcp', - 'from_port': '53', - 'to_port': '53', - }, - 'http': { - 'name': 'HTTP', - 'ip_protocol': 'tcp', - 'from_port': '80', - 'to_port': '80', - }, - 'pop3': { - 'name': 'POP3', - 'ip_protocol': 'tcp', - 'from_port': '110', - 'to_port': '110', - }, - 'imap': { - 'name': 'IMAP', - 'ip_protocol': 'tcp', - 'from_port': '143', - 'to_port': '143', - }, - 'ldap': { - 'name': 'LDAP', - 'ip_protocol': 'tcp', - 'from_port': '389', - 'to_port': '389', - }, - 'https': { - 'name': 'HTTPS', - 'ip_protocol': 'tcp', - 'from_port': '443', - 'to_port': '443', - }, - 'smtps': { - 'name': 'SMTPS', - 'ip_protocol': 'tcp', - 'from_port': '465', - 'to_port': '465', - }, - 'imaps': { - 'name': 'IMAPS', - 'ip_protocol': 'tcp', - 'from_port': '993', - 'to_port': '993', - }, - 'pop3s': { - 'name': 'POP3S', - 'ip_protocol': 'tcp', - 'from_port': '995', - 'to_port': '995', - }, - 'ms_sql': { 
- 'name': 'MS SQL', - 'ip_protocol': 'tcp', - 'from_port': '1433', - 'to_port': '1433', - }, - 'mysql': { - 'name': 'MYSQL', - 'ip_protocol': 'tcp', - 'from_port': '3306', - 'to_port': '3306', - }, - 'rdp': { - 'name': 'RDP', - 'ip_protocol': 'tcp', - 'from_port': '3389', - 'to_port': '3389', - }, -} - -# Deprecation Notice: -# -# The setting FLAVOR_EXTRA_KEYS has been deprecated. -# Please load extra spec metadata into the Glance Metadata Definition Catalog. -# -# The sample quota definitions can be found in: -# /etc/metadefs/compute-quota.json -# -# The metadata definition catalog supports CLI and API: -# $glance --os-image-api-version 2 help md-namespace-import -# $glance-manage db_load_metadefs -# -# See Metadata Definitions on: -# https://docs.openstack.org/glance/latest/user/glancemetadefcatalogapi.html - -# The hash algorithm to use for authentication tokens. This must -# match the hash algorithm that the identity server and the -# auth_token middleware are using. Allowed values are the -# algorithms supported by Python's hashlib library. -#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' - -# AngularJS requires some settings to be made available to -# the client side. Some settings are required by in-tree / built-in horizon -# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the -# form of ['SETTING_1','SETTING_2'], etc. -# -# You may remove settings from this list for security purposes, but do so at -# the risk of breaking a built-in horizon feature. These settings are required -# for horizon to function properly. Only remove them if you know what you -# are doing. These settings may in the future be moved to be defined within -# the enabled panel configuration. -# You should not add settings to this list for out of tree extensions. 
-# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI -REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES', - 'LAUNCH_INSTANCE_DEFAULTS', - 'OPENSTACK_IMAGE_FORMATS', - 'OPENSTACK_KEYSTONE_BACKEND', - 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN', - 'CREATE_IMAGE_DEFAULTS', - 'ENFORCE_PASSWORD_CHECK'] - -# Additional settings can be made available to the client side for -# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS -# !! Please use extreme caution as the settings are transferred via HTTP/S -# and are not encrypted on the browser. This is an experimental API and -# may be deprecated in the future without notice. -#REST_API_ADDITIONAL_SETTINGS = [] - -############################################################################### -# Ubuntu Settings -############################################################################### - - # The default theme if no cookie is present -DEFAULT_THEME = 'default' - -# Default Ubuntu apache configuration uses /horizon as the application root. -WEBROOT='/horizon/' - -# By default, validation of the HTTP Host header is disabled. Production -# installations should have this set accordingly. For more information -# see https://docs.djangoproject.com/en/dev/ref/settings/. -ALLOWED_HOSTS = '*' - -# Compress all assets offline as part of packaging installation -COMPRESS_OFFLINE = True - -# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded -# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame -# Scripting (XFS) vulnerability, so this option allows extra security hardening -# where iframes are not used in deployment. Default setting is True. -# For more information see: -# http://tinyurl.com/anticlickjack -#DISALLOW_IFRAME_EMBED = True - -# Help URL can be made available for the client. To provide a help URL, edit the -# following attribute to the URL of your choice. 
-#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" - -# Settings for OperationLogMiddleware -# OPERATION_LOG_ENABLED is flag to use the function to log an operation on -# Horizon. -# mask_targets is arrangement for appointing a target to mask. -# method_targets is arrangement of HTTP method to output log. -# format is the log contents. -#OPERATION_LOG_ENABLED = False -#OPERATION_LOG_OPTIONS = { -# 'mask_fields': ['password'], -# 'target_methods': ['POST'], -# 'ignored_urls': ['/js/', '/static/', '^/api/'], -# 'format': ("[%(client_ip)s] [%(domain_name)s]" -# " [%(domain_id)s] [%(project_name)s]" -# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]" -# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]" -# " [%(http_status)s] [%(param)s]"), -#} - -# The default date range in the Overview panel meters - either minus N -# days (if the value is integer N), or from the beginning of the current month -# until today (if set to None). This setting should be used to limit the amount -# of data fetched by default when rendering the Overview panel. -#OVERVIEW_DAYS_RANGE = 1 - -# To allow operators to require users provide a search criteria first -# before loading any data into the views, set the following dict -# attributes to True in each one of the panels you want to enable this feature. -# Follow the convention . -#FILTER_DATA_FIRST = { -# 'admin.instances': False, -# 'admin.images': False, -# 'admin.networks': False, -# 'admin.routers': False, -# 'admin.volumes': False, -# 'identity.users': False, -# 'identity.projects': False, -# 'identity.groups': False, -# 'identity.roles': False -#} - -# Dict used to restrict user private subnet cidr range. -# An empty list means that user input will not be restricted -# for a corresponding IP version. By default, there is -# no restriction for IPv4 or IPv6. 
To restrict -# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR -# to something like -#ALLOWED_PRIVATE_SUBNET_CIDR = { -# 'ipv4': ['10.0.0.0/8', '192.168.0.0/16'], -# 'ipv6': ['fc00::/7'] -#} -ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []} - -# Projects and users can have extra attributes as defined by keystone v3. -# Horizon has the ability to display these extra attributes via this setting. -# If you'd like to display extra data in the project or user tables, set the -# corresponding dict key to the attribute name, followed by the display name. -# For more information, see horizon's customization -# (https://docs.openstack.org/horizon/latest/configuration/customizing.html#horizon-customization-module-overrides) -#PROJECT_TABLE_EXTRA_INFO = { -# 'phone_num': _('Phone Number'), -#} -#USER_TABLE_EXTRA_INFO = { -# 'phone_num': _('Phone Number'), -#} - -# Password will have an expiration date when using keystone v3 and enabling the -# feature. -# This setting allows you to set the number of days that the user will be alerted -# prior to the password expiration. -# Once the password expires keystone will deny the access and users must -# contact an admin to change their password. 
-#PASSWORD_EXPIRES_WARNING_THRESHOLD_DAYS = 0 From 915fb7149ef3175cada5d4183eb3eea7dbda2ca9 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:22:07 +0100 Subject: [PATCH 05/18] Delete conf_conductor directory --- conf_conductor/iotronic.conf | 102 ----------------------------------- 1 file changed, 102 deletions(-) delete mode 100644 conf_conductor/iotronic.conf diff --git a/conf_conductor/iotronic.conf b/conf_conductor/iotronic.conf deleted file mode 100644 index 30371a1..0000000 --- a/conf_conductor/iotronic.conf +++ /dev/null @@ -1,102 +0,0 @@ -[DEFAULT] -transport_url = rabbit://openstack:unime@rabbitmq - -debug=True -log_file = /var/log/iotronic/iotronic-conductor.log -proxy=nginx - - -# Authentication strategy used by iotronic-api: one of -# "keystone" or "noauth". "noauth" should not be used in a -# production environment because all authentication will be -# disabled. (string value) -auth_strategy=keystone - -# Enable pecan debug mode. WARNING: this is insecure and -# should not be used in a production environment. 
(boolean -# value) -#pecan_debug=false - - -[conductor] -service_port_min=50000 -service_port_max=50100 - -[wamp] -wamp_transport_url = ws://iotronic-wagent:8181/ -wamp_realm = s4t -#skip_cert_verify= False -register_agent = True - - - -[database] -connection = mysql+pymysql://iotronic:unime@iotronic-db/iotronic - -[keystone_authtoken] -www_authenticate_uri = http://keystone:5000 -auth_url = http://keystone:5000 -auth_plugin = password -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = iotronic -password = unime - - -[neutron] -auth_url = http://controller:5000 -url = http://controller:9696 -auth_strategy = password -auth_type = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = neutron -password = NEUTRON_PASS -retries = 3 -project_domain_id= default - - -[designate] -auth_url = http://controller:5000 -url = http://controller:9001 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = designate -password = password -retries = 3 -project_domain_id= default - - -[cors] -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user -# credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. -# Defaults to HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer -# value) -#max_age = 3600 - -# Indicate which methods can be used during the actual -# request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the -# actual request. 
(list value) -#allow_headers = From 9abb78af2bb0011e082b77fa61779d0b2c2e04a0 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:22:16 +0100 Subject: [PATCH 06/18] Delete conf_keystone directory --- conf_keystone/keystone.conf | 2715 ----------------------------------- 1 file changed, 2715 deletions(-) delete mode 100644 conf_keystone/keystone.conf diff --git a/conf_keystone/keystone.conf b/conf_keystone/keystone.conf deleted file mode 100644 index 01e3d6f..0000000 --- a/conf_keystone/keystone.conf +++ /dev/null @@ -1,2715 +0,0 @@ -[DEFAULT] -debug = True -#log_config = /etc/keystone/logging.conf -log_dir = /var/log/keystone - -# -# From keystone -# - -# Using this feature is *NOT* recommended. Instead, use the `keystone-manage -# bootstrap` command. The value of this option is treated as a "shared secret" -# that can be used to bootstrap Keystone through the API. This "token" does not -# represent a user (it has no identity), and carries no explicit authorization -# (it effectively bypasses most authorization checks). If set to `None`, the -# value is ignored and the `admin_token` middleware is effectively disabled. -# (string value) -#admin_token = - -# The base public endpoint URL for Keystone that is advertised to clients -# (NOTE: this does NOT affect how Keystone listens for connections). Defaults -# to the base host URL of the request. For example, if keystone receives a -# request to `http://server:5000/v3/users`, then this will option will be -# automatically treated as `http://server:5000`. You should only need to set -# option if either the value of the base URL contains a path that keystone does -# not automatically infer (`/prefix/v3`), or if the endpoint should be found on -# a different host. (uri value) -#public_endpoint = - -# DEPRECATED: The base admin endpoint URL for Keystone that is advertised to -# clients (NOTE: this does NOT affect how Keystone listens for connections). 
-# Defaults to the base host URL of the request. For example, if keystone -# receives a request to `http://server:35357/v3/users`, then this will option -# will be automatically treated as `http://server:35357`. You should only need -# to set option if either the value of the base URL contains a path that -# keystone does not automatically infer (`/prefix/v3`), or if the endpoint -# should be found on a different host. (uri value) -# This option is deprecated for removal since R. -# Its value may be silently ignored in the future. -# Reason: With the removal of the 2.0 API keystone does not distinguish between -# admin and public endpoints. -#admin_endpoint = - -# Maximum depth of the project hierarchy, excluding the project acting as a -# domain at the top of the hierarchy. WARNING: Setting it to a large value may -# adversely impact performance. (integer value) -#max_project_tree_depth = 5 - -# Limit the sizes of user & project ID/names. (integer value) -#max_param_size = 64 - -# Similar to `[DEFAULT] max_param_size`, but provides an exception for token -# values. With Fernet tokens, this can be set as low as 255. With UUID tokens, -# this should be set to 32). (integer value) -#max_token_size = 255 - -# The maximum number of entities that will be returned in a collection. This -# global limit may be then overridden for a specific driver, by specifying a -# list_limit in the appropriate section (for example, `[assignment]`). No limit -# is set by default. In larger deployments, it is recommended that you set this -# to a reasonable number to prevent operations like listing all users and -# projects from placing an unnecessary load on the system. (integer value) -#list_limit = - -# If set to true, strict password length checking is performed for password -# manipulation. If a password exceeds the maximum length, the operation will -# fail with an HTTP 403 Forbidden error. If set to false, passwords are -# automatically truncated to the maximum length. 
(boolean value) -#strict_password_check = false - -# If set to true, then the server will return information in HTTP responses -# that may allow an unauthenticated or authenticated user to get more -# information than normal, such as additional details about why authentication -# failed. This may be useful for debugging but is insecure. (boolean value) -#insecure_debug = false - -# Default `publisher_id` for outgoing notifications. If left undefined, -# Keystone will default to using the server's host name. (string value) -#default_publisher_id = - -# Define the notification format for identity service events. A `basic` -# notification only has information about the resource being operated on. A -# `cadf` notification has the same information, as well as information about -# the initiator of the event. The `cadf` option is entirely backwards -# compatible with the `basic` option, but is fully CADF-compliant, and is -# recommended for auditing use cases. (string value) -# Possible values: -# basic - -# cadf - -#notification_format = cadf - -# You can reduce the number of notifications keystone emits by explicitly -# opting out. Keystone will not emit notifications that match the patterns -# expressed in this list. Values are expected to be in the form of -# `identity..`. By default, all notifications related -# to authentication are automatically suppressed. This field can be set -# multiple times in order to opt-out of multiple notification topics. For -# example, the following suppresses notifications describing user creation or -# successful authentication events: notification_opt_out=identity.user.create -# notification_opt_out=identity.authenticate.success (multi valued) -#notification_opt_out = identity.authenticate.success -#notification_opt_out = identity.authenticate.pending -#notification_opt_out = identity.authenticate.failed - -# -# From oslo.log -# - -# If set to true, the logging level will be set to DEBUG instead of the default -# INFO level. 
(boolean value) -# Note: This option can be changed without restarting. -#debug = false - -# The name of a logging configuration file. This file is appended to any -# existing logging configuration files. For details about logging configuration -# files, see the Python logging module documentation. Note that when logging -# configuration files are used then all logging configuration is set in the -# configuration file and other logging configuration options are ignored (for -# example, log-date-format). (string value) -# Note: This option can be changed without restarting. -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append = - -# Defines the format string for %%(asctime)s in log records. Default: -# %(default)s . This option is ignored if log_config_append is set. (string -# value) -#log_date_format = %Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to send logging output to. If no default is set, -# logging will go to stderr as defined by use_stderr. This option is ignored if -# log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative log_file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# Uses logging handler designed to watch file system. When log file is moved or -# removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log_file option is specified and -# Linux platform is used. This option is ignored if log_config_append is set. -# (boolean value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append -# is set. (boolean value) -#use_syslog = false - -# Enable journald for logging. If running in a systemd environment you may wish -# to enable journal support. 
Doing so will use the journal native protocol -# which includes structured metadata in addition to log messages.This option is -# ignored if log_config_append is set. (boolean value) -#use_journal = false - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Use JSON formatting for logging. This option is ignored if log_config_append -# is set. (boolean value) -#use_json = false - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = false - -# Log output to Windows Event Log. (boolean value) -#use_eventlog = false - -# The amount of time before the log files are rotated. This option is ignored -# unless log_rotation_type is setto "interval". (integer value) -#log_rotate_interval = 1 - -# Rotation interval type. The time of the last file change (or the time when -# the service was started) is used when scheduling the next rotation. (string -# value) -# Possible values: -# Seconds - -# Minutes - -# Hours - -# Days - -# Weekday - -# Midnight - -#log_rotate_interval_type = days - -# Maximum number of rotated log files. (integer value) -#max_logfile_count = 30 - -# Log file maximum size in MB. This option is ignored if "log_rotation_type" is -# not set to "size". (integer value) -#max_logfile_size_mb = 200 - -# Log rotation type. (string value) -# Possible values: -# interval - Rotate logs at predefined time intervals. -# size - Rotate logs once they reach a predefined size. -# none - Do not rotate log files. -#log_rotation_type = none - -# Format string to use for log messages with context. Used by -# oslo_log.formatters.ContextFormatter (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages when context is undefined. 
Used by -# oslo_log.formatters.ContextFormatter (string value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Additional data to append to log message when logging level for the message -# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. Used by -# oslo_log.formatters.ContextFormatter (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# Defines the format string for %(user_identity)s that is used in -# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter -# (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# List of package logging levels in logger=LEVEL pairs. This option is ignored -# if log_config_append is set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string -# value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Interval, number of seconds, of log rate limiting. (integer value) -#rate_limit_interval = 0 - -# Maximum number of logged messages per rate_limit_interval. 
(integer value) -#rate_limit_burst = 0 - -# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG -# or empty string. Logs with level greater or equal to rate_limit_except_level -# are not filtered. An empty string means that all levels are filtered. (string -# value) -#rate_limit_except_level = CRITICAL - -# Enables or disables fatal status of deprecations. (boolean value) -#fatal_deprecations = false - -# -# From oslo.messaging -# - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size = 30 - -# The pool size limit for connections expiration policy (integer value) -#conn_pool_min_size = 2 - -# The time-to-live in sec of idle connections in the pool (integer value) -#conn_pool_ttl = 1200 - -# Size of executor thread pool when executor is threading or eventlet. (integer -# value) -# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size -#executor_thread_pool_size = 64 - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout = 60 - -# The network address and optional user credentials for connecting to the -# messaging backend, in URL format. The expected format is: -# -# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query -# -# Example: rabbit://rabbitmq:password@127.0.0.1:5672// -# -# For full details on the fields in the URL see the documentation of -# oslo_messaging.TransportURL at -# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html -# (string value) -#transport_url = rabbit:// - -# The default exchange under which topics are scoped. May be overridden by an -# exchange name specified in the transport_url option. (string value) -#control_exchange = keystone - - -[access_rules_config] - -# -# From keystone -# - -# Entry point for the access rules config backend driver in the -# `keystone.access_rules_config` namespace. Keystone only provides a `json` -# driver, so there is no reason to change this unless you are providing a -# custom entry point. 
(string value) -#driver = json - -# Toggle for access rules caching. This has no effect unless global caching is -# enabled. (boolean value) -#caching = true - -# Time to cache access rule data in seconds. This has no effect unless global -# caching is enabled. (integer value) -#cache_time = - -# Path to access rules configuration. If not present, no access rule -# configuration will be loaded and application credential access rules will be -# unavailable. (string value) -#rules_file = /etc/keystone/access_rules.json - -# Toggles permissive mode for access rules. When enabled, application -# credentials can be created with any access rules regardless of operator's -# configuration. (boolean value) -#permissive = false - - -[application_credential] - -# -# From keystone -# - -# Entry point for the application credential backend driver in the -# `keystone.application_credential` namespace. Keystone only provides a `sql` -# driver, so there is no reason to change this unless you are providing a -# custom entry point. (string value) -#driver = sql - -# Toggle for application credential caching. This has no effect unless global -# caching is enabled. (boolean value) -#caching = true - -# Time to cache application credential data in seconds. This has no effect -# unless global caching is enabled. (integer value) -#cache_time = - -# Maximum number of application credentials a user is permitted to create. A -# value of -1 means unlimited. If a limit is not set, users are permitted to -# create application credentials at will, which could lead to bloat in the -# keystone database or open keystone to a DoS attack. (integer value) -#user_limit = -1 - - -[assignment] - -# -# From keystone -# - -# Entry point for the assignment backend driver (where role assignments are -# stored) in the `keystone.assignment` namespace. Only a SQL driver is supplied -# by keystone itself. Unless you are writing proprietary drivers for keystone, -# you do not need to set this option. 
(string value) -#driver = sql - -# A list of role names which are prohibited from being an implied role. (list -# value) -#prohibited_implied_role = admin - - -[auth] - -# -# From keystone -# - -# Allowed authentication methods. Note: You should disable the `external` auth -# method if you are currently using federation. External auth and federation -# both use the REMOTE_USER variable. Since both the mapped and external plugin -# are being invoked to validate attributes in the request environment, it can -# cause conflicts. (list value) -#methods = external,password,token,oauth1,mapped,application_credential - -# Entry point for the password auth plugin module in the -# `keystone.auth.password` namespace. You do not need to set this unless you -# are overriding keystone's own password authentication plugin. (string value) -#password = - -# Entry point for the token auth plugin module in the `keystone.auth.token` -# namespace. You do not need to set this unless you are overriding keystone's -# own token authentication plugin. (string value) -#token = - -# Entry point for the external (`REMOTE_USER`) auth plugin module in the -# `keystone.auth.external` namespace. Supplied drivers are `DefaultDomain` and -# `Domain`. The default driver is `DefaultDomain`, which assumes that all users -# identified by the username specified to keystone in the `REMOTE_USER` -# variable exist within the context of the default domain. The `Domain` option -# expects an additional environment variable be presented to keystone, -# `REMOTE_DOMAIN`, containing the domain name of the `REMOTE_USER` (if -# `REMOTE_DOMAIN` is not set, then the default domain will be used instead). -# You do not need to set this unless you are taking advantage of "external -# authentication", where the application server (such as Apache) is handling -# authentication instead of keystone. (string value) -#external = - -# Entry point for the OAuth 1.0a auth plugin module in the -# `keystone.auth.oauth1` namespace. 
You do not need to set this unless you are -# overriding keystone's own `oauth1` authentication plugin. (string value) -#oauth1 = - -# Entry point for the mapped auth plugin module in the `keystone.auth.mapped` -# namespace. You do not need to set this unless you are overriding keystone's -# own `mapped` authentication plugin. (string value) -#mapped = - -# Entry point for the application_credential auth plugin module in the -# `keystone.auth.application_credential` namespace. You do not need to set this -# unless you are overriding keystone's own `application_credential` -# authentication plugin. (string value) -#application_credential = - - -[cache] - -# -# From oslo.cache -# - -# Prefix for building the configuration dictionary for the cache region. This -# should not need to be changed unless there is another dogpile.cache region -# with the same configuration name. (string value) -#config_prefix = cache.oslo - -# Default TTL, in seconds, for any cached item in the dogpile.cache region. -# This applies to any cached method that doesn't have an explicit cache -# expiration time defined for it. (integer value) -#expiration_time = 600 - -# Cache backend module. For eventlet-based or environments with hundreds of -# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is -# recommended. For environments with less than 100 threaded servers, Memcached -# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test -# environments with a single instance of the server can use the -# dogpile.cache.memory backend. (string value) -# Possible values: -# oslo_cache.memcache_pool - -# oslo_cache.dict - -# oslo_cache.mongo - -# oslo_cache.etcd3gw - -# dogpile.cache.memcached - -# dogpile.cache.pylibmc - -# dogpile.cache.bmemcached - -# dogpile.cache.dbm - -# dogpile.cache.redis - -# dogpile.cache.memory - -# dogpile.cache.memory_pickle - -# dogpile.cache.null - -#backend = dogpile.cache.null - -# Arguments supplied to the backend module. 
Specify this option once per -# argument to be passed to the dogpile.cache backend. Example format: -# ":". (multi valued) -#backend_argument = - -# Proxy classes to import that will affect the way the dogpile.cache backend -# functions. See the dogpile.cache documentation on changing-backend-behavior. -# (list value) -#proxies = - -# Global toggle for caching. (boolean value) -#enabled = true - -# Extra debugging from the cache backend (cache keys, get/set/delete/etc -# calls). This is only really useful if you need to see the specific cache- -# backend get/set/delete calls with the keys/values. Typically this should be -# left set to false. (boolean value) -#debug_cache_backend = false - -# Memcache servers in the format of "host:port". (dogpile.cache.memcache and -# oslo_cache.memcache_pool backends only). (list value) -#memcache_servers = localhost:11211 - -# Number of seconds memcached server is considered dead before it is tried -# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only). -# (integer value) -#memcache_dead_retry = 300 - -# Timeout in seconds for every call to a server. (dogpile.cache.memcache and -# oslo_cache.memcache_pool backends only). (floating point value) -#memcache_socket_timeout = 3.0 - -# Max total number of open connections to every memcached server. -# (oslo_cache.memcache_pool backend only). (integer value) -#memcache_pool_maxsize = 10 - -# Number of seconds a connection to memcached is held unused in the pool before -# it is closed. (oslo_cache.memcache_pool backend only). (integer value) -#memcache_pool_unused_timeout = 60 - -# Number of seconds that an operation will wait to get a memcache client -# connection. (integer value) -#memcache_pool_connection_get_timeout = 10 - - -[catalog] - -# -# From keystone -# - -# Absolute path to the file used for the templated catalog backend. This option -# is only used if the `[catalog] driver` is set to `templated`. 
(string value) -#template_file = default_catalog.templates - -# Entry point for the catalog driver in the `keystone.catalog` namespace. -# Keystone provides a `sql` option (which supports basic CRUD operations -# through SQL), a `templated` option (which loads the catalog from a templated -# catalog file on disk), and a `endpoint_filter.sql` option (which supports -# arbitrary service catalogs per project). (string value) -#driver = sql - -# Toggle for catalog caching. This has no effect unless global caching is -# enabled. In a typical deployment, there is no reason to disable this. -# (boolean value) -#caching = true - -# Time to cache catalog data (in seconds). This has no effect unless global and -# catalog caching are both enabled. Catalog data (services, endpoints, etc.) -# typically does not change frequently, and so a longer duration than the -# global default may be desirable. (integer value) -#cache_time = - -# Maximum number of entities that will be returned in a catalog collection. -# There is typically no reason to set this, as it would be unusual for a -# deployment to have enough services or endpoints to exceed a reasonable limit. -# (integer value) -#list_limit = - - -[cors] - -# -# From oslo.middleware -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. Format: "://[:]", no trailing -# slash. Example: https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. (list value) -#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,Openstack-Auth-Receipt - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. 
(list value) -#allow_methods = GET,PUT,POST,DELETE,PATCH - -# Indicate which header field names may be used during the actual request. -# (list value) -#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name,Openstack-Auth-Receipt - - -[credential] - -# -# From keystone -# - -# Entry point for the credential backend driver in the `keystone.credential` -# namespace. Keystone only provides a `sql` driver, so there's no reason to -# change this unless you are providing a custom entry point. (string value) -#driver = sql - -# Entry point for credential encryption and decryption operations in the -# `keystone.credential.provider` namespace. Keystone only provides a `fernet` -# driver, so there's no reason to change this unless you are providing a custom -# entry point to encrypt and decrypt credentials. (string value) -#provider = fernet - -# Directory containing Fernet keys used to encrypt and decrypt credentials -# stored in the credential backend. Fernet keys used to encrypt credentials -# have no relationship to Fernet keys used to encrypt Fernet tokens. Both sets -# of keys should be managed separately and require different rotation policies. -# Do not share this repository with the repository used to manage keys for -# Fernet tokens. (string value) -#key_repository = /etc/keystone/credential-keys/ - - -[database] -connection = mysql+pymysql://keystone:unime@iotronic-db/keystone - -# -# From oslo.db -# - -# If True, SQLite uses synchronous mode. (boolean value) -#sqlite_synchronous = true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend = sqlalchemy - -# The SQLAlchemy connection string to use to connect to the database. 
(string -# value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = - -# The SQLAlchemy connection string to use to connect to the slave database. -# (string value) -#slave_connection = - -# The SQL mode to be used for MySQL sessions. This option, including the -# default, overrides any server-set SQL mode. To use whatever SQL mode is set -# by the server configuration, set this to no value. Example: mysql_sql_mode= -# (string value) -#mysql_sql_mode = TRADITIONAL - -# If True, transparently enables support for handling MySQL Cluster (NDB). -# (boolean value) -#mysql_enable_ndb = false - -# Connections which have been present in the connection pool longer than this -# number of seconds will be replaced with a new one the next time they are -# checked out from the pool. (integer value) -# Deprecated group/name - [DATABASE]/idle_timeout -# Deprecated group/name - [database]/idle_timeout -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#connection_recycle_time = 3600 - -# DEPRECATED: Minimum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: The option to set the minimum pool size is not supported by -# sqlalchemy. -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool. Setting a value of -# 0 indicates no limit. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = 5 - -# Maximum number of database connection retries during startup. Set to -1 to -# specify an infinite retry count. 
(integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a SQL connection. (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with SQLAlchemy. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = 50 - -# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer -# value) -# Minimum value: 0 -# Maximum value: 100 -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. -# (boolean value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. Set to -1 to specify an infinite retry count. (integer value) -#db_max_retries = 20 - -# Optional URL parameters to append onto the connection URL at connect time; -# specify as param1=value1&param2=value2&...
(string value) -#connection_parameters = - - -[domain_config] - -# -# From keystone -# - -# Entry point for the domain-specific configuration driver in the -# `keystone.resource.domain_config` namespace. Only a `sql` option is provided -# by keystone, so there is no reason to set this unless you are providing a -# custom entry point. (string value) -#driver = sql - -# Toggle for caching of the domain-specific configuration backend. This has no -# effect unless global caching is enabled. There is normally no reason to -# disable this. (boolean value) -#caching = true - -# Time-to-live (TTL, in seconds) to cache domain-specific configuration data. -# This has no effect unless `[domain_config] caching` is enabled. (integer -# value) -#cache_time = 300 - - -[endpoint_filter] - -# -# From keystone -# - -# Entry point for the endpoint filter driver in the `keystone.endpoint_filter` -# namespace. Only a `sql` option is provided by keystone, so there is no reason -# to set this unless you are providing a custom entry point. (string value) -#driver = sql - -# This controls keystone's behavior if the configured endpoint filters do not -# result in any endpoints for a user + project pair (and therefore a -# potentially empty service catalog). If set to true, keystone will return the -# entire service catalog. If set to false, keystone will return an empty -# service catalog. (boolean value) -#return_all_endpoints_if_no_filter = true - - -[endpoint_policy] - -# -# From keystone -# - -# Entry point for the endpoint policy driver in the `keystone.endpoint_policy` -# namespace. Only a `sql` driver is provided by keystone, so there is no reason -# to set this unless you are providing a custom entry point. (string value) -#driver = sql - - -[eventlet_server] - -# -# From keystone -# - -# DEPRECATED: The IP address of the network interface for the public service to -# listen on. 
(host address value) -# Deprecated group/name - [DEFAULT]/bind_host -# Deprecated group/name - [DEFAULT]/public_bind_host -# This option is deprecated for removal since K. -# Its value may be silently ignored in the future. -# Reason: Support for running keystone under eventlet has been removed in the -# Newton release. These options remain for backwards compatibility because they -# are used for URL substitutions. -#public_bind_host = 0.0.0.0 - -# DEPRECATED: The port number for the public service to listen on. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/public_port -# This option is deprecated for removal since K. -# Its value may be silently ignored in the future. -# Reason: Support for running keystone under eventlet has been removed in the -# Newton release. These options remain for backwards compatibility because they -# are used for URL substitutions. -#public_port = 5000 - -# DEPRECATED: The IP address of the network interface for the admin service to -# listen on. (host address value) -# Deprecated group/name - [DEFAULT]/bind_host -# Deprecated group/name - [DEFAULT]/admin_bind_host -# This option is deprecated for removal since K. -# Its value may be silently ignored in the future. -# Reason: Support for running keystone under eventlet has been removed in the -# Newton release. These options remain for backwards compatibility because they -# are used for URL substitutions. -#admin_bind_host = 0.0.0.0 - -# DEPRECATED: The port number for the admin service to listen on. (port value) -# Minimum value: 0 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/admin_port -# This option is deprecated for removal since K. -# Its value may be silently ignored in the future. -# Reason: Support for running keystone under eventlet has been removed in the -# Newton release. These options remain for backwards compatibility because they -# are used for URL substitutions. 
-#admin_port = 35357 - - -[extra_headers] -Distribution = Ubuntu - -# -# From keystone -# - -# Specifies the distribution of the keystone server. (string value) -#Distribution = Ubuntu - - -[federation] - -# -# From keystone -# - -# Entry point for the federation backend driver in the `keystone.federation` -# namespace. Keystone only provides a `sql` driver, so there is no reason to -# set this option unless you are providing a custom entry point. (string value) -#driver = sql - -# Prefix to use when filtering environment variable names for federated -# assertions. Matched variables are passed into the federated mapping engine. -# (string value) -#assertion_prefix = - -# Value to be used to obtain the entity ID of the Identity Provider from the -# environment. For `mod_shib`, this would be `Shib-Identity-Provider`. For -# `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`. For `mod_auth_mellon`, -# this could be `MELLON_IDP`. (string value) -#remote_id_attribute = - -# An arbitrary domain name that is reserved to allow federated ephemeral users -# to have a domain concept. Note that an admin will not be able to create a -# domain with this name or update an existing domain to this name. You are not -# advised to change this value unless you really have to. (string value) -#federated_domain_name = Federated - -# A list of trusted dashboard hosts. Before accepting a Single Sign-On request -# to return a token, the origin host must be a member of this list. This -# configuration option may be repeated for multiple values. You must set this -# in order to use web-based SSO flows. For example: -# trusted_dashboard=https://acme.example.com/auth/websso -# trusted_dashboard=https://beta.example.com/auth/websso (multi valued) -#trusted_dashboard = - -# Absolute path to an HTML file used as a Single Sign-On callback handler. This -# page is expected to redirect the user from keystone back to a trusted -# dashboard host, by form encoding a token in a POST request. 
Keystone's -# default value should be sufficient for most deployments. (string value) -#sso_callback_template = /etc/keystone/sso_callback_template.html - -# Toggle for federation caching. This has no effect unless global caching is -# enabled. There is typically no reason to disable this. (boolean value) -#caching = true - - -[fernet_receipts] - -# -# From keystone -# - -# Directory containing Fernet receipt keys. This directory must exist before -# using `keystone-manage fernet_setup` for the first time, must be writable by -# the user running `keystone-manage fernet_setup` or `keystone-manage -# fernet_rotate`, and of course must be readable by keystone's server process. -# The repository may contain keys in one of three states: a single staged key -# (always index 0) used for receipt validation, a single primary key (always -# the highest index) used for receipt creation and validation, and any number -# of secondary keys (all other index values) used for receipt validation. With -# multiple keystone nodes, each node must share the same key repository -# contents, with the exception of the staged key (index 0). It is safe to run -# `keystone-manage fernet_rotate` once on any one node to promote a staged key -# (index 0) to be the new primary (incremented from the previous highest -# index), and produce a new staged key (a new key with index 0); the resulting -# repository can then be atomically replicated to other nodes without any risk -# of race conditions (for example, it is safe to run `keystone-manage -# fernet_rotate` on host A, wait any amount of time, create a tarball of the -# directory on host A, unpack it on host B to a temporary location, and -# atomically move (`mv`) the directory into place on host B). Running -# `keystone-manage fernet_rotate` *twice* on a key repository without syncing -# other nodes will result in receipts that can not be validated by all nodes. 
-# (string value) -#key_repository = /etc/keystone/fernet-keys/ - -# This controls how many keys are held in rotation by `keystone-manage -# fernet_rotate` before they are discarded. The default value of 3 means that -# keystone will maintain one staged key (always index 0), one primary key (the -# highest numerical index), and one secondary key (every other index). -# Increasing this value means that additional secondary keys will be kept in -# the rotation. (integer value) -# Minimum value: 1 -#max_active_keys = 3 - - -[fernet_tokens] - -# -# From keystone -# - -# Directory containing Fernet token keys. This directory must exist before -# using `keystone-manage fernet_setup` for the first time, must be writable by -# the user running `keystone-manage fernet_setup` or `keystone-manage -# fernet_rotate`, and of course must be readable by keystone's server process. -# The repository may contain keys in one of three states: a single staged key -# (always index 0) used for token validation, a single primary key (always the -# highest index) used for token creation and validation, and any number of -# secondary keys (all other index values) used for token validation. With -# multiple keystone nodes, each node must share the same key repository -# contents, with the exception of the staged key (index 0). It is safe to run -# `keystone-manage fernet_rotate` once on any one node to promote a staged key -# (index 0) to be the new primary (incremented from the previous highest -# index), and produce a new staged key (a new key with index 0); the resulting -# repository can then be atomically replicated to other nodes without any risk -# of race conditions (for example, it is safe to run `keystone-manage -# fernet_rotate` on host A, wait any amount of time, create a tarball of the -# directory on host A, unpack it on host B to a temporary location, and -# atomically move (`mv`) the directory into place on host B). 
Running -# `keystone-manage fernet_rotate` *twice* on a key repository without syncing -# other nodes will result in tokens that can not be validated by all nodes. -# (string value) -#key_repository = /etc/keystone/fernet-keys/ - -# This controls how many keys are held in rotation by `keystone-manage -# fernet_rotate` before they are discarded. The default value of 3 means that -# keystone will maintain one staged key (always index 0), one primary key (the -# highest numerical index), and one secondary key (every other index). -# Increasing this value means that additional secondary keys will be kept in -# the rotation. (integer value) -# Minimum value: 1 -#max_active_keys = 3 - - -[healthcheck] - -# -# From oslo.middleware -# - -# DEPRECATED: The path to respond to healthcheck requests on. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#path = /healthcheck - -# Show more detailed information as part of the response. Security note: -# Enabling this option may expose sensitive details about the service being -# monitored. Be sure to verify that it will not violate your security policies. -# (boolean value) -#detailed = false - -# Additional backends that can perform health checks and report that -# information back as part of a request. (list value) -#backends = - -# Check the presence of a file to determine if an application is running on a -# port. Used by DisableByFileHealthcheck plugin. (string value) -#disable_by_file_path = - -# Check the presence of a file based on a port to determine if an application -# is running on a port. Expects a "port:path" list of strings. Used by -# DisableByFilesPortsHealthcheck plugin. (list value) -#disable_by_file_paths = - - -[identity] - -# -# From keystone -# - -# This references the domain to use for all Identity API v2 requests (which are -# not aware of domains). A domain with this ID can optionally be created for -# you by `keystone-manage bootstrap`.
The domain referenced by this ID cannot -# be deleted on the v3 API, to prevent accidentally breaking the v2 API. There -# is nothing special about this domain, other than the fact that it must exist -# in order to maintain support for your v2 clients. There is typically no -# reason to change this value. (string value) -#default_domain_id = default - -# A subset (or all) of domains can have their own identity driver, each with -# their own partial configuration options, stored in either the resource -# backend or in a file in a domain configuration directory (depending on the -# setting of `[identity] domain_configurations_from_database`). Only values -# specific to the domain need to be specified in this manner. This feature is -# disabled by default, but may be enabled by default in a future release; set -# to true to enable. (boolean value) -#domain_specific_drivers_enabled = false - -# By default, domain-specific configuration data is read from files in the -# directory identified by `[identity] domain_config_dir`. Enabling this -# configuration option allows you to instead manage domain-specific -# configurations through the API, which are then persisted in the backend -# (typically, a SQL database), rather than using configuration files on disk. -# (boolean value) -#domain_configurations_from_database = false - -# Absolute path where keystone should locate domain-specific `[identity]` -# configuration files. This option has no effect unless `[identity] -# domain_specific_drivers_enabled` is set to true. There is typically no reason -# to change this value. (string value) -#domain_config_dir = /etc/keystone/domains - -# Entry point for the identity backend driver in the `keystone.identity` -# namespace. Keystone provides a `sql` and `ldap` driver.
This option is also -# used as the default driver selection (along with the other configuration -# variables in this section) in the event that `[identity] -# domain_specific_drivers_enabled` is enabled, but no applicable domain- -# specific configuration is defined for the domain in question. Unless your -# deployment primarily relies on `ldap` AND is not using domain-specific -# configuration, you should typically leave this set to `sql`. (string value) -#driver = sql - -# Toggle for identity caching. This has no effect unless global caching is -# enabled. There is typically no reason to disable this. (boolean value) -#caching = true - -# Time to cache identity data (in seconds). This has no effect unless global -# and identity caching are enabled. (integer value) -#cache_time = 600 - -# Maximum allowed length for user passwords. Decrease this value to improve -# performance. Changing this value does not affect existing passwords. (integer -# value) -# Maximum value: 4096 -#max_password_length = 4096 - -# Maximum number of entities that will be returned in an identity collection. -# (integer value) -#list_limit = - -# The password hashing algorithm to use for passwords stored within keystone. -# (string value) -# Possible values: -# bcrypt - -# scrypt - -# pbkdf2_sha512 - -#password_hash_algorithm = bcrypt - -# This option represents a trade off between security and performance. Higher -# values lead to slower performance, but higher security. Changing this option -# will only affect newly created passwords as existing password hashes already -# have a fixed number of rounds applied, so it is safe to tune this option in a -# running cluster. The default for bcrypt is 12, must be between 4 and 31, -# inclusive. The default for scrypt is 16, must be within `range(1,32)`. The -# default for pbkdf2_sha512 is 60000, must be within `range(1,1<<32)` WARNING: -# If using scrypt, increasing this value increases BOTH time AND memory -# requirements to hash a password.
(integer value) -#password_hash_rounds = - -# Optional block size to pass to scrypt hash function (the `r` parameter). -# Useful for tuning scrypt to optimal performance for your CPU architecture. -# This option is only used when the `password_hash_algorithm` option is set to -# `scrypt`. Defaults to 8. (integer value) -#scrypt_block_size = - -# Optional parallelism to pass to scrypt hash function (the `p` parameter). -# This option is only used when the `password_hash_algorithm` option is set to -# `scrypt`. Defaults to 1. (integer value) -#scrypt_parallelism = - -# Number of bytes to use in scrypt and pbkfd2_sha512 hashing salt. Default for -# scrypt is 16 bytes. Default for pbkfd2_sha512 is 16 bytes. Limited to a -# maximum of 96 bytes due to the size of the column used to store password -# hashes. (integer value) -# Minimum value: 0 -# Maximum value: 96 -#salt_bytesize = - - -[identity_mapping] - -# -# From keystone -# - -# Entry point for the identity mapping backend driver in the -# `keystone.identity.id_mapping` namespace. Keystone only provides a `sql` -# driver, so there is no reason to change this unless you are providing a -# custom entry point. (string value) -#driver = sql - -# Entry point for the public ID generator for user and group entities in the -# `keystone.identity.id_generator` namespace. The Keystone identity mapper only -# supports generators that produce 64 bytes or less. Keystone only provides a -# `sha256` entry point, so there is no reason to change this value unless -# you're providing a custom entry point. (string value) -#generator = sha256 - -# The format of user and group IDs changed in Juno for backends that do not -# generate UUIDs (for example, LDAP), with keystone providing a hash mapping to -# the underlying attribute in LDAP. By default this mapping is disabled, which -# ensures that existing IDs will not change. 
Even when the mapping is enabled -# by using domain-specific drivers (`[identity] -# domain_specific_drivers_enabled`), any users and groups from the default -# domain being handled by LDAP will still not be mapped to ensure their IDs -# remain backward compatible. Setting this value to false will enable the new -# mapping for all backends, including the default LDAP driver. It is only -# guaranteed to be safe to enable this option if you do not already have -# assignments for users and groups from the default LDAP domain, and you -# consider it to be acceptable for Keystone to provide the different IDs to -# clients than it did previously (existing IDs in the API will suddenly -# change). Typically this means that the only time you can set this value to -# false is when configuring a fresh installation, although that is the -# recommended value. (boolean value) -#backward_compatible_ids = true - - -[jwt_tokens] - -# -# From keystone -# - -# Directory containing public keys for validating JWS token signatures. This -# directory must exist in order for keystone's server process to start. It must -# also be readable by keystone's server process. It must contain at least one -# public key that corresponds to a private key in `keystone.conf [jwt_tokens] -# jws_private_key_repository`. This option is only applicable in deployments -# issuing JWS tokens and setting `keystone.conf [tokens] provider = jws`. -# (string value) -#jws_public_key_repository = /etc/keystone/jws-keys/public - -# Directory containing private keys for signing JWS tokens. This directory must -# exist in order for keystone's server process to start. It must also be -# readable by keystone's server process. It must contain at least one private -# key that corresponds to a public key in `keystone.conf [jwt_tokens] -# jws_public_key_repository`. In the event there are multiple private keys in -# this directory, keystone will use a key named `private.pem` to sign tokens. 
-# In the future, keystone may support the ability to sign tokens with multiple -# private keys. For now, only a key named `private.pem` within this directory -# is required to issue JWS tokens. This option is only applicable in -# deployments issuing JWS tokens and setting `keystone.conf [tokens] provider = -# jws`. (string value) -#jws_private_key_repository = /etc/keystone/jws-keys/private - - -[ldap] - -# -# From keystone -# - -# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified -# as a comma separated string. The first URL to successfully bind is used for -# the connection. (string value) -#url = ldap://localhost - -# The user name of the administrator bind DN to use when querying the LDAP -# server, if your LDAP server requires it. (string value) -#user = - -# The password of the administrator bind DN to use when querying the LDAP -# server, if your LDAP server requires it. (string value) -#password = - -# The default LDAP server suffix to use, if a DN is not defined via either -# `[ldap] user_tree_dn` or `[ldap] group_tree_dn`. (string value) -#suffix = cn=example,cn=com - -# The search scope which defines how deep to search within the search base. A -# value of `one` (representing `oneLevel` or `singleLevel`) indicates a search -# of objects immediately below to the base object, but does not include the -# base object itself. A value of `sub` (representing `subtree` or -# `wholeSubtree`) indicates a search of both the base object itself and the -# entire subtree below it. (string value) -# Possible values: -# one - -# sub - -#query_scope = one - -# Defines the maximum number of results per page that keystone should request -# from the LDAP server when listing objects. A value of zero (`0`) disables -# paging. (integer value) -# Minimum value: 0 -#page_size = 0 - -# The LDAP dereferencing option to use for queries involving aliases. 
A value -# of `default` falls back to using default dereferencing behavior configured by -# your `ldap.conf`. A value of `never` prevents aliases from being dereferenced -# at all. A value of `searching` dereferences aliases only after name -# resolution. A value of `finding` dereferences aliases only during name -# resolution. A value of `always` dereferences aliases in all cases. (string -# value) -# Possible values: -# never - -# searching - -# always - -# finding - -# default - -#alias_dereferencing = default - -# Sets the LDAP debugging level for LDAP calls. A value of 0 means that -# debugging is not enabled. This value is a bitmask, consult your LDAP -# documentation for possible values. (integer value) -# Minimum value: -1 -#debug_level = - -# Sets keystone's referral chasing behavior across directory partitions. If -# left unset, the system's default behavior will be used. (boolean value) -#chase_referrals = - -# The search base to use for users. Defaults to the `[ldap] suffix` value. -# (string value) -#user_tree_dn = - -# The LDAP search filter to use for users. (string value) -#user_filter = - -# The LDAP object class to use for users. (string value) -#user_objectclass = inetOrgPerson - -# The LDAP attribute mapped to user IDs in keystone. This must NOT be a -# multivalued attribute. User IDs are expected to be globally unique across -# keystone domains and URL-safe. (string value) -#user_id_attribute = cn - -# The LDAP attribute mapped to user names in keystone. User names are expected -# to be unique only within a keystone domain and are not expected to be URL- -# safe. (string value) -#user_name_attribute = sn - -# The LDAP attribute mapped to user descriptions in keystone. (string value) -#user_description_attribute = description - -# The LDAP attribute mapped to user emails in keystone. (string value) -#user_mail_attribute = mail - -# The LDAP attribute mapped to user passwords in keystone. 
(string value) -#user_pass_attribute = userPassword - -# The LDAP attribute mapped to the user enabled attribute in keystone. If -# setting this option to `userAccountControl`, then you may be interested in -# setting `[ldap] user_enabled_mask` and `[ldap] user_enabled_default` as well. -# (string value) -#user_enabled_attribute = enabled - -# Logically negate the boolean value of the enabled attribute obtained from the -# LDAP server. Some LDAP servers use a boolean lock attribute where "true" -# means an account is disabled. Setting `[ldap] user_enabled_invert = true` -# will allow these lock attributes to be used. This option will have no effect -# if either the `[ldap] user_enabled_mask` or `[ldap] user_enabled_emulation` -# options are in use. (boolean value) -#user_enabled_invert = false - -# Bitmask integer to select which bit indicates the enabled value if the LDAP -# server represents "enabled" as a bit on an integer rather than as a discrete -# boolean. A value of `0` indicates that the mask is not used. If this is not -# set to `0` the typical value is `2`. This is typically used when `[ldap] -# user_enabled_attribute = userAccountControl`. Setting this option causes -# keystone to ignore the value of `[ldap] user_enabled_invert`. (integer value) -# Minimum value: 0 -#user_enabled_mask = 0 - -# The default value to enable users. This should match an appropriate integer -# value if the LDAP server uses non-boolean (bitmask) values to indicate if a -# user is enabled or disabled. If this is not set to `True`, then the typical -# value is `512`. This is typically used when `[ldap] user_enabled_attribute = -# userAccountControl`. (string value) -#user_enabled_default = True - -# List of user attributes to ignore on create and update, or whether a specific -# user attribute should be filtered for list or show user. (list value) -#user_attribute_ignore = default_project_id - -# The LDAP attribute mapped to a user's default_project_id in keystone. 
This is -# most commonly used when keystone has write access to LDAP. (string value) -#user_default_project_id_attribute = - -# If enabled, keystone uses an alternative method to determine if a user is -# enabled or not by checking if they are a member of the group defined by the -# `[ldap] user_enabled_emulation_dn` option. Enabling this option causes -# keystone to ignore the value of `[ldap] user_enabled_invert`. (boolean value) -#user_enabled_emulation = false - -# DN of the group entry to hold enabled users when using enabled emulation. -# Setting this option has no effect unless `[ldap] user_enabled_emulation` is -# also enabled. (string value) -#user_enabled_emulation_dn = - -# Use the `[ldap] group_member_attribute` and `[ldap] group_objectclass` -# settings to determine membership in the emulated enabled group. Enabling this -# option has no effect unless `[ldap] user_enabled_emulation` is also enabled. -# (boolean value) -#user_enabled_emulation_use_group_config = false - -# A list of LDAP attribute to keystone user attribute pairs used for mapping -# additional attributes to users in keystone. The expected format is -# `:`, where `ldap_attr` is the attribute in the LDAP -# object and `user_attr` is the attribute which should appear in the identity -# API. (list value) -#user_additional_attribute_mapping = - -# The search base to use for groups. Defaults to the `[ldap] suffix` value. -# (string value) -#group_tree_dn = - -# The LDAP search filter to use for groups. (string value) -#group_filter = - -# The LDAP object class to use for groups. If setting this option to -# `posixGroup`, you may also be interested in enabling the `[ldap] -# group_members_are_ids` option. (string value) -#group_objectclass = groupOfNames - -# The LDAP attribute mapped to group IDs in keystone. This must NOT be a -# multivalued attribute. Group IDs are expected to be globally unique across -# keystone domains and URL-safe. 
(string value) -#group_id_attribute = cn - -# The LDAP attribute mapped to group names in keystone. Group names are -# expected to be unique only within a keystone domain and are not expected to -# be URL-safe. (string value) -#group_name_attribute = ou - -# The LDAP attribute used to indicate that a user is a member of the group. -# (string value) -#group_member_attribute = member - -# Enable this option if the members of the group object class are keystone user -# IDs rather than LDAP DNs. This is the case when using `posixGroup` as the -# group object class in Open Directory. (boolean value) -#group_members_are_ids = false - -# The LDAP attribute mapped to group descriptions in keystone. (string value) -#group_desc_attribute = description - -# List of group attributes to ignore on create and update, or whether a -# specific group attribute should be filtered for list or show group. (list -# value) -#group_attribute_ignore = - -# A list of LDAP attribute to keystone group attribute pairs used for mapping -# additional attributes to groups in keystone. The expected format is -# `:`, where `ldap_attr` is the attribute in the LDAP -# object and `group_attr` is the attribute which should appear in the identity -# API. (list value) -#group_additional_attribute_mapping = - -# If enabled, group queries will use Active Directory specific filters for -# nested groups. (boolean value) -#group_ad_nesting = false - -# An absolute path to a CA certificate file to use when communicating with LDAP -# servers. This option will take precedence over `[ldap] tls_cacertdir`, so -# there is no reason to set both. (string value) -#tls_cacertfile = - -# An absolute path to a CA certificate directory to use when communicating with -# LDAP servers. There is no reason to set this option if you've also set -# `[ldap] tls_cacertfile`. (string value) -#tls_cacertdir = - -# Enable TLS when communicating with LDAP servers. 
You should also set the -# `[ldap] tls_cacertfile` and `[ldap] tls_cacertdir` options when using this -# option. Do not set this option if you are using LDAP over SSL (LDAPS) instead -# of TLS. (boolean value) -#use_tls = false - -# Specifies which checks to perform against client certificates on incoming TLS -# sessions. If set to `demand`, then a certificate will always be requested and -# required from the LDAP server. If set to `allow`, then a certificate will -# always be requested but not required from the LDAP server. If set to `never`, -# then a certificate will never be requested. (string value) -# Possible values: -# demand - -# never - -# allow - -#tls_req_cert = demand - -# The connection timeout to use with the LDAP server. A value of `-1` means -# that connections will never timeout. (integer value) -# Minimum value: -1 -#connection_timeout = -1 - -# Enable LDAP connection pooling for queries to the LDAP server. There is -# typically no reason to disable this. (boolean value) -#use_pool = true - -# The size of the LDAP connection pool. This option has no effect unless -# `[ldap] use_pool` is also enabled. (integer value) -# Minimum value: 1 -#pool_size = 10 - -# The maximum number of times to attempt reconnecting to the LDAP server before -# aborting. A value of zero prevents retries. This option has no effect unless -# `[ldap] use_pool` is also enabled. (integer value) -# Minimum value: 0 -#pool_retry_max = 3 - -# The number of seconds to wait before attempting to reconnect to the LDAP -# server. This option has no effect unless `[ldap] use_pool` is also enabled. -# (floating point value) -#pool_retry_delay = 0.1 - -# The connection timeout to use when pooling LDAP connections. A value of `-1` -# means that connections will never timeout. This option has no effect unless -# `[ldap] use_pool` is also enabled. (integer value) -# Minimum value: -1 -#pool_connection_timeout = -1 - -# The maximum connection lifetime to the LDAP server in seconds. 
When this -# lifetime is exceeded, the connection will be unbound and removed from the -# connection pool. This option has no effect unless `[ldap] use_pool` is also -# enabled. (integer value) -# Minimum value: 1 -#pool_connection_lifetime = 600 - -# Enable LDAP connection pooling for end user authentication. There is -# typically no reason to disable this. (boolean value) -#use_auth_pool = true - -# The size of the connection pool to use for end user authentication. This -# option has no effect unless `[ldap] use_auth_pool` is also enabled. (integer -# value) -# Minimum value: 1 -#auth_pool_size = 100 - -# The maximum end user authentication connection lifetime to the LDAP server in -# seconds. When this lifetime is exceeded, the connection will be unbound and -# removed from the connection pool. This option has no effect unless `[ldap] -# use_auth_pool` is also enabled. (integer value) -# Minimum value: 1 -#auth_pool_connection_lifetime = 60 - - -[memcache] - -# -# From keystone -# - -# Number of seconds memcached server is considered dead before it is tried -# again. This is used by the key value store system. (integer value) -#dead_retry = 300 - -# Timeout in seconds for every call to a server. This is used by the key value -# store system. (integer value) -#socket_timeout = 3 - -# Max total number of open connections to every memcached server. This is used -# by the key value store system. (integer value) -#pool_maxsize = 10 - -# Number of seconds a connection to memcached is held unused in the pool before -# it is closed. This is used by the key value store system. (integer value) -#pool_unused_timeout = 60 - -# Number of seconds that an operation will wait to get a memcache client -# connection. This is used by the key value store system. (integer value) -#pool_connection_get_timeout = 10 - - -[oauth1] - -# -# From keystone -# - -# Entry point for the OAuth backend driver in the `keystone.oauth1` namespace. 
-# Typically, there is no reason to set this option unless you are providing a -# custom entry point. (string value) -#driver = sql - -# Number of seconds for the OAuth Request Token to remain valid after being -# created. This is the amount of time the user has to authorize the token. -# Setting this option to zero means that request tokens will last forever. -# (integer value) -# Minimum value: 0 -#request_token_duration = 28800 - -# Number of seconds for the OAuth Access Token to remain valid after being -# created. This is the amount of time the consumer has to interact with the -# service provider (which is typically keystone). Setting this option to zero -# means that access tokens will last forever. (integer value) -# Minimum value: 0 -#access_token_duration = 86400 - - -[oslo_messaging_amqp] - -# -# From oslo.messaging -# - -# Name for the AMQP container. must be globally unique. Defaults to a generated -# UUID (string value) -#container_name = - -# Timeout for inactive connections (in seconds) (integer value) -#idle_timeout = 0 - -# Debug: dump AMQP frames to stdout (boolean value) -#trace = false - -# Attempt to connect via SSL. If no other ssl-related parameters are given, it -# will use the system's CA-bundle to verify the server's certificate. (boolean -# value) -#ssl = false - -# CA certificate PEM file used to verify the server's certificate (string -# value) -#ssl_ca_file = - -# Self-identifying certificate PEM file for client authentication (string -# value) -#ssl_cert_file = - -# Private key PEM file used to sign ssl_cert_file certificate (optional) -# (string value) -#ssl_key_file = - -# Password for decrypting ssl_key_file (if encrypted) (string value) -#ssl_key_password = - -# By default SSL checks that the name in the server's certificate matches the -# hostname in the transport_url. 
In some configurations it may be preferable to -# use the virtual hostname instead, for example if the server uses the Server -# Name Indication TLS extension (rfc6066) to provide a certificate per virtual -# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the -# virtual host name instead of the DNS name. (boolean value) -#ssl_verify_vhost = false - -# Space separated list of acceptable SASL mechanisms (string value) -#sasl_mechanisms = - -# Path to directory that contains the SASL configuration (string value) -#sasl_config_dir = - -# Name of configuration file (without .conf suffix) (string value) -#sasl_config_name = - -# SASL realm to use if no realm present in username (string value) -#sasl_default_realm = - -# Seconds to pause before attempting to re-connect. (integer value) -# Minimum value: 1 -#connection_retry_interval = 1 - -# Increase the connection_retry_interval by this many seconds after each -# unsuccessful failover attempt. (integer value) -# Minimum value: 0 -#connection_retry_backoff = 2 - -# Maximum limit for connection_retry_interval + connection_retry_backoff -# (integer value) -# Minimum value: 1 -#connection_retry_interval_max = 30 - -# Time to pause between re-connecting an AMQP 1.0 link that failed due to a -# recoverable error. (integer value) -# Minimum value: 1 -#link_retry_delay = 10 - -# The maximum number of attempts to re-send a reply message which failed due to -# a recoverable error. (integer value) -# Minimum value: -1 -#default_reply_retry = 0 - -# The deadline for an rpc reply message delivery. (integer value) -# Minimum value: 5 -#default_reply_timeout = 30 - -# The deadline for an rpc cast or call message delivery. Only used when caller -# does not provide a timeout expiry. (integer value) -# Minimum value: 5 -#default_send_timeout = 30 - -# The deadline for a sent notification message delivery. Only used when caller -# does not provide a timeout expiry. 
(integer value) -# Minimum value: 5 -#default_notify_timeout = 30 - -# The duration to schedule a purge of idle sender links. Detach link after -# expiry. (integer value) -# Minimum value: 1 -#default_sender_link_timeout = 600 - -# Indicates the addressing mode used by the driver. -# Permitted values: -# 'legacy' - use legacy non-routable addressing -# 'routable' - use routable addresses -# 'dynamic' - use legacy addresses if the message bus does not support routing -# otherwise use routable addressing (string value) -#addressing_mode = dynamic - -# Enable virtual host support for those message buses that do not natively -# support virtual hosting (such as qpidd). When set to true the virtual host -# name will be added to all message bus addresses, effectively creating a -# private 'subnet' per virtual host. Set to False if the message bus supports -# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative -# as the name of the virtual host. (boolean value) -#pseudo_vhost = true - -# address prefix used when sending to a specific server (string value) -#server_request_prefix = exclusive - -# address prefix used when broadcasting to all servers (string value) -#broadcast_prefix = broadcast - -# address prefix when sending to any server in group (string value) -#group_request_prefix = unicast - -# Address prefix for all generated RPC addresses (string value) -#rpc_address_prefix = openstack.org/om/rpc - -# Address prefix for all generated Notification addresses (string value) -#notify_address_prefix = openstack.org/om/notify - -# Appended to the address prefix when sending a fanout message. Used by the -# message bus to identify fanout messages. (string value) -#multicast_address = multicast - -# Appended to the address prefix when sending to a particular RPC/Notification -# server. Used by the message bus to identify messages sent to a single -# destination. 
(string value) -#unicast_address = unicast - -# Appended to the address prefix when sending to a group of consumers. Used by -# the message bus to identify messages that should be delivered in a round- -# robin fashion across consumers. (string value) -#anycast_address = anycast - -# Exchange name used in notification addresses. -# Exchange name resolution precedence: -# Target.exchange if set -# else default_notification_exchange if set -# else control_exchange if set -# else 'notify' (string value) -#default_notification_exchange = - -# Exchange name used in RPC addresses. -# Exchange name resolution precedence: -# Target.exchange if set -# else default_rpc_exchange if set -# else control_exchange if set -# else 'rpc' (string value) -#default_rpc_exchange = - -# Window size for incoming RPC Reply messages. (integer value) -# Minimum value: 1 -#reply_link_credit = 200 - -# Window size for incoming RPC Request messages (integer value) -# Minimum value: 1 -#rpc_server_credit = 100 - -# Window size for incoming Notification messages (integer value) -# Minimum value: 1 -#notify_server_credit = 100 - -# Send messages of this type pre-settled. -# Pre-settled messages will not receive acknowledgement -# from the peer. Note well: pre-settled messages may be -# silently discarded if the delivery fails. -# Permitted values: -# 'rpc-call' - send RPC Calls pre-settled -# 'rpc-reply'- send RPC Replies pre-settled -# 'rpc-cast' - Send RPC Casts pre-settled -# 'notify' - Send Notifications pre-settled -# (multi valued) -#pre_settled = rpc-cast -#pre_settled = rpc-reply - - -[oslo_messaging_kafka] - -# -# From oslo.messaging -# - -# Max fetch bytes of Kafka consumer (integer value) -#kafka_max_fetch_bytes = 1048576 - -# Default timeout(s) for Kafka consumers (floating point value) -#kafka_consumer_timeout = 1.0 - -# DEPRECATED: Pool Size for Kafka Consumers (integer value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. 
-# Reason: Driver no longer uses connection pool. -#pool_size = 10 - -# DEPRECATED: The pool size limit for connections expiration policy (integer -# value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Driver no longer uses connection pool. -#conn_pool_min_size = 2 - -# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer -# value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -# Reason: Driver no longer uses connection pool. -#conn_pool_ttl = 1200 - -# Group id for Kafka consumer. Consumers in one group will coordinate message -# consumption (string value) -#consumer_group = oslo_messaging_consumer - -# Upper bound on the delay for KafkaProducer batching in seconds (floating -# point value) -#producer_batch_timeout = 0.0 - -# Size of batch for the producer async send (integer value) -#producer_batch_size = 16384 - -# Enable asynchronous consumer commits (boolean value) -#enable_auto_commit = false - -# The maximum number of records returned in a poll call (integer value) -#max_poll_records = 500 - -# Protocol used to communicate with brokers (string value) -# Possible values: -# PLAINTEXT - -# SASL_PLAINTEXT - -# SSL - -# SASL_SSL - -#security_protocol = PLAINTEXT - -# Mechanism when security protocol is SASL (string value) -#sasl_mechanism = PLAIN - -# CA certificate PEM file used to verify the server certificate (string value) -#ssl_cafile = - - -[oslo_messaging_notifications] - -# -# From oslo.messaging -# - -# The Drivers(s) to handle sending notifications. Possible values are -# messaging, messagingv2, routing, log, test, noop (multi valued) -# Deprecated group/name - [DEFAULT]/notification_driver -#driver = - -# A URL representing the messaging driver to use for notifications. If not set, -# we fall back to the same configuration used for RPC. 
(string value) -# Deprecated group/name - [DEFAULT]/notification_transport_url -#transport_url = - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -# Deprecated group/name - [DEFAULT]/notification_topics -#topics = notifications - -# The maximum number of attempts to re-send a notification message which failed -# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite -# (integer value) -#retry = -1 - - -[oslo_messaging_rabbit] - -# -# From oslo.messaging -# - -# Use durable queues in AMQP. (boolean value) -#amqp_durable_queues = false - -# Auto-delete queues in AMQP. (boolean value) -#amqp_auto_delete = false - -# Connect over SSL. (boolean value) -# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl -#ssl = false - -# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and -# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some -# distributions. (string value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version -#ssl_version = - -# SSL key file (valid only if SSL enabled). (string value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile -#ssl_key_file = - -# SSL cert file (valid only if SSL enabled). (string value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile -#ssl_cert_file = - -# SSL certification authority file (valid only if SSL enabled). (string value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs -#ssl_ca_file = - -# How long to wait before reconnecting in response to an AMQP consumer cancel -# notification. (floating point value) -#kombu_reconnect_delay = 1.0 - -# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not -# be used. This option may not be available in future versions. (string value) -#kombu_compression = - -# How long to wait a missing client before abandoning to send it its replies. 
-# This value should not be longer than rpc_response_timeout. (integer value) -# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout -#kombu_missing_consumer_retry_timeout = 60 - -# Determines how the next RabbitMQ node is chosen in case the one we are -# currently connected to becomes unavailable. Takes effect only if more than -# one RabbitMQ node is provided in config. (string value) -# Possible values: -# round-robin - -# shuffle - -#kombu_failover_strategy = round-robin - -# The RabbitMQ login method. (string value) -# Possible values: -# PLAIN - -# AMQPLAIN - -# RABBIT-CR-DEMO - -#rabbit_login_method = AMQPLAIN - -# How frequently to retry connecting with RabbitMQ. (integer value) -#rabbit_retry_interval = 1 - -# How long to backoff for between retries when connecting to RabbitMQ. (integer -# value) -#rabbit_retry_backoff = 2 - -# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. -# (integer value) -#rabbit_interval_max = 30 - -# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this -# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring -# is no longer controlled by the x-ha-policy argument when declaring a queue. -# If you just want to make sure that all queues (except those with auto- -# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy -# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) -#rabbit_ha_queues = false - -# Positive integer representing duration in seconds for queue TTL (x-expires). -# Queues which are unused for the duration of the TTL are automatically -# deleted. The parameter affects only reply and fanout queues. (integer value) -# Minimum value: 1 -#rabbit_transient_queues_ttl = 1800 - -# Specifies the number of messages to prefetch. Setting to zero allows -# unlimited messages. 
(integer value) -#rabbit_qos_prefetch_count = 0 - -# Number of seconds after which the Rabbit broker is considered down if -# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer -# value) -#heartbeat_timeout_threshold = 60 - -# How often times during the heartbeat_timeout_threshold we check the -# heartbeat. (integer value) -#heartbeat_rate = 2 - - -[oslo_middleware] - -# -# From oslo.middleware -# - -# The maximum body size for each request, in bytes. (integer value) -# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size -# Deprecated group/name - [DEFAULT]/max_request_body_size -#max_request_body_size = 114688 - -# DEPRECATED: The HTTP Header that will be used to determine what the original -# request protocol scheme was, even if it was hidden by a SSL termination -# proxy. (string value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#secure_proxy_ssl_header = X-Forwarded-Proto - -# Whether the application is behind a proxy or not. This determines if the -# middleware should parse the headers or not. (boolean value) -#enable_proxy_headers_parsing = false - - -[oslo_policy] - -# -# From oslo.policy -# - -# This option controls whether or not to enforce scope when evaluating -# policies. If ``True``, the scope of the token used in the request is compared -# to the ``scope_types`` of the policy being enforced. If the scopes do not -# match, an ``InvalidScope`` exception will be raised. If ``False``, a message -# will be logged informing operators that policies are being invoked with -# mismatching scope. (boolean value) -#enforce_scope = false - -# The file that defines policies. (string value) -#policy_file = policy.json - -# Default rule. Enforced when a requested rule is not found. (string value) -#policy_default_rule = default - -# Directories where policy configuration files are stored. 
They can be relative -# to any directory in the search path defined by the config_dir option, or -# absolute paths. The file defined by policy_file must exist for these -# directories to be searched. Missing or empty directories are ignored. (multi -# valued) -#policy_dirs = policy.d - -# Content Type to send and receive data for REST based policy check (string -# value) -# Possible values: -# application/x-www-form-urlencoded - -# application/json - -#remote_content_type = application/x-www-form-urlencoded - -# server identity verification for REST based policy check (boolean value) -#remote_ssl_verify_server_crt = false - -# Absolute path to ca cert file for REST based policy check (string value) -#remote_ssl_ca_crt_file = - -# Absolute path to client cert for REST based policy check (string value) -#remote_ssl_client_crt_file = - -# Absolute path client key file REST based policy check (string value) -#remote_ssl_client_key_file = - - -[policy] - -# -# From keystone -# - -# Entry point for the policy backend driver in the `keystone.policy` namespace. -# Supplied drivers are `rules` (which does not support any CRUD operations for -# the v3 policy API) and `sql`. Typically, there is no reason to set this -# option unless you are providing a custom entry point. (string value) -#driver = sql - -# Maximum number of entities that will be returned in a policy collection. -# (integer value) -#list_limit = - - -[profiler] - -# -# From osprofiler -# - -# -# Enable the profiling for all services on this node. -# -# Default value is False (fully disable the profiling feature). -# -# Possible values: -# -# * True: Enables the feature -# * False: Disables the feature. The profiling cannot be started via this -# project -# operations. If the profiling is triggered by another project, this project -# part will be empty. -# (boolean value) -# Deprecated group/name - [profiler]/profiler_enabled -#enabled = false - -# -# Enable SQL requests profiling in services. 
-# -# Default value is False (SQL requests won't be traced). -# -# Possible values: -# -# * True: Enables SQL requests profiling. Each SQL query will be part of the -# trace and can the be analyzed by how much time was spent for that. -# * False: Disables SQL requests profiling. The spent time is only shown on a -# higher level of operations. Single SQL queries cannot be analyzed this way. -# (boolean value) -#trace_sqlalchemy = false - -# -# Secret key(s) to use for encrypting context data for performance profiling. -# -# This string value should have the following format: -# [,,...], -# where each key is some random string. A user who triggers the profiling via -# the REST API has to set one of these keys in the headers of the REST API call -# to include profiling results of this node for this particular project. -# -# Both "enabled" flag and "hmac_keys" config options should be set to enable -# profiling. Also, to generate correct profiling information across all -# services -# at least one key needs to be consistent between OpenStack projects. This -# ensures it can be used from client side to generate the trace, containing -# information from all possible resources. -# (string value) -#hmac_keys = SECRET_KEY - -# -# Connection string for a notifier backend. -# -# Default value is ``messaging://`` which sets the notifier to oslo_messaging. -# -# Examples of possible values: -# -# * ``messaging://`` - use oslo_messaging driver for sending spans. -# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans. -# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans. -# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending -# spans. -# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending -# spans. -# (string value) -#connection_string = messaging:// - -# -# Document type for notification indexing in elasticsearch. 
-# (string value) -#es_doc_type = notification - -# -# This parameter is a time value parameter (for example: es_scroll_time=2m), -# indicating for how long the nodes that participate in the search will -# maintain -# relevant resources in order to continue and support it. -# (string value) -#es_scroll_time = 2m - -# -# Elasticsearch splits large requests in batches. This parameter defines -# maximum size of each batch (for example: es_scroll_size=10000). -# (integer value) -#es_scroll_size = 10000 - -# -# Redissentinel provides a timeout option on the connections. -# This parameter defines that timeout (for example: socket_timeout=0.1). -# (floating point value) -#socket_timeout = 0.1 - -# -# Redissentinel uses a service name to identify a master redis service. -# This parameter defines the name (for example: -# ``sentinal_service_name=mymaster``). -# (string value) -#sentinel_service_name = mymaster - -# -# Enable filter traces that contain error/exception to a separated place. -# -# Default value is set to False. -# -# Possible values: -# -# * True: Enable filter traces that contain error/exception. -# * False: Disable the filter. -# (boolean value) -#filter_error_trace = false - - -[receipt] - -# -# From keystone -# - -# The amount of time that a receipt should remain valid (in seconds). This -# value should always be very short, as it represents how long a user has to -# reattempt auth with the missing auth methods. (integer value) -# Minimum value: 0 -# Maximum value: 86400 -#expiration = 300 - -# Entry point for the receipt provider in the `keystone.receipt.provider` -# namespace. The receipt provider controls the receipt construction and -# validation operations. Keystone includes just the `fernet` receipt provider -# for now. `fernet` receipts do not need to be persisted at all, but require -# that you run `keystone-manage fernet_setup` (also see the `keystone-manage -# fernet_rotate` command). 
(string value) -#provider = fernet - -# Toggle for caching receipt creation and validation data. This has no effect -# unless global caching is enabled, or if cache_on_issue is disabled as we only -# cache receipts on issue. (boolean value) -#caching = true - -# The number of seconds to cache receipt creation and validation data. This has -# no effect unless both global and `[receipt] caching` are enabled. (integer -# value) -# Minimum value: 0 -#cache_time = 300 - -# Enable storing issued receipt data to receipt validation cache so that first -# receipt validation doesn't actually cause full validation cycle. This option -# has no effect unless global caching and receipt caching are enabled. (boolean -# value) -#cache_on_issue = true - - -[resource] - -# -# From keystone -# - -# DEPRECATED: Entry point for the resource driver in the `keystone.resource` -# namespace. Only a `sql` driver is supplied by keystone. Unless you are -# writing proprietary drivers for keystone, you do not need to set this option. -# (string value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. -# Reason: Non-SQL resource cannot be used with SQL Identity and has been unable -# to be used since Ocata. SQL Resource backend is a requirement as of Pike. -# Setting this option no longer has an effect on how Keystone operates. -#driver = sql - -# Toggle for resource caching. This has no effect unless global caching is -# enabled. (boolean value) -# Deprecated group/name - [assignment]/caching -#caching = true - -# Time to cache resource data in seconds. This has no effect unless global -# caching is enabled. (integer value) -# Deprecated group/name - [assignment]/cache_time -#cache_time = - -# Maximum number of entities that will be returned in a resource collection. -# (integer value) -# Deprecated group/name - [assignment]/list_limit -#list_limit = - -# Name of the domain that owns the `admin_project_name`. 
If left unset, then -# there is no admin project. `[resource] admin_project_name` must also be set -# to use this option. (string value) -#admin_project_domain_name = - -# This is a special project which represents cloud-level administrator -# privileges across services. Tokens scoped to this project will contain a true -# `is_admin_project` attribute to indicate to policy systems that the role -# assignments on that specific project should apply equally across every -# project. If left unset, then there is no admin project, and thus no explicit -# means of cross-project role assignments. `[resource] -# admin_project_domain_name` must also be set to use this option. (string -# value) -#admin_project_name = - -# This controls whether the names of projects are restricted from containing -# URL-reserved characters. If set to `new`, attempts to create or update a -# project with a URL-unsafe name will fail. If set to `strict`, attempts to -# scope a token with a URL-unsafe project name will fail, thereby forcing all -# project names to be updated to be URL-safe. (string value) -# Possible values: -# off - -# new - -# strict - -#project_name_url_safe = off - -# This controls whether the names of domains are restricted from containing -# URL-reserved characters. If set to `new`, attempts to create or update a -# domain with a URL-unsafe name will fail. If set to `strict`, attempts to -# scope a token with a URL-unsafe domain name will fail, thereby forcing all -# domain names to be updated to be URL-safe. (string value) -# Possible values: -# off - -# new - -# strict - -#domain_name_url_safe = off - - -[revoke] - -# -# From keystone -# - -# Entry point for the token revocation backend driver in the `keystone.revoke` -# namespace. Keystone only provides a `sql` driver, so there is no reason to -# set this option unless you are providing a custom entry point. 
(string value) -#driver = sql - -# The number of seconds after a token has expired before a corresponding -# revocation event may be purged from the backend. (integer value) -# Minimum value: 0 -#expiration_buffer = 1800 - -# Toggle for revocation event caching. This has no effect unless global caching -# is enabled. (boolean value) -#caching = true - -# Time to cache the revocation list and the revocation events (in seconds). -# This has no effect unless global and `[revoke] caching` are both enabled. -# (integer value) -# Deprecated group/name - [token]/revocation_cache_time -#cache_time = 3600 - - -[role] - -# -# From keystone -# - -# Entry point for the role backend driver in the `keystone.role` namespace. -# Keystone only provides a `sql` driver, so there's no reason to change this -# unless you are providing a custom entry point. (string value) -#driver = - -# Toggle for role caching. This has no effect unless global caching is enabled. -# In a typical deployment, there is no reason to disable this. (boolean value) -#caching = true - -# Time to cache role data, in seconds. This has no effect unless both global -# caching and `[role] caching` are enabled. (integer value) -#cache_time = - -# Maximum number of entities that will be returned in a role collection. This -# may be useful to tune if you have a large number of discrete roles in your -# deployment. (integer value) -#list_limit = - - -[saml] - -# -# From keystone -# - -# Determines the lifetime for any SAML assertions generated by keystone, using -# `NotOnOrAfter` attributes. (integer value) -#assertion_expiration_time = 3600 - -# Name of, or absolute path to, the binary to be used for XML signing. Although -# only the XML Security Library (`xmlsec1`) is supported, it may have a non- -# standard name or path on your system. 
If keystone cannot find the binary -# itself, you may need to install the appropriate package, use this option to -# specify an absolute path, or adjust keystone's PATH environment variable. -# (string value) -#xmlsec1_binary = xmlsec1 - -# Absolute path to the public certificate file to use for SAML signing. The -# value cannot contain a comma (`,`). (string value) -#certfile = /etc/keystone/ssl/certs/signing_cert.pem - -# Absolute path to the private key file to use for SAML signing. The value -# cannot contain a comma (`,`). (string value) -#keyfile = /etc/keystone/ssl/private/signing_key.pem - -# This is the unique entity identifier of the identity provider (keystone) to -# use when generating SAML assertions. This value is required to generate -# identity provider metadata and must be a URI (a URL is recommended). For -# example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/idp`. (uri -# value) -#idp_entity_id = - -# This is the single sign-on (SSO) service location of the identity provider -# which accepts HTTP POST requests. A value is required to generate identity -# provider metadata. For example: `https://keystone.example.com/v3/OS- -# FEDERATION/saml2/sso`. (uri value) -#idp_sso_endpoint = - -# This is the language used by the identity provider's organization. (string -# value) -#idp_lang = en - -# This is the name of the identity provider's organization. (string value) -#idp_organization_name = SAML Identity Provider - -# This is the name of the identity provider's organization to be displayed. -# (string value) -#idp_organization_display_name = OpenStack SAML Identity Provider - -# This is the URL of the identity provider's organization. The URL referenced -# here should be useful to humans. (uri value) -#idp_organization_url = https://example.com/ - -# This is the company name of the identity provider's contact person. (string -# value) -#idp_contact_company = Example, Inc. - -# This is the given name of the identity provider's contact person. 
(string -# value) -#idp_contact_name = SAML Identity Provider Support - -# This is the surname of the identity provider's contact person. (string value) -#idp_contact_surname = Support - -# This is the email address of the identity provider's contact person. (string -# value) -#idp_contact_email = support@example.com - -# This is the telephone number of the identity provider's contact person. -# (string value) -#idp_contact_telephone = +1 800 555 0100 - -# This is the type of contact that best describes the identity provider's -# contact person. (string value) -# Possible values: -# technical - -# support - -# administrative - -# billing - -# other - -#idp_contact_type = other - -# Absolute path to the identity provider metadata file. This file should be -# generated with the `keystone-manage saml_idp_metadata` command. There is -# typically no reason to change this value. (string value) -#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml - -# The prefix of the RelayState SAML attribute to use when generating enhanced -# client and proxy (ECP) assertions. In a typical deployment, there is no -# reason to change this value. (string value) -#relay_state_prefix = ss:mem: - - -[security_compliance] - -# -# From keystone -# - -# The maximum number of days a user can go without authenticating before being -# considered "inactive" and automatically disabled (locked). This feature is -# disabled by default; set any value to enable it. This feature depends on the -# `sql` backend for the `[identity] driver`. When a user exceeds this threshold -# and is considered "inactive", the user's `enabled` attribute in the HTTP API -# may not match the value of the user's `enabled` column in the user table. -# (integer value) -# Minimum value: 1 -#disable_user_account_days_inactive = - -# The maximum number of times that a user can fail to authenticate before the -# user account is locked for the number of seconds specified by -# `[security_compliance] lockout_duration`. 
This feature is disabled by -# default. If this feature is enabled and `[security_compliance] -# lockout_duration` is not set, then users may be locked out indefinitely until -# the user is explicitly enabled via the API. This feature depends on the `sql` -# backend for the `[identity] driver`. (integer value) -# Minimum value: 1 -#lockout_failure_attempts = - -# The number of seconds a user account will be locked when the maximum number -# of failed authentication attempts (as specified by `[security_compliance] -# lockout_failure_attempts`) is exceeded. Setting this option will have no -# effect unless you also set `[security_compliance] lockout_failure_attempts` -# to a non-zero value. This feature depends on the `sql` backend for the -# `[identity] driver`. (integer value) -# Minimum value: 1 -#lockout_duration = 1800 - -# The number of days for which a password will be considered valid before -# requiring it to be changed. This feature is disabled by default. If enabled, -# new password changes will have an expiration date, however existing passwords -# would not be impacted. This feature depends on the `sql` backend for the -# `[identity] driver`. (integer value) -# Minimum value: 1 -#password_expires_days = - -# This controls the number of previous user password iterations to keep in -# history, in order to enforce that newly created passwords are unique. The -# total number which includes the new password should not be greater or equal -# to this value. Setting the value to zero (the default) disables this feature. -# Thus, to enable this feature, values must be greater than 0. This feature -# depends on the `sql` backend for the `[identity] driver`. (integer value) -# Minimum value: 0 -#unique_last_password_count = 0 - -# The number of days that a password must be used before the user can change -# it. This prevents users from changing their passwords immediately in order to -# wipe out their password history and reuse an old password. 
This feature does -# not prevent administrators from manually resetting passwords. It is disabled -# by default and allows for immediate password changes. This feature depends on -# the `sql` backend for the `[identity] driver`. Note: If -# `[security_compliance] password_expires_days` is set, then the value for this -# option should be less than the `password_expires_days`. (integer value) -# Minimum value: 0 -#minimum_password_age = 0 - -# The regular expression used to validate password strength requirements. By -# default, the regular expression will match any password. The following is an -# example of a pattern which requires at least 1 letter, 1 digit, and have a -# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature -# depends on the `sql` backend for the `[identity] driver`. (string value) -#password_regex = - -# Describe your password regular expression here in language for humans. If a -# password fails to match the regular expression, the contents of this -# configuration variable will be returned to users to explain why their -# requested password was insufficient. (string value) -#password_regex_description = - -# Enabling this option requires users to change their password when the user is -# created, or upon administrative reset. Before accessing any services, -# affected users will have to change their password. To ignore this requirement -# for specific users, such as service users, set the `options` attribute -# `ignore_change_password_upon_first_use` to `True` for the desired user via -# the update user API. This feature is disabled by default. This feature is -# only applicable with the `sql` backend for the `[identity] driver`. (boolean -# value) -#change_password_upon_first_use = false - - -[shadow_users] - -# -# From keystone -# - -# Entry point for the shadow users backend driver in the -# `keystone.identity.shadow_users` namespace. 
This driver is used for -# persisting local user references to externally-managed identities (via -# federation, LDAP, etc). Keystone only provides a `sql` driver, so there is no -# reason to change this option unless you are providing a custom entry point. -# (string value) -#driver = sql - - -[signing] - -# -# From keystone -# - -# DEPRECATED: Absolute path to the public certificate file to use for signing -# responses to revocation lists requests. Set this together with `[signing] -# keyfile`. For non-production environments, you may be interested in using -# `keystone-manage pki_setup` to generate self-signed certificates. (string -# value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. -# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#certfile = /etc/keystone/ssl/certs/signing_cert.pem - -# DEPRECATED: Absolute path to the private key file to use for signing -# responses to revocation lists requests. Set this together with `[signing] -# certfile`. (string value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. -# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#keyfile = /etc/keystone/ssl/private/signing_key.pem - -# DEPRECATED: Absolute path to the public certificate authority (CA) file to -# use when creating self-signed certificates with `keystone-manage pki_setup`. -# Set this together with `[signing] ca_key`. There is no reason to set this -# option unless you are requesting revocation lists in a non-production -# environment. Use a `[signing] certfile` issued from a trusted certificate -# authority instead. (string value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. 
-# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#ca_certs = /etc/keystone/ssl/certs/ca.pem - -# DEPRECATED: Absolute path to the private certificate authority (CA) key file -# to use when creating self-signed certificates with `keystone-manage -# pki_setup`. Set this together with `[signing] ca_certs`. There is no reason -# to set this option unless you are requesting revocation lists in a non- -# production environment. Use a `[signing] certfile` issued from a trusted -# certificate authority instead. (string value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. -# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#ca_key = /etc/keystone/ssl/private/cakey.pem - -# DEPRECATED: Key size (in bits) to use when generating a self-signed token -# signing certificate. There is no reason to set this option unless you are -# requesting revocation lists in a non-production environment. Use a `[signing] -# certfile` issued from a trusted certificate authority instead. (integer -# value) -# Minimum value: 1024 -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. -# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#key_size = 2048 - -# DEPRECATED: The validity period (in days) to use when generating a self- -# signed token signing certificate. There is no reason to set this option -# unless you are requesting revocation lists in a non-production environment. -# Use a `[signing] certfile` issued from a trusted certificate authority -# instead. (integer value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. 
-# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#valid_days = 3650 - -# DEPRECATED: The certificate subject to use when generating a self-signed -# token signing certificate. There is no reason to set this option unless you -# are requesting revocation lists in a non-production environment. Use a -# `[signing] certfile` issued from a trusted certificate authority instead. -# (string value) -# This option is deprecated for removal since P. -# Its value may be silently ignored in the future. -# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in -# Pike. These options remain for backwards compatibility. -#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com - - -[token] - -provider = fernet - -# -# From keystone -# - -# The amount of time that a token should remain valid (in seconds). Drastically -# reducing this value may break "long-running" operations that involve multiple -# services to coordinate together, and will force users to authenticate with -# keystone more frequently. Drastically increasing this value will increase the -# number of tokens that will be simultaneously valid. Keystone tokens are also -# bearer tokens, so a shorter duration will also reduce the potential security -# impact of a compromised token. (integer value) -# Minimum value: 0 -# Maximum value: 9223372036854775807 -#expiration = 3600 - -# Entry point for the token provider in the `keystone.token.provider` -# namespace. The token provider controls the token construction, validation, -# and revocation operations. Supported upstream providers are `fernet` and -# `jws`. Neither `fernet` or `jws` tokens require persistence and both require -# additional setup. If using `fernet`, you're required to run `keystone-manage -# fernet_setup`, which creates symmetric keys used to encrypt tokens. 
If using -# `jws`, you're required to generate an ECDSA keypair using a SHA-256 hash -# algorithm for signing and validating token, which can be done with `keystone- -# manage create_jws_keypair`. Note that `fernet` tokens are encrypted and `jws` -# tokens are only signed. Please be sure to consider this if your deployment -# has security requirements regarding payload contents used to generate token -# IDs. (string value) -#provider = fernet - -# Toggle for caching token creation and validation data. This has no effect -# unless global caching is enabled. (boolean value) -#caching = true - -# The number of seconds to cache token creation and validation data. This has -# no effect unless both global and `[token] caching` are enabled. (integer -# value) -# Minimum value: 0 -# Maximum value: 9223372036854775807 -#cache_time = - -# This toggles support for revoking individual tokens by the token identifier -# and thus various token enumeration operations (such as listing all tokens -# issued to a specific user). These operations are used to determine the list -# of tokens to consider revoked. Do not disable this option if you're using the -# `kvs` `[revoke] driver`. (boolean value) -#revoke_by_id = true - -# This toggles whether scoped tokens may be re-scoped to a new project or -# domain, thereby preventing users from exchanging a scoped token (including -# those with a default project scope) for any other token. This forces users to -# either authenticate for unscoped tokens (and later exchange that unscoped -# token for tokens with a more specific scope) or to provide their credentials -# in every request for a scoped token to avoid re-scoping altogether. (boolean -# value) -#allow_rescope_scoped_token = true - -# DEPRECATED: This controls whether roles should be included with tokens that -# are not directly assigned to the token's scope, but are instead linked -# implicitly to other role assignments. 
(boolean value) -# This option is deprecated for removal since R. -# Its value may be silently ignored in the future. -# Reason: Default roles depend on a chain of implied role assignments. Ex: an -# admin user will also have the reader and member role. By ensuring that all -# these roles will always appear on the token validation response, we can -# improve the simplicity and readability of policy files. -#infer_roles = true - -# DEPRECATED: Enable storing issued token data to token validation cache so -# that first token validation doesn't actually cause full validation cycle. -# This option has no effect unless global caching is enabled and will still -# cache tokens even if `[token] caching = False`. (boolean value) -# This option is deprecated for removal since S. -# Its value may be silently ignored in the future. -# Reason: Keystone already exposes a configuration option for caching tokens. -# Having a separate configuration option to cache tokens when they are issued -# is redundant, unnecessarily complicated, and is misleading if token caching -# is disabled because tokens will still be pre-cached by default when they are -# issued. The ability to pre-cache tokens when they are issued is going to rely -# exclusively on the ``keystone.conf [token] caching`` option in the future. -#cache_on_issue = true - -# This controls the number of seconds that a token can be retrieved for beyond -# the built-in expiry time. This allows long running operations to succeed. -# Defaults to two days. (integer value) -#allow_expired_window = 172800 - - -[tokenless_auth] - -# -# From keystone -# - -# The list of distinguished names which identify trusted issuers of client -# certificates allowed to use X.509 tokenless authorization. If the option is -# absent then no certificates will be allowed. The format for the values of a -# distinguished name (DN) must be separated by a comma and contain no spaces. 
-# Furthermore, because an individual DN may contain commas, this configuration -# option may be repeated multiple times to represent multiple values. For -# example, keystone.conf would include two consecutive lines in order to trust -# two different DNs, such as `trusted_issuer = CN=john,OU=keystone,O=openstack` -# and `trusted_issuer = CN=mary,OU=eng,O=abc`. (multi valued) -#trusted_issuer = - -# The federated protocol ID used to represent X.509 tokenless authorization. -# This is used in combination with the value of `[tokenless_auth] -# issuer_attribute` to find a corresponding federated mapping. In a typical -# deployment, there is no reason to change this value. (string value) -#protocol = x509 - -# The name of the WSGI environment variable used to pass the issuer of the -# client certificate to keystone. This attribute is used as an identity -# provider ID for the X.509 tokenless authorization along with the protocol to -# look up its corresponding mapping. In a typical deployment, there is no -# reason to change this value. (string value) -#issuer_attribute = SSL_CLIENT_I_DN - - -[trust] - -# -# From keystone -# - -# Allows authorization to be redelegated from one user to another, effectively -# chaining trusts together. When disabled, the `remaining_uses` attribute of a -# trust is constrained to be zero. (boolean value) -#allow_redelegation = false - -# Maximum number of times that authorization can be redelegated from one user -# to another in a chain of trusts. This number may be reduced further for a -# specific trust. (integer value) -#max_redelegation_count = 3 - -# Entry point for the trust backend driver in the `keystone.trust` namespace. -# Keystone only provides a `sql` driver, so there is no reason to change this -# unless you are providing a custom entry point. (string value) -#driver = sql - - -[unified_limit] - -# -# From keystone -# - -# Entry point for the unified limit backend driver in the -# `keystone.unified_limit` namespace. 
Keystone only provides a `sql` driver, so -# there's no reason to change this unless you are providing a custom entry -# point. (string value) -#driver = sql - -# Toggle for unified limit caching. This has no effect unless global caching is -# enabled. In a typical deployment, there is no reason to disable this. -# (boolean value) -#caching = true - -# Time to cache unified limit data, in seconds. This has no effect unless both -# global caching and `[unified_limit] caching` are enabled. (integer value) -#cache_time = - -# Maximum number of entities that will be returned in a role collection. This -# may be useful to tune if you have a large number of unified limits in your -# deployment. (integer value) -#list_limit = - -# The enforcement model to use when validating limits associated to projects. -# Enforcement models will behave differently depending on the existing limits, -# which may result in backwards incompatible changes if a model is switched in -# a running deployment. (string value) -# Possible values: -# flat - -# strict_two_level - -#enforcement_model = flat - - -[wsgi] - -# -# From keystone -# - -# If set to true, this enables the oslo debug middleware in Keystone. This -# Middleware prints a lot of information about the request and the response. It -# is useful for getting information about the data on the wire (decoded) and -# passed to the WSGI application pipeline. This middleware has no effect on the -# "debug" setting in the [DEFAULT] section of the config file or setting -# Keystone's log-level to "DEBUG"; it is specific to debugging the WSGI data as -# it enters and leaves Keystone (specific request-related data). This option is -# used for introspection on the request and response data between the web -# server (apache, nginx, etc) and Keystone. This middleware is inserted as the -# first element in the middleware chain and will show the data closest to the -# wire. WARNING: NOT INTENDED FOR USE IN PRODUCTION. 
THIS MIDDLEWARE CAN AND -# WILL EMIT SENSITIVE/PRIVILEGED DATA. (boolean value) -#debug_middleware = false From 65f20384faa28e469b1f47713825706c157275cd Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:22:25 +0100 Subject: [PATCH 07/18] Delete conf_mysql directory --- conf_mysql/99-openstack.conf | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 conf_mysql/99-openstack.conf diff --git a/conf_mysql/99-openstack.conf b/conf_mysql/99-openstack.conf deleted file mode 100644 index 4cd6688..0000000 --- a/conf_mysql/99-openstack.conf +++ /dev/null @@ -1,13 +0,0 @@ -[mysqld] -bind-address = 0.0.0.0 - -default-storage-engine = innodb -innodb_file_per_table = on -max_connections = 4096 -collation-server = utf8_general_ci -character-set-server = utf8 -wait_timeout = 600 -interactive_timeout = 600 -net_read_timeout = 600 -net_write_timeout = 600 - From 98fb9e15b8f011ccf102e7b88f215c6462da8f4f Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Fri, 21 Feb 2025 17:22:59 +0100 Subject: [PATCH 08/18] Add files via upload added the compose dir for deployment --- .../conf_conductor/iotronic.conf | 102 + .../conf_keystone/keystone.conf | 2715 +++++++++++++++++ .../conf_mysql/99-openstack.conf | 13 + compose_deployment/conf_ui/local_settings.py | 916 ++++++ compose_deployment/conf_wagent/iotronic.conf | 96 + compose_deployment/docker-compose.yml | 393 +++ 6 files changed, 4235 insertions(+) create mode 100644 compose_deployment/conf_conductor/iotronic.conf create mode 100644 compose_deployment/conf_keystone/keystone.conf create mode 100644 compose_deployment/conf_mysql/99-openstack.conf create mode 100644 compose_deployment/conf_ui/local_settings.py create mode 100644 compose_deployment/conf_wagent/iotronic.conf create mode 100644 compose_deployment/docker-compose.yml diff --git a/compose_deployment/conf_conductor/iotronic.conf 
b/compose_deployment/conf_conductor/iotronic.conf new file mode 100644 index 0000000..30371a1 --- /dev/null +++ b/compose_deployment/conf_conductor/iotronic.conf @@ -0,0 +1,102 @@ +[DEFAULT] +transport_url = rabbit://openstack:unime@rabbitmq + +debug=True +log_file = /var/log/iotronic/iotronic-conductor.log +proxy=nginx + + +# Authentication strategy used by iotronic-api: one of +# "keystone" or "noauth". "noauth" should not be used in a +# production environment because all authentication will be +# disabled. (string value) +auth_strategy=keystone + +# Enable pecan debug mode. WARNING: this is insecure and +# should not be used in a production environment. (boolean +# value) +#pecan_debug=false + + +[conductor] +service_port_min=50000 +service_port_max=50100 + +[wamp] +wamp_transport_url = ws://iotronic-wagent:8181/ +wamp_realm = s4t +#skip_cert_verify= False +register_agent = True + + + +[database] +connection = mysql+pymysql://iotronic:unime@iotronic-db/iotronic + +[keystone_authtoken] +www_authenticate_uri = http://keystone:5000 +auth_url = http://keystone:5000 +auth_plugin = password +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = iotronic +password = unime + + +[neutron] +auth_url = http://controller:5000 +url = http://controller:9696 +auth_strategy = password +auth_type = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = neutron +password = NEUTRON_PASS +retries = 3 +project_domain_id= default + + +[designate] +auth_url = http://controller:5000 +url = http://controller:9001 +auth_strategy = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = designate +password = password +retries = 3 +project_domain_id= default + + +[cors] +# Indicate whether this resource may be shared with the domain +# received in the requests "origin" header. 
Format: +# "://[:]", no trailing slash. Example: +# https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user +# credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. +# Defaults to HTTP Simple Headers. (list value) +#expose_headers = + +# Maximum cache age of CORS preflight requests. (integer +# value) +#max_age = 3600 + +# Indicate which methods can be used during the actual +# request. (list value) +#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH + +# Indicate which header field names may be used during the +# actual request. (list value) +#allow_headers = diff --git a/compose_deployment/conf_keystone/keystone.conf b/compose_deployment/conf_keystone/keystone.conf new file mode 100644 index 0000000..01e3d6f --- /dev/null +++ b/compose_deployment/conf_keystone/keystone.conf @@ -0,0 +1,2715 @@ +[DEFAULT] +debug = True +#log_config = /etc/keystone/logging.conf +log_dir = /var/log/keystone + +# +# From keystone +# + +# Using this feature is *NOT* recommended. Instead, use the `keystone-manage +# bootstrap` command. The value of this option is treated as a "shared secret" +# that can be used to bootstrap Keystone through the API. This "token" does not +# represent a user (it has no identity), and carries no explicit authorization +# (it effectively bypasses most authorization checks). If set to `None`, the +# value is ignored and the `admin_token` middleware is effectively disabled. +# (string value) +#admin_token = + +# The base public endpoint URL for Keystone that is advertised to clients +# (NOTE: this does NOT affect how Keystone listens for connections). Defaults +# to the base host URL of the request. For example, if keystone receives a +# request to `http://server:5000/v3/users`, then this will option will be +# automatically treated as `http://server:5000`. 
You should only need to set +# option if either the value of the base URL contains a path that keystone does +# not automatically infer (`/prefix/v3`), or if the endpoint should be found on +# a different host. (uri value) +#public_endpoint = + +# DEPRECATED: The base admin endpoint URL for Keystone that is advertised to +# clients (NOTE: this does NOT affect how Keystone listens for connections). +# Defaults to the base host URL of the request. For example, if keystone +# receives a request to `http://server:35357/v3/users`, then this will option +# will be automatically treated as `http://server:35357`. You should only need +# to set option if either the value of the base URL contains a path that +# keystone does not automatically infer (`/prefix/v3`), or if the endpoint +# should be found on a different host. (uri value) +# This option is deprecated for removal since R. +# Its value may be silently ignored in the future. +# Reason: With the removal of the 2.0 API keystone does not distinguish between +# admin and public endpoints. +#admin_endpoint = + +# Maximum depth of the project hierarchy, excluding the project acting as a +# domain at the top of the hierarchy. WARNING: Setting it to a large value may +# adversely impact performance. (integer value) +#max_project_tree_depth = 5 + +# Limit the sizes of user & project ID/names. (integer value) +#max_param_size = 64 + +# Similar to `[DEFAULT] max_param_size`, but provides an exception for token +# values. With Fernet tokens, this can be set as low as 255. With UUID tokens, +# this should be set to 32). (integer value) +#max_token_size = 255 + +# The maximum number of entities that will be returned in a collection. This +# global limit may be then overridden for a specific driver, by specifying a +# list_limit in the appropriate section (for example, `[assignment]`). No limit +# is set by default. 
In larger deployments, it is recommended that you set this +# to a reasonable number to prevent operations like listing all users and +# projects from placing an unnecessary load on the system. (integer value) +#list_limit = + +# If set to true, strict password length checking is performed for password +# manipulation. If a password exceeds the maximum length, the operation will +# fail with an HTTP 403 Forbidden error. If set to false, passwords are +# automatically truncated to the maximum length. (boolean value) +#strict_password_check = false + +# If set to true, then the server will return information in HTTP responses +# that may allow an unauthenticated or authenticated user to get more +# information than normal, such as additional details about why authentication +# failed. This may be useful for debugging but is insecure. (boolean value) +#insecure_debug = false + +# Default `publisher_id` for outgoing notifications. If left undefined, +# Keystone will default to using the server's host name. (string value) +#default_publisher_id = + +# Define the notification format for identity service events. A `basic` +# notification only has information about the resource being operated on. A +# `cadf` notification has the same information, as well as information about +# the initiator of the event. The `cadf` option is entirely backwards +# compatible with the `basic` option, but is fully CADF-compliant, and is +# recommended for auditing use cases. (string value) +# Possible values: +# basic - +# cadf - +#notification_format = cadf + +# You can reduce the number of notifications keystone emits by explicitly +# opting out. Keystone will not emit notifications that match the patterns +# expressed in this list. Values are expected to be in the form of +# `identity..`. By default, all notifications related +# to authentication are automatically suppressed. This field can be set +# multiple times in order to opt-out of multiple notification topics. 
For +# example, the following suppresses notifications describing user creation or +# successful authentication events: notification_opt_out=identity.user.create +# notification_opt_out=identity.authenticate.success (multi valued) +#notification_opt_out = identity.authenticate.success +#notification_opt_out = identity.authenticate.pending +#notification_opt_out = identity.authenticate.failed + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of the default +# INFO level. (boolean value) +# Note: This option can be changed without restarting. +#debug = false + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. Note that when logging +# configuration files are used then all logging configuration is set in the +# configuration file and other logging configuration options are ignored (for +# example, log-date-format). (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default is set, +# logging will go to stderr as defined by use_stderr. This option is ignored if +# log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. This option +# is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is moved or +# removed this handler will open a new log file with specified path +# instantaneously. 
It makes sense only if log_file option is specified and +# Linux platform is used. This option is ignored if log_config_append is set. +# (boolean value) +#watch_log_file = false + +# Use syslog for logging. Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. This option is ignored if log_config_append +# is set. (boolean value) +#use_syslog = false + +# Enable journald for logging. If running in a systemd environment you may wish +# to enable journal support. Doing so will use the journal native protocol +# which includes structured metadata in addition to log messages.This option is +# ignored if log_config_append is set. (boolean value) +#use_journal = false + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +#syslog_log_facility = LOG_USER + +# Use JSON formatting for logging. This option is ignored if log_config_append +# is set. (boolean value) +#use_json = false + +# Log output to standard error. This option is ignored if log_config_append is +# set. (boolean value) +#use_stderr = false + +# Log output to Windows Event Log. (boolean value) +#use_eventlog = false + +# The amount of time before the log files are rotated. This option is ignored +# unless log_rotation_type is setto "interval". (integer value) +#log_rotate_interval = 1 + +# Rotation interval type. The time of the last file change (or the time when +# the service was started) is used when scheduling the next rotation. (string +# value) +# Possible values: +# Seconds - +# Minutes - +# Hours - +# Days - +# Weekday - +# Midnight - +#log_rotate_interval_type = days + +# Maximum number of rotated log files. (integer value) +#max_logfile_count = 30 + +# Log file maximum size in MB. This option is ignored if "log_rotation_type" is +# not set to "size". (integer value) +#max_logfile_size_mb = 200 + +# Log rotation type. (string value) +# Possible values: +# interval - Rotate logs at predefined time intervals. 
+# size - Rotate logs once they reach a predefined size. +# none - Do not rotate log files. +#log_rotation_type = none + +# Format string to use for log messages with context. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the message +# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter +# (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. This option is ignored +# if log_config_append is set. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO + +# Enables or disables publication of error events. 
(boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Interval, number of seconds, of log rate limiting. (integer value) +#rate_limit_interval = 0 + +# Maximum number of logged messages per rate_limit_interval. (integer value) +#rate_limit_burst = 0 + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG +# or empty string. Logs with level greater or equal to rate_limit_except_level +# are not filtered. An empty string means that all levels are filtered. (string +# value) +#rate_limit_except_level = CRITICAL + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size = 30 + +# The pool size limit for connections expiration policy (integer value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer value) +#conn_pool_ttl = 1200 + +# Size of executor thread pool when executor is threading or eventlet. (integer +# value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# The network address and optional user credentials for connecting to the +# messaging backend, in URL format. 
The expected format is: +# +# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query +# +# Example: rabbit://rabbitmq:password@127.0.0.1:5672// +# +# For full details on the fields in the URL see the documentation of +# oslo_messaging.TransportURL at +# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html +# (string value) +#transport_url = rabbit:// + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange = keystone + + +[access_rules_config] + +# +# From keystone +# + +# Entry point for the access rules config backend driver in the +# `keystone.access_rules_config` namespace. Keystone only provides a `json` +# driver, so there is no reason to change this unless you are providing a +# custom entry point. (string value) +#driver = json + +# Toggle for access rules caching. This has no effect unless global caching is +# enabled. (boolean value) +#caching = true + +# Time to cache access rule data in seconds. This has no effect unless global +# caching is enabled. (integer value) +#cache_time = + +# Path to access rules configuration. If not present, no access rule +# configuration will be loaded and application credential access rules will be +# unavailable. (string value) +#rules_file = /etc/keystone/access_rules.json + +# Toggles permissive mode for access rules. When enabled, application +# credentials can be created with any access rules regardless of operator's +# configuration. (boolean value) +#permissive = false + + +[application_credential] + +# +# From keystone +# + +# Entry point for the application credential backend driver in the +# `keystone.application_credential` namespace. Keystone only provides a `sql` +# driver, so there is no reason to change this unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Toggle for application credential caching. 
This has no effect unless global +# caching is enabled. (boolean value) +#caching = true + +# Time to cache application credential data in seconds. This has no effect +# unless global caching is enabled. (integer value) +#cache_time = + +# Maximum number of application credentials a user is permitted to create. A +# value of -1 means unlimited. If a limit is not set, users are permitted to +# create application credentials at will, which could lead to bloat in the +# keystone database or open keystone to a DoS attack. (integer value) +#user_limit = -1 + + +[assignment] + +# +# From keystone +# + +# Entry point for the assignment backend driver (where role assignments are +# stored) in the `keystone.assignment` namespace. Only a SQL driver is supplied +# by keystone itself. Unless you are writing proprietary drivers for keystone, +# you do not need to set this option. (string value) +#driver = sql + +# A list of role names which are prohibited from being an implied role. (list +# value) +#prohibited_implied_role = admin + + +[auth] + +# +# From keystone +# + +# Allowed authentication methods. Note: You should disable the `external` auth +# method if you are currently using federation. External auth and federation +# both use the REMOTE_USER variable. Since both the mapped and external plugin +# are being invoked to validate attributes in the request environment, it can +# cause conflicts. (list value) +#methods = external,password,token,oauth1,mapped,application_credential + +# Entry point for the password auth plugin module in the +# `keystone.auth.password` namespace. You do not need to set this unless you +# are overriding keystone's own password authentication plugin. (string value) +#password = + +# Entry point for the token auth plugin module in the `keystone.auth.token` +# namespace. You do not need to set this unless you are overriding keystone's +# own token authentication plugin. 
(string value) +#token = + +# Entry point for the external (`REMOTE_USER`) auth plugin module in the +# `keystone.auth.external` namespace. Supplied drivers are `DefaultDomain` and +# `Domain`. The default driver is `DefaultDomain`, which assumes that all users +# identified by the username specified to keystone in the `REMOTE_USER` +# variable exist within the context of the default domain. The `Domain` option +# expects an additional environment variable be presented to keystone, +# `REMOTE_DOMAIN`, containing the domain name of the `REMOTE_USER` (if +# `REMOTE_DOMAIN` is not set, then the default domain will be used instead). +# You do not need to set this unless you are taking advantage of "external +# authentication", where the application server (such as Apache) is handling +# authentication instead of keystone. (string value) +#external = + +# Entry point for the OAuth 1.0a auth plugin module in the +# `keystone.auth.oauth1` namespace. You do not need to set this unless you are +# overriding keystone's own `oauth1` authentication plugin. (string value) +#oauth1 = + +# Entry point for the mapped auth plugin module in the `keystone.auth.mapped` +# namespace. You do not need to set this unless you are overriding keystone's +# own `mapped` authentication plugin. (string value) +#mapped = + +# Entry point for the application_credential auth plugin module in the +# `keystone.auth.application_credential` namespace. You do not need to set this +# unless you are overriding keystone's own `application_credential` +# authentication plugin. (string value) +#application_credential = + + +[cache] + +# +# From oslo.cache +# + +# Prefix for building the configuration dictionary for the cache region. This +# should not need to be changed unless there is another dogpile.cache region +# with the same configuration name. (string value) +#config_prefix = cache.oslo + +# Default TTL, in seconds, for any cached item in the dogpile.cache region. 
+# This applies to any cached method that doesn't have an explicit cache +# expiration time defined for it. (integer value) +#expiration_time = 600 + +# Cache backend module. For eventlet-based or environments with hundreds of +# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is +# recommended. For environments with less than 100 threaded servers, Memcached +# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test +# environments with a single instance of the server can use the +# dogpile.cache.memory backend. (string value) +# Possible values: +# oslo_cache.memcache_pool - +# oslo_cache.dict - +# oslo_cache.mongo - +# oslo_cache.etcd3gw - +# dogpile.cache.memcached - +# dogpile.cache.pylibmc - +# dogpile.cache.bmemcached - +# dogpile.cache.dbm - +# dogpile.cache.redis - +# dogpile.cache.memory - +# dogpile.cache.memory_pickle - +# dogpile.cache.null - +#backend = dogpile.cache.null + +# Arguments supplied to the backend module. Specify this option once per +# argument to be passed to the dogpile.cache backend. Example format: +# ":". (multi valued) +#backend_argument = + +# Proxy classes to import that will affect the way the dogpile.cache backend +# functions. See the dogpile.cache documentation on changing-backend-behavior. +# (list value) +#proxies = + +# Global toggle for caching. (boolean value) +#enabled = true + +# Extra debugging from the cache backend (cache keys, get/set/delete/etc +# calls). This is only really useful if you need to see the specific cache- +# backend get/set/delete calls with the keys/values. Typically this should be +# left set to false. (boolean value) +#debug_cache_backend = false + +# Memcache servers in the format of "host:port". (dogpile.cache.memcache and +# oslo_cache.memcache_pool backends only). (list value) +#memcache_servers = localhost:11211 + +# Number of seconds memcached server is considered dead before it is tried +# again. 
(dogpile.cache.memcache and oslo_cache.memcache_pool backends only). +# (integer value) +#memcache_dead_retry = 300 + +# Timeout in seconds for every call to a server. (dogpile.cache.memcache and +# oslo_cache.memcache_pool backends only). (floating point value) +#memcache_socket_timeout = 3.0 + +# Max total number of open connections to every memcached server. +# (oslo_cache.memcache_pool backend only). (integer value) +#memcache_pool_maxsize = 10 + +# Number of seconds a connection to memcached is held unused in the pool before +# it is closed. (oslo_cache.memcache_pool backend only). (integer value) +#memcache_pool_unused_timeout = 60 + +# Number of seconds that an operation will wait to get a memcache client +# connection. (integer value) +#memcache_pool_connection_get_timeout = 10 + + +[catalog] + +# +# From keystone +# + +# Absolute path to the file used for the templated catalog backend. This option +# is only used if the `[catalog] driver` is set to `templated`. (string value) +#template_file = default_catalog.templates + +# Entry point for the catalog driver in the `keystone.catalog` namespace. +# Keystone provides a `sql` option (which supports basic CRUD operations +# through SQL), a `templated` option (which loads the catalog from a templated +# catalog file on disk), and a `endpoint_filter.sql` option (which supports +# arbitrary service catalogs per project). (string value) +#driver = sql + +# Toggle for catalog caching. This has no effect unless global caching is +# enabled. In a typical deployment, there is no reason to disable this. +# (boolean value) +#caching = true + +# Time to cache catalog data (in seconds). This has no effect unless global and +# catalog caching are both enabled. Catalog data (services, endpoints, etc.) +# typically does not change frequently, and so a longer duration than the +# global default may be desirable. (integer value) +#cache_time = + +# Maximum number of entities that will be returned in a catalog collection. 
+# There is typically no reason to set this, as it would be unusual for a +# deployment to have enough services or endpoints to exceed a reasonable limit. +# (integer value) +#list_limit = + + +[cors] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. Format: "://[:]", no trailing +# slash. Example: https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers. (list value) +#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,Openstack-Auth-Receipt + +# Maximum cache age of CORS preflight requests. (integer value) +#max_age = 3600 + +# Indicate which methods can be used during the actual request. (list value) +#allow_methods = GET,PUT,POST,DELETE,PATCH + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name,Openstack-Auth-Receipt + + +[credential] + +# +# From keystone +# + +# Entry point for the credential backend driver in the `keystone.credential` +# namespace. Keystone only provides a `sql` driver, so there's no reason to +# change this unless you are providing a custom entry point. (string value) +#driver = sql + +# Entry point for credential encryption and decryption operations in the +# `keystone.credential.provider` namespace. Keystone only provides a `fernet` +# driver, so there's no reason to change this unless you are providing a custom +# entry point to encrypt and decrypt credentials. (string value) +#provider = fernet + +# Directory containing Fernet keys used to encrypt and decrypt credentials +# stored in the credential backend. 
Fernet keys used to encrypt credentials +# have no relationship to Fernet keys used to encrypt Fernet tokens. Both sets +# of keys should be managed separately and require different rotation policies. +# Do not share this repository with the repository used to manage keys for +# Fernet tokens. (string value) +#key_repository = /etc/keystone/credential-keys/ + + +[database] +connection = mysql+pymysql://keystone:unime@iotronic-db/keystone + +# +# From oslo.db +# + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. (string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# If True, transparently enables support for handling MySQL Cluster (NDB). +# (boolean value) +#mysql_enable_ndb = false + +# Connections which have been present in the connection pool longer than this +# number of seconds will be replaced with a new one the next time they are +# checked out from the pool. 
(integer value) +# Deprecated group/name - [DATABASE]/idle_timeout +# Deprecated group/name - [database]/idle_timeout +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#connection_recycle_time = 3600 + +# DEPRECATED: Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: The option to set the minimum pool size is not supported by +# sqlalchemy. +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. Setting a value of +# 0 indicates no limit. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = 5 + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = 50 + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Minimum value: 0 +# Maximum value: 100 +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. 
(boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. (integer value) +#db_max_retries = 20 + +# Optional URL parameters to append onto the connection URL at connect time; +# specify as param1=value1¶m2=value2&... (string value) +#connection_parameters = + + +[domain_config] + +# +# From keystone +# + +# Entry point for the domain-specific configuration driver in the +# `keystone.resource.domain_config` namespace. Only a `sql` option is provided +# by keystone, so there is no reason to set this unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Toggle for caching of the domain-specific configuration backend. This has no +# effect unless global caching is enabled. There is normally no reason to +# disable this. (boolean value) +#caching = true + +# Time-to-live (TTL, in seconds) to cache domain-specific configuration data. +# This has no effect unless `[domain_config] caching` is enabled. (integer +# value) +#cache_time = 300 + + +[endpoint_filter] + +# +# From keystone +# + +# Entry point for the endpoint filter driver in the `keystone.endpoint_filter` +# namespace. 
Only a `sql` option is provided by keystone, so there is no reason +# to set this unless you are providing a custom entry point. (string value) +#driver = sql + +# This controls keystone's behavior if the configured endpoint filters do not +# result in any endpoints for a user + project pair (and therefore a +# potentially empty service catalog). If set to true, keystone will return the +# entire service catalog. If set to false, keystone will return an empty +# service catalog. (boolean value) +#return_all_endpoints_if_no_filter = true + + +[endpoint_policy] + +# +# From keystone +# + +# Entry point for the endpoint policy driver in the `keystone.endpoint_policy` +# namespace. Only a `sql` driver is provided by keystone, so there is no reason +# to set this unless you are providing a custom entry point. (string value) +#driver = sql + + +[eventlet_server] + +# +# From keystone +# + +# DEPRECATED: The IP address of the network interface for the public service to +# listen on. (host address value) +# Deprecated group/name - [DEFAULT]/bind_host +# Deprecated group/name - [DEFAULT]/public_bind_host +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. +#public_bind_host = 0.0.0.0 + +# DEPRECATED: The port number for the public service to listen on. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# Deprecated group/name - [DEFAULT]/public_port +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. 
+#public_port = 5000 + +# DEPRECATED: The IP address of the network interface for the admin service to +# listen on. (host address value) +# Deprecated group/name - [DEFAULT]/bind_host +# Deprecated group/name - [DEFAULT]/admin_bind_host +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. +#admin_bind_host = 0.0.0.0 + +# DEPRECATED: The port number for the admin service to listen on. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# Deprecated group/name - [DEFAULT]/admin_port +# This option is deprecated for removal since K. +# Its value may be silently ignored in the future. +# Reason: Support for running keystone under eventlet has been removed in the +# Newton release. These options remain for backwards compatibility because they +# are used for URL substitutions. +#admin_port = 35357 + + +[extra_headers] +Distribution = Ubuntu + +# +# From keystone +# + +# Specifies the distribution of the keystone server. (string value) +#Distribution = Ubuntu + + +[federation] + +# +# From keystone +# + +# Entry point for the federation backend driver in the `keystone.federation` +# namespace. Keystone only provides a `sql` driver, so there is no reason to +# set this option unless you are providing a custom entry point. (string value) +#driver = sql + +# Prefix to use when filtering environment variable names for federated +# assertions. Matched variables are passed into the federated mapping engine. +# (string value) +#assertion_prefix = + +# Value to be used to obtain the entity ID of the Identity Provider from the +# environment. For `mod_shib`, this would be `Shib-Identity-Provider`. For +# `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`. For `mod_auth_mellon`, +# this could be `MELLON_IDP`. 
(string value) +#remote_id_attribute = + +# An arbitrary domain name that is reserved to allow federated ephemeral users +# to have a domain concept. Note that an admin will not be able to create a +# domain with this name or update an existing domain to this name. You are not +# advised to change this value unless you really have to. (string value) +#federated_domain_name = Federated + +# A list of trusted dashboard hosts. Before accepting a Single Sign-On request +# to return a token, the origin host must be a member of this list. This +# configuration option may be repeated for multiple values. You must set this +# in order to use web-based SSO flows. For example: +# trusted_dashboard=https://acme.example.com/auth/websso +# trusted_dashboard=https://beta.example.com/auth/websso (multi valued) +#trusted_dashboard = + +# Absolute path to an HTML file used as a Single Sign-On callback handler. This +# page is expected to redirect the user from keystone back to a trusted +# dashboard host, by form encoding a token in a POST request. Keystone's +# default value should be sufficient for most deployments. (string value) +#sso_callback_template = /etc/keystone/sso_callback_template.html + +# Toggle for federation caching. This has no effect unless global caching is +# enabled. There is typically no reason to disable this. (boolean value) +#caching = true + + +[fernet_receipts] + +# +# From keystone +# + +# Directory containing Fernet receipt keys. This directory must exist before +# using `keystone-manage fernet_setup` for the first time, must be writable by +# the user running `keystone-manage fernet_setup` or `keystone-manage +# fernet_rotate`, and of course must be readable by keystone's server process. 
+# The repository may contain keys in one of three states: a single staged key +# (always index 0) used for receipt validation, a single primary key (always +# the highest index) used for receipt creation and validation, and any number +# of secondary keys (all other index values) used for receipt validation. With +# multiple keystone nodes, each node must share the same key repository +# contents, with the exception of the staged key (index 0). It is safe to run +# `keystone-manage fernet_rotate` once on any one node to promote a staged key +# (index 0) to be the new primary (incremented from the previous highest +# index), and produce a new staged key (a new key with index 0); the resulting +# repository can then be atomically replicated to other nodes without any risk +# of race conditions (for example, it is safe to run `keystone-manage +# fernet_rotate` on host A, wait any amount of time, create a tarball of the +# directory on host A, unpack it on host B to a temporary location, and +# atomically move (`mv`) the directory into place on host B). Running +# `keystone-manage fernet_rotate` *twice* on a key repository without syncing +# other nodes will result in receipts that can not be validated by all nodes. +# (string value) +#key_repository = /etc/keystone/fernet-keys/ + +# This controls how many keys are held in rotation by `keystone-manage +# fernet_rotate` before they are discarded. The default value of 3 means that +# keystone will maintain one staged key (always index 0), one primary key (the +# highest numerical index), and one secondary key (every other index). +# Increasing this value means that additional secondary keys will be kept in +# the rotation. (integer value) +# Minimum value: 1 +#max_active_keys = 3 + + +[fernet_tokens] + +# +# From keystone +# + +# Directory containing Fernet token keys. 
This directory must exist before +# using `keystone-manage fernet_setup` for the first time, must be writable by +# the user running `keystone-manage fernet_setup` or `keystone-manage +# fernet_rotate`, and of course must be readable by keystone's server process. +# The repository may contain keys in one of three states: a single staged key +# (always index 0) used for token validation, a single primary key (always the +# highest index) used for token creation and validation, and any number of +# secondary keys (all other index values) used for token validation. With +# multiple keystone nodes, each node must share the same key repository +# contents, with the exception of the staged key (index 0). It is safe to run +# `keystone-manage fernet_rotate` once on any one node to promote a staged key +# (index 0) to be the new primary (incremented from the previous highest +# index), and produce a new staged key (a new key with index 0); the resulting +# repository can then be atomically replicated to other nodes without any risk +# of race conditions (for example, it is safe to run `keystone-manage +# fernet_rotate` on host A, wait any amount of time, create a tarball of the +# directory on host A, unpack it on host B to a temporary location, and +# atomically move (`mv`) the directory into place on host B). Running +# `keystone-manage fernet_rotate` *twice* on a key repository without syncing +# other nodes will result in tokens that can not be validated by all nodes. +# (string value) +#key_repository = /etc/keystone/fernet-keys/ + +# This controls how many keys are held in rotation by `keystone-manage +# fernet_rotate` before they are discarded. The default value of 3 means that +# keystone will maintain one staged key (always index 0), one primary key (the +# highest numerical index), and one secondary key (every other index). +# Increasing this value means that additional secondary keys will be kept in +# the rotation. 
(integer value)
+# Minimum value: 1
+#max_active_keys = 3
+
+
+[healthcheck]
+
+#
+# From oslo.middleware
+#
+
+# DEPRECATED: The path to respond to healthcheck requests on. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#path = /healthcheck
+
+# Show more detailed information as part of the response. Security note:
+# Enabling this option may expose sensitive details about the service being
+# monitored. Be sure to verify that it will not violate your security policies.
+# (boolean value)
+#detailed = false
+
+# Additional backends that can perform health checks and report that
+# information back as part of a request. (list value)
+#backends = 
+
+# Check the presence of a file to determine if an application is running on a
+# port. Used by DisableByFileHealthcheck plugin. (string value)
+#disable_by_file_path = 
+
+# Check the presence of a file based on a port to determine if an application
+# is running on a port. Expects a "port:path" list of strings. Used by
+# DisableByFilesPortsHealthcheck plugin. (list value)
+#disable_by_file_paths = 
+
+
+[identity]
+
+#
+# From keystone
+#
+
+# This references the domain to use for all Identity API v2 requests (which are
+# not aware of domains). A domain with this ID can optionally be created for
+# you by `keystone-manage bootstrap`. The domain referenced by this ID cannot
+# be deleted on the v3 API, to prevent accidentally breaking the v2 API. There
+# is nothing special about this domain, other than the fact that it must exist
+# in order to maintain support for your v2 clients. There is typically no
+# reason to change this value. 
(string value) +#default_domain_id = default + +# A subset (or all) of domains can have their own identity driver, each with +# their own partial configuration options, stored in either the resource +# backend or in a file in a domain configuration directory (depending on the +# setting of `[identity] domain_configurations_from_database`). Only values +# specific to the domain need to be specified in this manner. This feature is +# disabled by default, but may be enabled by default in a future release; set +# to true to enable. (boolean value) +#domain_specific_drivers_enabled = false + +# By default, domain-specific configuration data is read from files in the +# directory identified by `[identity] domain_config_dir`. Enabling this +# configuration option allows you to instead manage domain-specific +# configurations through the API, which are then persisted in the backend +# (typically, a SQL database), rather than using configuration files on disk. +# (boolean value) +#domain_configurations_from_database = false + +# Absolute path where keystone should locate domain-specific `[identity]` +# configuration files. This option has no effect unless `[identity] +# domain_specific_drivers_enabled` is set to true. There is typically no reason +# to change this value. (string value) +#domain_config_dir = /etc/keystone/domains + +# Entry point for the identity backend driver in the `keystone.identity` +# namespace. Keystone provides a `sql` and `ldap` driver. This option is also +# used as the default driver selection (along with the other configuration +# variables in this section) in the event that `[identity] +# domain_specific_drivers_enabled` is enabled, but no applicable domain- +# specific configuration is defined for the domain in question. Unless your +# deployment primarily relies on `ldap` AND is not using domain-specific +# configuration, you should typically leave this set to `sql`. (string value) +#driver = sql + +# Toggle for identity caching. 
This has no effect unless global caching is
+# enabled. There is typically no reason to disable this. (boolean value)
+#caching = true
+
+# Time to cache identity data (in seconds). This has no effect unless global
+# and identity caching are enabled. (integer value)
+#cache_time = 600
+
+# Maximum allowed length for user passwords. Decrease this value to improve
+# performance. Changing this value does not affect existing passwords. (integer
+# value)
+# Maximum value: 4096
+#max_password_length = 4096
+
+# Maximum number of entities that will be returned in an identity collection.
+# (integer value)
+#list_limit = 
+
+# The password hashing algorithm to use for passwords stored within keystone.
+# (string value)
+# Possible values:
+# bcrypt - 
+# scrypt - 
+# pbkdf2_sha512 - 
+#password_hash_algorithm = bcrypt
+
+# This option represents a trade off between security and performance. Higher
+# values lead to slower performance, but higher security. Changing this option
+# will only affect newly created passwords as existing password hashes already
+# have a fixed number of rounds applied, so it is safe to tune this option in a
+# running cluster. The default for bcrypt is 12, must be between 4 and 31,
+# inclusive. The default for scrypt is 16, must be within `range(1,32)`. The
+# default for pbkdf2_sha512 is 60000, must be within `range(1,1<<32)` WARNING:
+# If using scrypt, increasing this value increases BOTH time AND memory
+# requirements to hash a password. (integer value)
+#password_hash_rounds = 
+
+# Optional block size to pass to scrypt hash function (the `r` parameter).
+# Useful for tuning scrypt to optimal performance for your CPU architecture.
+# This option is only used when the `password_hash_algorithm` option is set to
+# `scrypt`. Defaults to 8. (integer value)
+#scrypt_block_size = 
+
+# Optional parallelism to pass to scrypt hash function (the `p` parameter).
+# This option is only used when the `password_hash_algorithm` option is set to
+# `scrypt`. 
Defaults to 1. (integer value)
+#scrypt_parallelism = 
+
+# Number of bytes to use in scrypt and pbkdf2_sha512 hashing salt. Default for
+# scrypt is 16 bytes. Default for pbkdf2_sha512 is 16 bytes. Limited to a
+# maximum of 96 bytes due to the size of the column used to store password
+# hashes. (integer value)
+# Minimum value: 0
+# Maximum value: 96
+#salt_bytesize = 
+
+
+[identity_mapping]
+
+#
+# From keystone
+#
+
+# Entry point for the identity mapping backend driver in the
+# `keystone.identity.id_mapping` namespace. Keystone only provides a `sql`
+# driver, so there is no reason to change this unless you are providing a
+# custom entry point. (string value)
+#driver = sql
+
+# Entry point for the public ID generator for user and group entities in the
+# `keystone.identity.id_generator` namespace. The Keystone identity mapper only
+# supports generators that produce 64 bytes or less. Keystone only provides a
+# `sha256` entry point, so there is no reason to change this value unless
+# you're providing a custom entry point. (string value)
+#generator = sha256
+
+# The format of user and group IDs changed in Juno for backends that do not
+# generate UUIDs (for example, LDAP), with keystone providing a hash mapping to
+# the underlying attribute in LDAP. By default this mapping is disabled, which
+# ensures that existing IDs will not change. Even when the mapping is enabled
+# by using domain-specific drivers (`[identity]
+# domain_specific_drivers_enabled`), any users and groups from the default
+# domain being handled by LDAP will still not be mapped to ensure their IDs
+# remain backward compatible. Setting this value to false will enable the new
+# mapping for all backends, including the default LDAP driver. 
It is only +# guaranteed to be safe to enable this option if you do not already have +# assignments for users and groups from the default LDAP domain, and you +# consider it to be acceptable for Keystone to provide the different IDs to +# clients than it did previously (existing IDs in the API will suddenly +# change). Typically this means that the only time you can set this value to +# false is when configuring a fresh installation, although that is the +# recommended value. (boolean value) +#backward_compatible_ids = true + + +[jwt_tokens] + +# +# From keystone +# + +# Directory containing public keys for validating JWS token signatures. This +# directory must exist in order for keystone's server process to start. It must +# also be readable by keystone's server process. It must contain at least one +# public key that corresponds to a private key in `keystone.conf [jwt_tokens] +# jws_private_key_repository`. This option is only applicable in deployments +# issuing JWS tokens and setting `keystone.conf [tokens] provider = jws`. +# (string value) +#jws_public_key_repository = /etc/keystone/jws-keys/public + +# Directory containing private keys for signing JWS tokens. This directory must +# exist in order for keystone's server process to start. It must also be +# readable by keystone's server process. It must contain at least one private +# key that corresponds to a public key in `keystone.conf [jwt_tokens] +# jws_public_key_repository`. In the event there are multiple private keys in +# this directory, keystone will use a key named `private.pem` to sign tokens. +# In the future, keystone may support the ability to sign tokens with multiple +# private keys. For now, only a key named `private.pem` within this directory +# is required to issue JWS tokens. This option is only applicable in +# deployments issuing JWS tokens and setting `keystone.conf [tokens] provider = +# jws`. 
(string value) +#jws_private_key_repository = /etc/keystone/jws-keys/private + + +[ldap] + +# +# From keystone +# + +# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified +# as a comma separated string. The first URL to successfully bind is used for +# the connection. (string value) +#url = ldap://localhost + +# The user name of the administrator bind DN to use when querying the LDAP +# server, if your LDAP server requires it. (string value) +#user = + +# The password of the administrator bind DN to use when querying the LDAP +# server, if your LDAP server requires it. (string value) +#password = + +# The default LDAP server suffix to use, if a DN is not defined via either +# `[ldap] user_tree_dn` or `[ldap] group_tree_dn`. (string value) +#suffix = cn=example,cn=com + +# The search scope which defines how deep to search within the search base. A +# value of `one` (representing `oneLevel` or `singleLevel`) indicates a search +# of objects immediately below to the base object, but does not include the +# base object itself. A value of `sub` (representing `subtree` or +# `wholeSubtree`) indicates a search of both the base object itself and the +# entire subtree below it. (string value) +# Possible values: +# one - +# sub - +#query_scope = one + +# Defines the maximum number of results per page that keystone should request +# from the LDAP server when listing objects. A value of zero (`0`) disables +# paging. (integer value) +# Minimum value: 0 +#page_size = 0 + +# The LDAP dereferencing option to use for queries involving aliases. A value +# of `default` falls back to using default dereferencing behavior configured by +# your `ldap.conf`. A value of `never` prevents aliases from being dereferenced +# at all. A value of `searching` dereferences aliases only after name +# resolution. A value of `finding` dereferences aliases only during name +# resolution. A value of `always` dereferences aliases in all cases. 
(string +# value) +# Possible values: +# never - +# searching - +# always - +# finding - +# default - +#alias_dereferencing = default + +# Sets the LDAP debugging level for LDAP calls. A value of 0 means that +# debugging is not enabled. This value is a bitmask, consult your LDAP +# documentation for possible values. (integer value) +# Minimum value: -1 +#debug_level = + +# Sets keystone's referral chasing behavior across directory partitions. If +# left unset, the system's default behavior will be used. (boolean value) +#chase_referrals = + +# The search base to use for users. Defaults to the `[ldap] suffix` value. +# (string value) +#user_tree_dn = + +# The LDAP search filter to use for users. (string value) +#user_filter = + +# The LDAP object class to use for users. (string value) +#user_objectclass = inetOrgPerson + +# The LDAP attribute mapped to user IDs in keystone. This must NOT be a +# multivalued attribute. User IDs are expected to be globally unique across +# keystone domains and URL-safe. (string value) +#user_id_attribute = cn + +# The LDAP attribute mapped to user names in keystone. User names are expected +# to be unique only within a keystone domain and are not expected to be URL- +# safe. (string value) +#user_name_attribute = sn + +# The LDAP attribute mapped to user descriptions in keystone. (string value) +#user_description_attribute = description + +# The LDAP attribute mapped to user emails in keystone. (string value) +#user_mail_attribute = mail + +# The LDAP attribute mapped to user passwords in keystone. (string value) +#user_pass_attribute = userPassword + +# The LDAP attribute mapped to the user enabled attribute in keystone. If +# setting this option to `userAccountControl`, then you may be interested in +# setting `[ldap] user_enabled_mask` and `[ldap] user_enabled_default` as well. +# (string value) +#user_enabled_attribute = enabled + +# Logically negate the boolean value of the enabled attribute obtained from the +# LDAP server. 
Some LDAP servers use a boolean lock attribute where "true" +# means an account is disabled. Setting `[ldap] user_enabled_invert = true` +# will allow these lock attributes to be used. This option will have no effect +# if either the `[ldap] user_enabled_mask` or `[ldap] user_enabled_emulation` +# options are in use. (boolean value) +#user_enabled_invert = false + +# Bitmask integer to select which bit indicates the enabled value if the LDAP +# server represents "enabled" as a bit on an integer rather than as a discrete +# boolean. A value of `0` indicates that the mask is not used. If this is not +# set to `0` the typical value is `2`. This is typically used when `[ldap] +# user_enabled_attribute = userAccountControl`. Setting this option causes +# keystone to ignore the value of `[ldap] user_enabled_invert`. (integer value) +# Minimum value: 0 +#user_enabled_mask = 0 + +# The default value to enable users. This should match an appropriate integer +# value if the LDAP server uses non-boolean (bitmask) values to indicate if a +# user is enabled or disabled. If this is not set to `True`, then the typical +# value is `512`. This is typically used when `[ldap] user_enabled_attribute = +# userAccountControl`. (string value) +#user_enabled_default = True + +# List of user attributes to ignore on create and update, or whether a specific +# user attribute should be filtered for list or show user. (list value) +#user_attribute_ignore = default_project_id + +# The LDAP attribute mapped to a user's default_project_id in keystone. This is +# most commonly used when keystone has write access to LDAP. (string value) +#user_default_project_id_attribute = + +# If enabled, keystone uses an alternative method to determine if a user is +# enabled or not by checking if they are a member of the group defined by the +# `[ldap] user_enabled_emulation_dn` option. Enabling this option causes +# keystone to ignore the value of `[ldap] user_enabled_invert`. 
(boolean value) +#user_enabled_emulation = false + +# DN of the group entry to hold enabled users when using enabled emulation. +# Setting this option has no effect unless `[ldap] user_enabled_emulation` is +# also enabled. (string value) +#user_enabled_emulation_dn = + +# Use the `[ldap] group_member_attribute` and `[ldap] group_objectclass` +# settings to determine membership in the emulated enabled group. Enabling this +# option has no effect unless `[ldap] user_enabled_emulation` is also enabled. +# (boolean value) +#user_enabled_emulation_use_group_config = false + +# A list of LDAP attribute to keystone user attribute pairs used for mapping +# additional attributes to users in keystone. The expected format is +# `:`, where `ldap_attr` is the attribute in the LDAP +# object and `user_attr` is the attribute which should appear in the identity +# API. (list value) +#user_additional_attribute_mapping = + +# The search base to use for groups. Defaults to the `[ldap] suffix` value. +# (string value) +#group_tree_dn = + +# The LDAP search filter to use for groups. (string value) +#group_filter = + +# The LDAP object class to use for groups. If setting this option to +# `posixGroup`, you may also be interested in enabling the `[ldap] +# group_members_are_ids` option. (string value) +#group_objectclass = groupOfNames + +# The LDAP attribute mapped to group IDs in keystone. This must NOT be a +# multivalued attribute. Group IDs are expected to be globally unique across +# keystone domains and URL-safe. (string value) +#group_id_attribute = cn + +# The LDAP attribute mapped to group names in keystone. Group names are +# expected to be unique only within a keystone domain and are not expected to +# be URL-safe. (string value) +#group_name_attribute = ou + +# The LDAP attribute used to indicate that a user is a member of the group. 
+# (string value) +#group_member_attribute = member + +# Enable this option if the members of the group object class are keystone user +# IDs rather than LDAP DNs. This is the case when using `posixGroup` as the +# group object class in Open Directory. (boolean value) +#group_members_are_ids = false + +# The LDAP attribute mapped to group descriptions in keystone. (string value) +#group_desc_attribute = description + +# List of group attributes to ignore on create and update. or whether a +# specific group attribute should be filtered for list or show group. (list +# value) +#group_attribute_ignore = + +# A list of LDAP attribute to keystone group attribute pairs used for mapping +# additional attributes to groups in keystone. The expected format is +# `:`, where `ldap_attr` is the attribute in the LDAP +# object and `group_attr` is the attribute which should appear in the identity +# API. (list value) +#group_additional_attribute_mapping = + +# If enabled, group queries will use Active Directory specific filters for +# nested groups. (boolean value) +#group_ad_nesting = false + +# An absolute path to a CA certificate file to use when communicating with LDAP +# servers. This option will take precedence over `[ldap] tls_cacertdir`, so +# there is no reason to set both. (string value) +#tls_cacertfile = + +# An absolute path to a CA certificate directory to use when communicating with +# LDAP servers. There is no reason to set this option if you've also set +# `[ldap] tls_cacertfile`. (string value) +#tls_cacertdir = + +# Enable TLS when communicating with LDAP servers. You should also set the +# `[ldap] tls_cacertfile` and `[ldap] tls_cacertdir` options when using this +# option. Do not set this option if you are using LDAP over SSL (LDAPS) instead +# of TLS. (boolean value) +#use_tls = false + +# Specifies which checks to perform against client certificates on incoming TLS +# sessions. 
If set to `demand`, then a certificate will always be requested and +# required from the LDAP server. If set to `allow`, then a certificate will +# always be requested but not required from the LDAP server. If set to `never`, +# then a certificate will never be requested. (string value) +# Possible values: +# demand - +# never - +# allow - +#tls_req_cert = demand + +# The connection timeout to use with the LDAP server. A value of `-1` means +# that connections will never timeout. (integer value) +# Minimum value: -1 +#connection_timeout = -1 + +# Enable LDAP connection pooling for queries to the LDAP server. There is +# typically no reason to disable this. (boolean value) +#use_pool = true + +# The size of the LDAP connection pool. This option has no effect unless +# `[ldap] use_pool` is also enabled. (integer value) +# Minimum value: 1 +#pool_size = 10 + +# The maximum number of times to attempt reconnecting to the LDAP server before +# aborting. A value of zero prevents retries. This option has no effect unless +# `[ldap] use_pool` is also enabled. (integer value) +# Minimum value: 0 +#pool_retry_max = 3 + +# The number of seconds to wait before attempting to reconnect to the LDAP +# server. This option has no effect unless `[ldap] use_pool` is also enabled. +# (floating point value) +#pool_retry_delay = 0.1 + +# The connection timeout to use when pooling LDAP connections. A value of `-1` +# means that connections will never timeout. This option has no effect unless +# `[ldap] use_pool` is also enabled. (integer value) +# Minimum value: -1 +#pool_connection_timeout = -1 + +# The maximum connection lifetime to the LDAP server in seconds. When this +# lifetime is exceeded, the connection will be unbound and removed from the +# connection pool. This option has no effect unless `[ldap] use_pool` is also +# enabled. (integer value) +# Minimum value: 1 +#pool_connection_lifetime = 600 + +# Enable LDAP connection pooling for end user authentication. 
There is +# typically no reason to disable this. (boolean value) +#use_auth_pool = true + +# The size of the connection pool to use for end user authentication. This +# option has no effect unless `[ldap] use_auth_pool` is also enabled. (integer +# value) +# Minimum value: 1 +#auth_pool_size = 100 + +# The maximum end user authentication connection lifetime to the LDAP server in +# seconds. When this lifetime is exceeded, the connection will be unbound and +# removed from the connection pool. This option has no effect unless `[ldap] +# use_auth_pool` is also enabled. (integer value) +# Minimum value: 1 +#auth_pool_connection_lifetime = 60 + + +[memcache] + +# +# From keystone +# + +# Number of seconds memcached server is considered dead before it is tried +# again. This is used by the key value store system. (integer value) +#dead_retry = 300 + +# Timeout in seconds for every call to a server. This is used by the key value +# store system. (integer value) +#socket_timeout = 3 + +# Max total number of open connections to every memcached server. This is used +# by the key value store system. (integer value) +#pool_maxsize = 10 + +# Number of seconds a connection to memcached is held unused in the pool before +# it is closed. This is used by the key value store system. (integer value) +#pool_unused_timeout = 60 + +# Number of seconds that an operation will wait to get a memcache client +# connection. This is used by the key value store system. (integer value) +#pool_connection_get_timeout = 10 + + +[oauth1] + +# +# From keystone +# + +# Entry point for the OAuth backend driver in the `keystone.oauth1` namespace. +# Typically, there is no reason to set this option unless you are providing a +# custom entry point. (string value) +#driver = sql + +# Number of seconds for the OAuth Request Token to remain valid after being +# created. This is the amount of time the user has to authorize the token. +# Setting this option to zero means that request tokens will last forever. 
+# (integer value) +# Minimum value: 0 +#request_token_duration = 28800 + +# Number of seconds for the OAuth Access Token to remain valid after being +# created. This is the amount of time the consumer has to interact with the +# service provider (which is typically keystone). Setting this option to zero +# means that access tokens will last forever. (integer value) +# Minimum value: 0 +#access_token_duration = 86400 + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Name for the AMQP container. must be globally unique. Defaults to a generated +# UUID (string value) +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace = false + +# Attempt to connect via SSL. If no other ssl-related parameters are given, it +# will use the system's CA-bundle to verify the server's certificate. (boolean +# value) +#ssl = false + +# CA certificate PEM file used to verify the server's certificate (string +# value) +#ssl_ca_file = + +# Self-identifying certificate PEM file for client authentication (string +# value) +#ssl_cert_file = + +# Private key PEM file used to sign ssl_cert_file certificate (optional) +# (string value) +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +#ssl_key_password = + +# By default SSL checks that the name in the server's certificate matches the +# hostname in the transport_url. In some configurations it may be preferable to +# use the virtual hostname instead, for example if the server uses the Server +# Name Indication TLS extension (rfc6066) to provide a certificate per virtual +# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the +# virtual host name instead of the DNS name. 
(boolean value) +#ssl_verify_vhost = false + +# Space separated list of acceptable SASL mechanisms (string value) +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string value) +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +#sasl_config_name = + +# SASL realm to use if no realm present in username (string value) +#sasl_default_realm = + +# Seconds to pause before attempting to re-connect. (integer value) +# Minimum value: 1 +#connection_retry_interval = 1 + +# Increase the connection_retry_interval by this many seconds after each +# unsuccessful failover attempt. (integer value) +# Minimum value: 0 +#connection_retry_backoff = 2 + +# Maximum limit for connection_retry_interval + connection_retry_backoff +# (integer value) +# Minimum value: 1 +#connection_retry_interval_max = 30 + +# Time to pause between re-connecting an AMQP 1.0 link that failed due to a +# recoverable error. (integer value) +# Minimum value: 1 +#link_retry_delay = 10 + +# The maximum number of attempts to re-send a reply message which failed due to +# a recoverable error. (integer value) +# Minimum value: -1 +#default_reply_retry = 0 + +# The deadline for an rpc reply message delivery. (integer value) +# Minimum value: 5 +#default_reply_timeout = 30 + +# The deadline for an rpc cast or call message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_send_timeout = 30 + +# The deadline for a sent notification message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_notify_timeout = 30 + +# The duration to schedule a purge of idle sender links. Detach link after +# expiry. (integer value) +# Minimum value: 1 +#default_sender_link_timeout = 600 + +# Indicates the addressing mode used by the driver. 
+# Permitted values: +# 'legacy' - use legacy non-routable addressing +# 'routable' - use routable addresses +# 'dynamic' - use legacy addresses if the message bus does not support routing +# otherwise use routable addressing (string value) +#addressing_mode = dynamic + +# Enable virtual host support for those message buses that do not natively +# support virtual hosting (such as qpidd). When set to true the virtual host +# name will be added to all message bus addresses, effectively creating a +# private 'subnet' per virtual host. Set to False if the message bus supports +# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative +# as the name of the virtual host. (boolean value) +#pseudo_vhost = true + +# address prefix used when sending to a specific server (string value) +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +#group_request_prefix = unicast + +# Address prefix for all generated RPC addresses (string value) +#rpc_address_prefix = openstack.org/om/rpc + +# Address prefix for all generated Notification addresses (string value) +#notify_address_prefix = openstack.org/om/notify + +# Appended to the address prefix when sending a fanout message. Used by the +# message bus to identify fanout messages. (string value) +#multicast_address = multicast + +# Appended to the address prefix when sending to a particular RPC/Notification +# server. Used by the message bus to identify messages sent to a single +# destination. (string value) +#unicast_address = unicast + +# Appended to the address prefix when sending to a group of consumers. Used by +# the message bus to identify messages that should be delivered in a round- +# robin fashion across consumers. (string value) +#anycast_address = anycast + +# Exchange name used in notification addresses. 
+# Exchange name resolution precedence: +# Target.exchange if set +# else default_notification_exchange if set +# else control_exchange if set +# else 'notify' (string value) +#default_notification_exchange = + +# Exchange name used in RPC addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_rpc_exchange if set +# else control_exchange if set +# else 'rpc' (string value) +#default_rpc_exchange = + +# Window size for incoming RPC Reply messages. (integer value) +# Minimum value: 1 +#reply_link_credit = 200 + +# Window size for incoming RPC Request messages (integer value) +# Minimum value: 1 +#rpc_server_credit = 100 + +# Window size for incoming Notification messages (integer value) +# Minimum value: 1 +#notify_server_credit = 100 + +# Send messages of this type pre-settled. +# Pre-settled messages will not receive acknowledgement +# from the peer. Note well: pre-settled messages may be +# silently discarded if the delivery fails. +# Permitted values: +# 'rpc-call' - send RPC Calls pre-settled +# 'rpc-reply'- send RPC Replies pre-settled +# 'rpc-cast' - Send RPC Casts pre-settled +# 'notify' - Send Notifications pre-settled +# (multi valued) +#pre_settled = rpc-cast +#pre_settled = rpc-reply + + +[oslo_messaging_kafka] + +# +# From oslo.messaging +# + +# Max fetch bytes of Kafka consumer (integer value) +#kafka_max_fetch_bytes = 1048576 + +# Default timeout(s) for Kafka consumers (floating point value) +#kafka_consumer_timeout = 1.0 + +# DEPRECATED: Pool Size for Kafka Consumers (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#pool_size = 10 + +# DEPRECATED: The pool size limit for connections expiration policy (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. 
+#conn_pool_min_size = 2 + +# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_ttl = 1200 + +# Group id for Kafka consumer. Consumers in one group will coordinate message +# consumption (string value) +#consumer_group = oslo_messaging_consumer + +# Upper bound on the delay for KafkaProducer batching in seconds (floating +# point value) +#producer_batch_timeout = 0.0 + +# Size of batch for the producer async send (integer value) +#producer_batch_size = 16384 + +# Enable asynchronous consumer commits (boolean value) +#enable_auto_commit = false + +# The maximum number of records returned in a poll call (integer value) +#max_poll_records = 500 + +# Protocol used to communicate with brokers (string value) +# Possible values: +# PLAINTEXT - +# SASL_PLAINTEXT - +# SSL - +# SASL_SSL - +#security_protocol = PLAINTEXT + +# Mechanism when security protocol is SASL (string value) +#sasl_mechanism = PLAIN + +# CA certificate PEM file used to verify the server certificate (string value) +#ssl_cafile = + + +[oslo_messaging_notifications] + +# +# From oslo.messaging +# + +# The Drivers(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +# Deprecated group/name - [DEFAULT]/notification_driver +#driver = + +# A URL representing the messaging driver to use for notifications. If not set, +# we fall back to the same configuration used for RPC. (string value) +# Deprecated group/name - [DEFAULT]/notification_transport_url +#transport_url = + +# AMQP topic used for OpenStack notifications. 
(list value) +# Deprecated group/name - [rpc_notifier2]/topics +# Deprecated group/name - [DEFAULT]/notification_topics +#topics = notifications + +# The maximum number of attempts to re-send a notification message which failed +# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite +# (integer value) +#retry = -1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +#amqp_auto_delete = false + +# Connect over SSL. (boolean value) +# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl +#ssl = false + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version +#ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile +#ssl_key_file = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile +#ssl_cert_file = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs +#ssl_ca_file = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +#kombu_reconnect_delay = 1.0 + +# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not +# be used. This option may not be available in future versions. (string value) +#kombu_compression = + +# How long to wait a missing client before abandoning to send it its replies. +# This value should not be longer than rpc_response_timeout. 
(integer value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout +#kombu_missing_consumer_retry_timeout = 60 + +# Determines how the next RabbitMQ node is chosen in case the one we are +# currently connected to becomes unavailable. Takes effect only if more than +# one RabbitMQ node is provided in config. (string value) +# Possible values: +# round-robin - +# shuffle - +#kombu_failover_strategy = round-robin + +# The RabbitMQ login method. (string value) +# Possible values: +# PLAIN - +# AMQPLAIN - +# RABBIT-CR-DEMO - +#rabbit_login_method = AMQPLAIN + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. +# (integer value) +#rabbit_interval_max = 30 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this +# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring +# is no longer controlled by the x-ha-policy argument when declaring a queue. +# If you just want to make sure that all queues (except those with auto- +# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy +# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL (x-expires). +# Queues which are unused for the duration of the TTL are automatically +# deleted. The parameter affects only reply and fanout queues. (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disable the heartbeat). 
EXPERIMENTAL (integer +# value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + + +[oslo_middleware] + +# +# From oslo.middleware +# + +# The maximum body size for each request, in bytes. (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +# Deprecated group/name - [DEFAULT]/max_request_body_size +#max_request_body_size = 114688 + +# DEPRECATED: The HTTP Header that will be used to determine what the original +# request protocol scheme was, even if it was hidden by a SSL termination +# proxy. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#secure_proxy_ssl_header = X-Forwarded-Proto + +# Whether the application is behind a proxy or not. This determines if the +# middleware should parse the headers or not. (boolean value) +#enable_proxy_headers_parsing = false + + +[oslo_policy] + +# +# From oslo.policy +# + +# This option controls whether or not to enforce scope when evaluating +# policies. If ``True``, the scope of the token used in the request is compared +# to the ``scope_types`` of the policy being enforced. If the scopes do not +# match, an ``InvalidScope`` exception will be raised. If ``False``, a message +# will be logged informing operators that policies are being invoked with +# mismatching scope. (boolean value) +#enforce_scope = false + +# The file that defines policies. (string value) +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. (string value) +#policy_default_rule = default + +# Directories where policy configuration files are stored. They can be relative +# to any directory in the search path defined by the config_dir option, or +# absolute paths. The file defined by policy_file must exist for these +# directories to be searched. Missing or empty directories are ignored. 
(multi
+# valued)
+#policy_dirs = policy.d
+
+# Content Type to send and receive data for REST based policy check (string
+# value)
+# Possible values:
+# application/x-www-form-urlencoded -
+# application/json -
+#remote_content_type = application/x-www-form-urlencoded
+
+# server identity verification for REST based policy check (boolean value)
+#remote_ssl_verify_server_crt = false
+
+# Absolute path to ca cert file for REST based policy check (string value)
+#remote_ssl_ca_crt_file =
+
+# Absolute path to client cert for REST based policy check (string value)
+#remote_ssl_client_crt_file =
+
+# Absolute path client key file REST based policy check (string value)
+#remote_ssl_client_key_file =
+
+
+[policy]
+
+#
+# From keystone
+#
+
+# Entry point for the policy backend driver in the `keystone.policy` namespace.
+# Supplied drivers are `rules` (which does not support any CRUD operations for
+# the v3 policy API) and `sql`. Typically, there is no reason to set this
+# option unless you are providing a custom entry point. (string value)
+#driver = sql
+
+# Maximum number of entities that will be returned in a policy collection.
+# (integer value)
+#list_limit =
+
+
+[profiler]
+
+#
+# From osprofiler
+#
+
+#
+# Enable the profiling for all services on this node.
+#
+# Default value is False (fully disable the profiling feature).
+#
+# Possible values:
+#
+# * True: Enables the feature
+# * False: Disables the feature. The profiling cannot be started via this
+# project
+# operations. If the profiling is triggered by another project, this project
+# part will be empty.
+# (boolean value)
+# Deprecated group/name - [profiler]/profiler_enabled
+#enabled = false
+
+#
+# Enable SQL requests profiling in services.
+#
+# Default value is False (SQL requests won't be traced).
+#
+# Possible values:
+#
+# * True: Enables SQL requests profiling. Each SQL query will be part of the
+# trace and can then be analyzed by how much time was spent for that.
+# * False: Disables SQL requests profiling. The spent time is only shown on a +# higher level of operations. Single SQL queries cannot be analyzed this way. +# (boolean value) +#trace_sqlalchemy = false + +# +# Secret key(s) to use for encrypting context data for performance profiling. +# +# This string value should have the following format: +# [,,...], +# where each key is some random string. A user who triggers the profiling via +# the REST API has to set one of these keys in the headers of the REST API call +# to include profiling results of this node for this particular project. +# +# Both "enabled" flag and "hmac_keys" config options should be set to enable +# profiling. Also, to generate correct profiling information across all +# services +# at least one key needs to be consistent between OpenStack projects. This +# ensures it can be used from client side to generate the trace, containing +# information from all possible resources. +# (string value) +#hmac_keys = SECRET_KEY + +# +# Connection string for a notifier backend. +# +# Default value is ``messaging://`` which sets the notifier to oslo_messaging. +# +# Examples of possible values: +# +# * ``messaging://`` - use oslo_messaging driver for sending spans. +# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans. +# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans. +# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending +# spans. +# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending +# spans. +# (string value) +#connection_string = messaging:// + +# +# Document type for notification indexing in elasticsearch. +# (string value) +#es_doc_type = notification + +# +# This parameter is a time value parameter (for example: es_scroll_time=2m), +# indicating for how long the nodes that participate in the search will +# maintain +# relevant resources in order to continue and support it. 
+# (string value)
+#es_scroll_time = 2m
+
+#
+# Elasticsearch splits large requests in batches. This parameter defines
+# maximum size of each batch (for example: es_scroll_size=10000).
+# (integer value)
+#es_scroll_size = 10000
+
+#
+# Redissentinel provides a timeout option on the connections.
+# This parameter defines that timeout (for example: socket_timeout=0.1).
+# (floating point value)
+#socket_timeout = 0.1
+
+#
+# Redissentinel uses a service name to identify a master redis service.
+# This parameter defines the name (for example:
+# ``sentinel_service_name=mymaster``).
+# (string value)
+#sentinel_service_name = mymaster
+
+#
+# Enable filter traces that contain error/exception to a separated place.
+#
+# Default value is set to False.
+#
+# Possible values:
+#
+# * True: Enable filter traces that contain error/exception.
+# * False: Disable the filter.
+# (boolean value)
+#filter_error_trace = false
+
+
+[receipt]
+
+#
+# From keystone
+#
+
+# The amount of time that a receipt should remain valid (in seconds). This
+# value should always be very short, as it represents how long a user has to
+# reattempt auth with the missing auth methods. (integer value)
+# Minimum value: 0
+# Maximum value: 86400
+#expiration = 300
+
+# Entry point for the receipt provider in the `keystone.receipt.provider`
+# namespace. The receipt provider controls the receipt construction and
+# validation operations. Keystone includes just the `fernet` receipt provider
+# for now. `fernet` receipts do not need to be persisted at all, but require
+# that you run `keystone-manage fernet_setup` (also see the `keystone-manage
+# fernet_rotate` command). (string value)
+#provider = fernet
+
+# Toggle for caching receipt creation and validation data. This has no effect
+# unless global caching is enabled, or if cache_on_issue is disabled as we only
+# cache receipts on issue. (boolean value)
+#caching = true
+
+# The number of seconds to cache receipt creation and validation data.
This has +# no effect unless both global and `[receipt] caching` are enabled. (integer +# value) +# Minimum value: 0 +#cache_time = 300 + +# Enable storing issued receipt data to receipt validation cache so that first +# receipt validation doesn't actually cause full validation cycle. This option +# has no effect unless global caching and receipt caching are enabled. (boolean +# value) +#cache_on_issue = true + + +[resource] + +# +# From keystone +# + +# DEPRECATED: Entry point for the resource driver in the `keystone.resource` +# namespace. Only a `sql` driver is supplied by keystone. Unless you are +# writing proprietary drivers for keystone, you do not need to set this option. +# (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: Non-SQL resource cannot be used with SQL Identity and has been unable +# to be used since Ocata. SQL Resource backend is a requirement as of Pike. +# Setting this option no longer has an effect on how Keystone operates. +#driver = sql + +# Toggle for resource caching. This has no effect unless global caching is +# enabled. (boolean value) +# Deprecated group/name - [assignment]/caching +#caching = true + +# Time to cache resource data in seconds. This has no effect unless global +# caching is enabled. (integer value) +# Deprecated group/name - [assignment]/cache_time +#cache_time = + +# Maximum number of entities that will be returned in a resource collection. +# (integer value) +# Deprecated group/name - [assignment]/list_limit +#list_limit = + +# Name of the domain that owns the `admin_project_name`. If left unset, then +# there is no admin project. `[resource] admin_project_name` must also be set +# to use this option. (string value) +#admin_project_domain_name = + +# This is a special project which represents cloud-level administrator +# privileges across services. 
Tokens scoped to this project will contain a true +# `is_admin_project` attribute to indicate to policy systems that the role +# assignments on that specific project should apply equally across every +# project. If left unset, then there is no admin project, and thus no explicit +# means of cross-project role assignments. `[resource] +# admin_project_domain_name` must also be set to use this option. (string +# value) +#admin_project_name = + +# This controls whether the names of projects are restricted from containing +# URL-reserved characters. If set to `new`, attempts to create or update a +# project with a URL-unsafe name will fail. If set to `strict`, attempts to +# scope a token with a URL-unsafe project name will fail, thereby forcing all +# project names to be updated to be URL-safe. (string value) +# Possible values: +# off - +# new - +# strict - +#project_name_url_safe = off + +# This controls whether the names of domains are restricted from containing +# URL-reserved characters. If set to `new`, attempts to create or update a +# domain with a URL-unsafe name will fail. If set to `strict`, attempts to +# scope a token with a URL-unsafe domain name will fail, thereby forcing all +# domain names to be updated to be URL-safe. (string value) +# Possible values: +# off - +# new - +# strict - +#domain_name_url_safe = off + + +[revoke] + +# +# From keystone +# + +# Entry point for the token revocation backend driver in the `keystone.revoke` +# namespace. Keystone only provides a `sql` driver, so there is no reason to +# set this option unless you are providing a custom entry point. (string value) +#driver = sql + +# The number of seconds after a token has expired before a corresponding +# revocation event may be purged from the backend. (integer value) +# Minimum value: 0 +#expiration_buffer = 1800 + +# Toggle for revocation event caching. This has no effect unless global caching +# is enabled. 
(boolean value) +#caching = true + +# Time to cache the revocation list and the revocation events (in seconds). +# This has no effect unless global and `[revoke] caching` are both enabled. +# (integer value) +# Deprecated group/name - [token]/revocation_cache_time +#cache_time = 3600 + + +[role] + +# +# From keystone +# + +# Entry point for the role backend driver in the `keystone.role` namespace. +# Keystone only provides a `sql` driver, so there's no reason to change this +# unless you are providing a custom entry point. (string value) +#driver = + +# Toggle for role caching. This has no effect unless global caching is enabled. +# In a typical deployment, there is no reason to disable this. (boolean value) +#caching = true + +# Time to cache role data, in seconds. This has no effect unless both global +# caching and `[role] caching` are enabled. (integer value) +#cache_time = + +# Maximum number of entities that will be returned in a role collection. This +# may be useful to tune if you have a large number of discrete roles in your +# deployment. (integer value) +#list_limit = + + +[saml] + +# +# From keystone +# + +# Determines the lifetime for any SAML assertions generated by keystone, using +# `NotOnOrAfter` attributes. (integer value) +#assertion_expiration_time = 3600 + +# Name of, or absolute path to, the binary to be used for XML signing. Although +# only the XML Security Library (`xmlsec1`) is supported, it may have a non- +# standard name or path on your system. If keystone cannot find the binary +# itself, you may need to install the appropriate package, use this option to +# specify an absolute path, or adjust keystone's PATH environment variable. +# (string value) +#xmlsec1_binary = xmlsec1 + +# Absolute path to the public certificate file to use for SAML signing. The +# value cannot contain a comma (`,`). (string value) +#certfile = /etc/keystone/ssl/certs/signing_cert.pem + +# Absolute path to the private key file to use for SAML signing. 
The value +# cannot contain a comma (`,`). (string value) +#keyfile = /etc/keystone/ssl/private/signing_key.pem + +# This is the unique entity identifier of the identity provider (keystone) to +# use when generating SAML assertions. This value is required to generate +# identity provider metadata and must be a URI (a URL is recommended). For +# example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/idp`. (uri +# value) +#idp_entity_id = + +# This is the single sign-on (SSO) service location of the identity provider +# which accepts HTTP POST requests. A value is required to generate identity +# provider metadata. For example: `https://keystone.example.com/v3/OS- +# FEDERATION/saml2/sso`. (uri value) +#idp_sso_endpoint = + +# This is the language used by the identity provider's organization. (string +# value) +#idp_lang = en + +# This is the name of the identity provider's organization. (string value) +#idp_organization_name = SAML Identity Provider + +# This is the name of the identity provider's organization to be displayed. +# (string value) +#idp_organization_display_name = OpenStack SAML Identity Provider + +# This is the URL of the identity provider's organization. The URL referenced +# here should be useful to humans. (uri value) +#idp_organization_url = https://example.com/ + +# This is the company name of the identity provider's contact person. (string +# value) +#idp_contact_company = Example, Inc. + +# This is the given name of the identity provider's contact person. (string +# value) +#idp_contact_name = SAML Identity Provider Support + +# This is the surname of the identity provider's contact person. (string value) +#idp_contact_surname = Support + +# This is the email address of the identity provider's contact person. (string +# value) +#idp_contact_email = support@example.com + +# This is the telephone number of the identity provider's contact person. 
+# (string value) +#idp_contact_telephone = +1 800 555 0100 + +# This is the type of contact that best describes the identity provider's +# contact person. (string value) +# Possible values: +# technical - +# support - +# administrative - +# billing - +# other - +#idp_contact_type = other + +# Absolute path to the identity provider metadata file. This file should be +# generated with the `keystone-manage saml_idp_metadata` command. There is +# typically no reason to change this value. (string value) +#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml + +# The prefix of the RelayState SAML attribute to use when generating enhanced +# client and proxy (ECP) assertions. In a typical deployment, there is no +# reason to change this value. (string value) +#relay_state_prefix = ss:mem: + + +[security_compliance] + +# +# From keystone +# + +# The maximum number of days a user can go without authenticating before being +# considered "inactive" and automatically disabled (locked). This feature is +# disabled by default; set any value to enable it. This feature depends on the +# `sql` backend for the `[identity] driver`. When a user exceeds this threshold +# and is considered "inactive", the user's `enabled` attribute in the HTTP API +# may not match the value of the user's `enabled` column in the user table. +# (integer value) +# Minimum value: 1 +#disable_user_account_days_inactive = + +# The maximum number of times that a user can fail to authenticate before the +# user account is locked for the number of seconds specified by +# `[security_compliance] lockout_duration`. This feature is disabled by +# default. If this feature is enabled and `[security_compliance] +# lockout_duration` is not set, then users may be locked out indefinitely until +# the user is explicitly enabled via the API. This feature depends on the `sql` +# backend for the `[identity] driver`. 
(integer value) +# Minimum value: 1 +#lockout_failure_attempts = + +# The number of seconds a user account will be locked when the maximum number +# of failed authentication attempts (as specified by `[security_compliance] +# lockout_failure_attempts`) is exceeded. Setting this option will have no +# effect unless you also set `[security_compliance] lockout_failure_attempts` +# to a non-zero value. This feature depends on the `sql` backend for the +# `[identity] driver`. (integer value) +# Minimum value: 1 +#lockout_duration = 1800 + +# The number of days for which a password will be considered valid before +# requiring it to be changed. This feature is disabled by default. If enabled, +# new password changes will have an expiration date, however existing passwords +# would not be impacted. This feature depends on the `sql` backend for the +# `[identity] driver`. (integer value) +# Minimum value: 1 +#password_expires_days = + +# This controls the number of previous user password iterations to keep in +# history, in order to enforce that newly created passwords are unique. The +# total number which includes the new password should not be greater or equal +# to this value. Setting the value to zero (the default) disables this feature. +# Thus, to enable this feature, values must be greater than 0. This feature +# depends on the `sql` backend for the `[identity] driver`. (integer value) +# Minimum value: 0 +#unique_last_password_count = 0 + +# The number of days that a password must be used before the user can change +# it. This prevents users from changing their passwords immediately in order to +# wipe out their password history and reuse an old password. This feature does +# not prevent administrators from manually resetting passwords. It is disabled +# by default and allows for immediate password changes. This feature depends on +# the `sql` backend for the `[identity] driver`. 
Note: If +# `[security_compliance] password_expires_days` is set, then the value for this +# option should be less than the `password_expires_days`. (integer value) +# Minimum value: 0 +#minimum_password_age = 0 + +# The regular expression used to validate password strength requirements. By +# default, the regular expression will match any password. The following is an +# example of a pattern which requires at least 1 letter, 1 digit, and have a +# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature +# depends on the `sql` backend for the `[identity] driver`. (string value) +#password_regex = + +# Describe your password regular expression here in language for humans. If a +# password fails to match the regular expression, the contents of this +# configuration variable will be returned to users to explain why their +# requested password was insufficient. (string value) +#password_regex_description = + +# Enabling this option requires users to change their password when the user is +# created, or upon administrative reset. Before accessing any services, +# affected users will have to change their password. To ignore this requirement +# for specific users, such as service users, set the `options` attribute +# `ignore_change_password_upon_first_use` to `True` for the desired user via +# the update user API. This feature is disabled by default. This feature is +# only applicable with the `sql` backend for the `[identity] driver`. (boolean +# value) +#change_password_upon_first_use = false + + +[shadow_users] + +# +# From keystone +# + +# Entry point for the shadow users backend driver in the +# `keystone.identity.shadow_users` namespace. This driver is used for +# persisting local user references to externally-managed identities (via +# federation, LDAP, etc). Keystone only provides a `sql` driver, so there is no +# reason to change this option unless you are providing a custom entry point. 
+# (string value) +#driver = sql + + +[signing] + +# +# From keystone +# + +# DEPRECATED: Absolute path to the public certificate file to use for signing +# responses to revocation lists requests. Set this together with `[signing] +# keyfile`. For non-production environments, you may be interested in using +# `keystone-manage pki_setup` to generate self-signed certificates. (string +# value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#certfile = /etc/keystone/ssl/certs/signing_cert.pem + +# DEPRECATED: Absolute path to the private key file to use for signing +# responses to revocation lists requests. Set this together with `[signing] +# certfile`. (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#keyfile = /etc/keystone/ssl/private/signing_key.pem + +# DEPRECATED: Absolute path to the public certificate authority (CA) file to +# use when creating self-signed certificates with `keystone-manage pki_setup`. +# Set this together with `[signing] ca_key`. There is no reason to set this +# option unless you are requesting revocation lists in a non-production +# environment. Use a `[signing] certfile` issued from a trusted certificate +# authority instead. (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. 
+#ca_certs = /etc/keystone/ssl/certs/ca.pem + +# DEPRECATED: Absolute path to the private certificate authority (CA) key file +# to use when creating self-signed certificates with `keystone-manage +# pki_setup`. Set this together with `[signing] ca_certs`. There is no reason +# to set this option unless you are requesting revocation lists in a non- +# production environment. Use a `[signing] certfile` issued from a trusted +# certificate authority instead. (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#ca_key = /etc/keystone/ssl/private/cakey.pem + +# DEPRECATED: Key size (in bits) to use when generating a self-signed token +# signing certificate. There is no reason to set this option unless you are +# requesting revocation lists in a non-production environment. Use a `[signing] +# certfile` issued from a trusted certificate authority instead. (integer +# value) +# Minimum value: 1024 +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#key_size = 2048 + +# DEPRECATED: The validity period (in days) to use when generating a self- +# signed token signing certificate. There is no reason to set this option +# unless you are requesting revocation lists in a non-production environment. +# Use a `[signing] certfile` issued from a trusted certificate authority +# instead. (integer value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. 
+#valid_days = 3650 + +# DEPRECATED: The certificate subject to use when generating a self-signed +# token signing certificate. There is no reason to set this option unless you +# are requesting revocation lists in a non-production environment. Use a +# `[signing] certfile` issued from a trusted certificate authority instead. +# (string value) +# This option is deprecated for removal since P. +# Its value may be silently ignored in the future. +# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in +# Pike. These options remain for backwards compatibility. +#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[token] + +provider = fernet + +# +# From keystone +# + +# The amount of time that a token should remain valid (in seconds). Drastically +# reducing this value may break "long-running" operations that involve multiple +# services to coordinate together, and will force users to authenticate with +# keystone more frequently. Drastically increasing this value will increase the +# number of tokens that will be simultaneously valid. Keystone tokens are also +# bearer tokens, so a shorter duration will also reduce the potential security +# impact of a compromised token. (integer value) +# Minimum value: 0 +# Maximum value: 9223372036854775807 +#expiration = 3600 + +# Entry point for the token provider in the `keystone.token.provider` +# namespace. The token provider controls the token construction, validation, +# and revocation operations. Supported upstream providers are `fernet` and +# `jws`. Neither `fernet` or `jws` tokens require persistence and both require +# additional setup. If using `fernet`, you're required to run `keystone-manage +# fernet_setup`, which creates symmetric keys used to encrypt tokens. If using +# `jws`, you're required to generate an ECDSA keypair using a SHA-256 hash +# algorithm for signing and validating token, which can be done with `keystone- +# manage create_jws_keypair`. 
Note that `fernet` tokens are encrypted and `jws` +# tokens are only signed. Please be sure to consider this if your deployment +# has security requirements regarding payload contents used to generate token +# IDs. (string value) +#provider = fernet + +# Toggle for caching token creation and validation data. This has no effect +# unless global caching is enabled. (boolean value) +#caching = true + +# The number of seconds to cache token creation and validation data. This has +# no effect unless both global and `[token] caching` are enabled. (integer +# value) +# Minimum value: 0 +# Maximum value: 9223372036854775807 +#cache_time = + +# This toggles support for revoking individual tokens by the token identifier +# and thus various token enumeration operations (such as listing all tokens +# issued to a specific user). These operations are used to determine the list +# of tokens to consider revoked. Do not disable this option if you're using the +# `kvs` `[revoke] driver`. (boolean value) +#revoke_by_id = true + +# This toggles whether scoped tokens may be re-scoped to a new project or +# domain, thereby preventing users from exchanging a scoped token (including +# those with a default project scope) for any other token. This forces users to +# either authenticate for unscoped tokens (and later exchange that unscoped +# token for tokens with a more specific scope) or to provide their credentials +# in every request for a scoped token to avoid re-scoping altogether. (boolean +# value) +#allow_rescope_scoped_token = true + +# DEPRECATED: This controls whether roles should be included with tokens that +# are not directly assigned to the token's scope, but are instead linked +# implicitly to other role assignments. (boolean value) +# This option is deprecated for removal since R. +# Its value may be silently ignored in the future. +# Reason: Default roles depend on a chain of implied role assignments. Ex: an +# admin user will also have the reader and member role. 
By ensuring that all +# these roles will always appear on the token validation response, we can +# improve the simplicity and readability of policy files. +#infer_roles = true + +# DEPRECATED: Enable storing issued token data to token validation cache so +# that first token validation doesn't actually cause full validation cycle. +# This option has no effect unless global caching is enabled and will still +# cache tokens even if `[token] caching = False`. (boolean value) +# This option is deprecated for removal since S. +# Its value may be silently ignored in the future. +# Reason: Keystone already exposes a configuration option for caching tokens. +# Having a separate configuration option to cache tokens when they are issued +# is redundant, unnecessarily complicated, and is misleading if token caching +# is disabled because tokens will still be pre-cached by default when they are +# issued. The ability to pre-cache tokens when they are issued is going to rely +# exclusively on the ``keystone.conf [token] caching`` option in the future. +#cache_on_issue = true + +# This controls the number of seconds that a token can be retrieved for beyond +# the built-in expiry time. This allows long running operations to succeed. +# Defaults to two days. (integer value) +#allow_expired_window = 172800 + + +[tokenless_auth] + +# +# From keystone +# + +# The list of distinguished names which identify trusted issuers of client +# certificates allowed to use X.509 tokenless authorization. If the option is +# absent then no certificates will be allowed. The format for the values of a +# distinguished name (DN) must be separated by a comma and contain no spaces. +# Furthermore, because an individual DN may contain commas, this configuration +# option may be repeated multiple times to represent multiple values. 
For +# example, keystone.conf would include two consecutive lines in order to trust +# two different DNs, such as `trusted_issuer = CN=john,OU=keystone,O=openstack` +# and `trusted_issuer = CN=mary,OU=eng,O=abc`. (multi valued) +#trusted_issuer = + +# The federated protocol ID used to represent X.509 tokenless authorization. +# This is used in combination with the value of `[tokenless_auth] +# issuer_attribute` to find a corresponding federated mapping. In a typical +# deployment, there is no reason to change this value. (string value) +#protocol = x509 + +# The name of the WSGI environment variable used to pass the issuer of the +# client certificate to keystone. This attribute is used as an identity +# provider ID for the X.509 tokenless authorization along with the protocol to +# look up its corresponding mapping. In a typical deployment, there is no +# reason to change this value. (string value) +#issuer_attribute = SSL_CLIENT_I_DN + + +[trust] + +# +# From keystone +# + +# Allows authorization to be redelegated from one user to another, effectively +# chaining trusts together. When disabled, the `remaining_uses` attribute of a +# trust is constrained to be zero. (boolean value) +#allow_redelegation = false + +# Maximum number of times that authorization can be redelegated from one user +# to another in a chain of trusts. This number may be reduced further for a +# specific trust. (integer value) +#max_redelegation_count = 3 + +# Entry point for the trust backend driver in the `keystone.trust` namespace. +# Keystone only provides a `sql` driver, so there is no reason to change this +# unless you are providing a custom entry point. (string value) +#driver = sql + + +[unified_limit] + +# +# From keystone +# + +# Entry point for the unified limit backend driver in the +# `keystone.unified_limit` namespace. Keystone only provides a `sql` driver, so +# there's no reason to change this unless you are providing a custom entry +# point. 
(string value) +#driver = sql + +# Toggle for unified limit caching. This has no effect unless global caching is +# enabled. In a typical deployment, there is no reason to disable this. +# (boolean value) +#caching = true + +# Time to cache unified limit data, in seconds. This has no effect unless both +# global caching and `[unified_limit] caching` are enabled. (integer value) +#cache_time = + +# Maximum number of entities that will be returned in a role collection. This +# may be useful to tune if you have a large number of unified limits in your +# deployment. (integer value) +#list_limit = + +# The enforcement model to use when validating limits associated to projects. +# Enforcement models will behave differently depending on the existing limits, +# which may result in backwards incompatible changes if a model is switched in +# a running deployment. (string value) +# Possible values: +# flat - +# strict_two_level - +#enforcement_model = flat + + +[wsgi] + +# +# From keystone +# + +# If set to true, this enables the oslo debug middleware in Keystone. This +# Middleware prints a lot of information about the request and the response. It +# is useful for getting information about the data on the wire (decoded) and +# passed to the WSGI application pipeline. This middleware has no effect on the +# "debug" setting in the [DEFAULT] section of the config file or setting +# Keystone's log-level to "DEBUG"; it is specific to debugging the WSGI data as +# it enters and leaves Keystone (specific request-related data). This option is +# used for introspection on the request and response data between the web +# server (apache, nginx, etc) and Keystone. This middleware is inserted as the +# first element in the middleware chain and will show the data closest to the +# wire. WARNING: NOT INTENDED FOR USE IN PRODUCTION. THIS MIDDLEWARE CAN AND +# WILL EMIT SENSITIVE/PRIVILEGED DATA. 
(boolean value) +#debug_middleware = false diff --git a/compose_deployment/conf_mysql/99-openstack.conf b/compose_deployment/conf_mysql/99-openstack.conf new file mode 100644 index 0000000..4cd6688 --- /dev/null +++ b/compose_deployment/conf_mysql/99-openstack.conf @@ -0,0 +1,13 @@ +[mysqld] +bind-address = 0.0.0.0 + +default-storage-engine = innodb +innodb_file_per_table = on +max_connections = 4096 +collation-server = utf8_general_ci +character-set-server = utf8 +wait_timeout = 600 +interactive_timeout = 600 +net_read_timeout = 600 +net_write_timeout = 600 + diff --git a/compose_deployment/conf_ui/local_settings.py b/compose_deployment/conf_ui/local_settings.py new file mode 100644 index 0000000..042b682 --- /dev/null +++ b/compose_deployment/conf_ui/local_settings.py @@ -0,0 +1,916 @@ +# -*- coding: utf-8 -*- + +import os + +from django.utils.translation import ugettext_lazy as _ + +from horizon.utils import secret_key + +from openstack_dashboard.settings import HORIZON_CONFIG + +DEBUG = False + +# This setting controls whether or not compression is enabled. Disabling +# compression makes Horizon considerably slower, but makes it much easier +# to debug JS and CSS changes +#COMPRESS_ENABLED = not DEBUG + +# This setting controls whether compression happens on the fly, or offline +# with `python manage.py compress` +# See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression +# for more information +#COMPRESS_OFFLINE = not DEBUG + +# WEBROOT is the location relative to Webserver root +# should end with a slash. +WEBROOT = '/' +#LOGIN_URL = WEBROOT + 'auth/login/' +#LOGOUT_URL = WEBROOT + 'auth/logout/' +# +# LOGIN_REDIRECT_URL can be used as an alternative for +# HORIZON_CONFIG.user_home, if user_home is not set. 
+# Do not set it to '/home/', as this will cause circular redirect loop +#LOGIN_REDIRECT_URL = WEBROOT + +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +ALLOWED_HOSTS = ['*', ] + +# Set SSL proxy settings: +# Pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header +#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# The absolute path to the directory where message files are collected. +# The message file must have a .json file extension. When the user logins to +# horizon, the message files collected are processed and displayed to the user. +#MESSAGES_PATH=None + +# Overrides for OpenStack API versions. Use this setting to force the +# OpenStack dashboard to use a specific API version for a given service API. +# Versions specified here should be integers or floats, not strings. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, The identity service APIs have inconsistent +# use of the decimal point, so valid options would be 2.0 or 3. +# Minimum compute version to get the instance locked status is 2.9. +#OPENSTACK_API_VERSIONS = { +# "data-processing": 1.1, +# "identity": 3, +# "image": 2, +# "volume": 2, +# "compute": 2, +#} + +# Set this to True if running on a multi-domain model. When this is enabled, it +# will require the user to enter the Domain name in addition to the username +# for login. 
+#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Set this to True if you want available domains displayed as a dropdown menu +# on the login screen. It is strongly advised NOT to enable this for public +# clouds, as advertising enabled domains to unauthenticated customers +# irresponsibly exposes private information. This should only be used for +# private clouds where the dashboard sits behind a corporate firewall. +#OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = False + +# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to +# set the available domains to choose from. This is a list of pairs whose first +# value is the domain name and the second is the display name. +#OPENSTACK_KEYSTONE_DOMAIN_CHOICES = ( +# ('Default', 'Default'), +#) + +# Overrides the default domain used when running on single-domain model +# with Keystone V3. All entities will be created in the default domain. +# NOTE: This value must be the name of the default domain, NOT the ID. +# Also, you will most likely have a value in the keystone policy file like this +# "cloud_admin": "rule:admin_required and domain_id:" +# This value must be the name of the domain whose ID is specified there. +#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Set this to True to enable panels that provide the ability for users to +# manage Identity Providers (IdPs) and establish a set of rules to map +# federation protocol attributes to Identity API attributes. +# This extension requires v3.0+ of the Identity API. +#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False + +# Set Console type: +# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL", "MKS" +# or None. Set to None explicitly if you want to deactivate the console. +#CONSOLE_TYPE = "AUTO" + +# Toggle showing the openrc file for Keystone V2. 
+# If set to false the link will be removed from the user dropdown menu +# and the API Access page +#SHOW_KEYSTONE_V2_RC = True + +# If provided, a "Report Bug" link will be displayed in the site header +# which links to the value of this setting (ideally a URL containing +# information on how to report issues). +#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com" + +# Show backdrop element outside the modal, do not close the modal +# after clicking on backdrop. +#HORIZON_CONFIG["modal_backdrop"] = "static" + +# Specify a regular expression to validate user passwords. +#HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements."), +#} + +# Turn off browser autocompletion for forms including the login form and +# the database creation workflow if so desired. +#HORIZON_CONFIG["password_autocomplete"] = "off" + +# Setting this to True will disable the reveal button for password fields, +# including on the login form. +#HORIZON_CONFIG["disable_password_reveal"] = False + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizon generate a +# default secret key that is unique on this machine, i.e. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, +# there may be situations where you would want to set this explicitly, e.g. +# when multiple dashboard instances are distributed on different machines +# (usually behind a load-balancer). Either you have to make sure that a session +# gets all requests routed to the same dashboard instance or you set the same +# SECRET_KEY for all of them. +SECRET_KEY = secret_key.generate_or_read_from_file('/var/lib/openstack-dashboard/secret_key') + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again.
To use +# memcached set CACHES to something like + +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': 'iotronic-ui:11211', + }, +} + +#CACHES = { +# 'default': { +# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', +# } +#} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +#EMAIL_HOST = 'smtp.my-company.com' +#EMAIL_PORT = 25 +#EMAIL_HOST_USER = 'djangomail' +#EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +#AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v3', 'cluster1'), +# ('http://cluster2.example.com:5000/v3', 'cluster2'), +#] + +OPENSTACK_HOST = "keystone" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin" + +# For setting the default service region on a per-endpoint basis. Note that the +# default value for this setting is {}, and below is just an example of how it +# should be specified. +# A key of '*' is an optional global default if no other key matches. +#DEFAULT_SERVICE_REGIONS = { +# '*': 'RegionOne' +# OPENSTACK_KEYSTONE_URL: 'RegionTwo' +#} + +# Enables keystone web single-sign-on if set to True. +#WEBSSO_ENABLED = False + +# Authentication mechanism to be selected as default. +# The value must be a key from WEBSSO_CHOICES. +#WEBSSO_INITIAL_CHOICE = "credentials" + +# The list of authentication mechanisms which include keystone +# federation protocols and identity provider/federation protocol +# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol +# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID +# Connect respectively. +# Do not remove the mandatory credentials mechanism. 
+# Note: The last two tuples are sample mapping keys to an identity provider +# and federation protocol combination (WEBSSO_IDP_MAPPING). +#WEBSSO_CHOICES = ( +# ("credentials", _("Keystone Credentials")), +# ("oidc", _("OpenID Connect")), +# ("saml2", _("Security Assertion Markup Language")), +# ("acme_oidc", "ACME - OpenID Connect"), +# ("acme_saml2", "ACME - SAML2"), +#) + +# A dictionary of specific identity provider and federation protocol +# combinations. From the selected authentication mechanism, the value +# will be looked up as keys in the dictionary. If a match is found, +# it will redirect the user to an identity provider and federation protocol +# specific WebSSO endpoint in keystone, otherwise it will use the value +# as the protocol_id when redirecting to the WebSSO by protocol endpoint. +# NOTE: The value is expected to be a tuple formatted as: (<idp_id>, <protocol_id>). +#WEBSSO_IDP_MAPPING = { +# "acme_oidc": ("acme", "oidc"), +# "acme_saml2": ("acme", "saml2"), +#} + +# If set this URL will be used for web single-sign-on authentication +# instead of OPENSTACK_KEYSTONE_URL. This is needed in the deployment +# scenarios where network segmentation is used per security requirement. +# In this case, the controllers are not reachable from public network. +# Therefore, user's browser will not be able to access OPENSTACK_KEYSTONE_URL +# if it is set to the internal endpoint. +#WEBSSO_KEYSTONE_URL = "http://keystone-public.example.com/v3" + +# The Keystone Provider drop down uses Keystone to Keystone federation +# to switch between Keystone service providers. +# Set display name for Identity Provider (dropdown display name) +#KEYSTONE_PROVIDER_IDP_NAME = "Local Keystone" +# This id is used only for comparison with the service provider IDs. This ID +# should not match any service provider IDs.
+#KEYSTONE_PROVIDER_IDP_ID = "localkeystone" + +# Disable SSL certificate checks (useful for self-signed certificates): +#OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True, +} + +# Setting this to True, will add a new "Retrieve Password" action on instance, +# allowing Admin session password retrieval/decryption. +#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False + +# The Launch Instance user experience has been significantly enhanced. +# You can choose whether to enable the new launch instance experience, +# the legacy experience, or both. The legacy experience will be removed +# in a future release, but is available as a temporary backup setting to ensure +# compatibility with existing deployments. Further development will not be +# done on the legacy experience. Please report any problems with the new +# experience via the Launchpad tracking system. +# +# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to +# determine the experience to enable. Set them both to true to enable +# both. +#LAUNCH_INSTANCE_LEGACY_ENABLED = True +#LAUNCH_INSTANCE_NG_ENABLED = False + +# A dictionary of settings which can be used to provide the default values for +# properties found in the Launch Instance modal. 
+#LAUNCH_INSTANCE_DEFAULTS = { +# 'config_drive': False, +# 'enable_scheduler_hints': True, +# 'disable_image': False, +# 'disable_instance_snapshot': False, +# 'disable_volume': False, +# 'disable_volume_snapshot': False, +# 'create_volume': True, +#} + +# The Xen Hypervisor has the ability to set the mount point for volumes +# attached to instances (other Hypervisors currently do not). Setting +# can_set_mount_point to True will add the option to set the mount point +# from the UI. +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': False, + 'can_set_password': False, + 'requires_keypair': False, + 'enable_quotas': True +} + +# This settings controls whether IP addresses of servers are retrieved from +# neutron in the project instance table. Setting this to ``False`` may mitigate +# a performance issue in the project instance table in large deployments. +#OPENSTACK_INSTANCE_RETRIEVE_IP_ADDRESSES = True + +# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional +# services provided by cinder that is not exposed by its extension API. +OPENSTACK_CINDER_FEATURES = { + 'enable_backup': False, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currently available are load +# balancer service, security groups, quotas, VPN service. +OPENSTACK_NEUTRON_NETWORK = { + 'enable_router': True, + 'enable_quotas': True, + 'enable_ipv6': True, + 'enable_distributed_router': False, + 'enable_ha_router': False, + 'enable_fip_topology_check': True, + + # Default dns servers you would like to use when a subnet is + # created. This is only a default, users can still choose a different + # list of dns servers when creating a new subnet. + # The entries below are examples only, and are not appropriate for + # real deployments + # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"], + + # Set which provider network types are supported. 
Only the network types + # in this list will be available to choose from when creating a network. + # Network types include local, flat, vlan, gre, vxlan and geneve. + # 'supported_provider_types': ['*'], + + # You can configure available segmentation ID range per network type + # in your deployment. + # 'segmentation_id_range': { + # 'vlan': [1024, 2048], + # 'vxlan': [4094, 65536], + # }, + + # You can define additional provider network types here. + # 'extra_provider_types': { + # 'awesome_type': { + # 'display_name': 'Awesome New Type', + # 'require_physical_network': False, + # 'require_segmentation_id': True, + # } + # }, + + # Set which VNIC types are supported for port binding. Only the VNIC + # types in this list will be available to choose from when creating a + # port. + # VNIC types include 'normal', 'direct', 'direct-physical', 'macvtap', + # 'baremetal' and 'virtio-forwarder' + # Set to empty list or None to disable VNIC type selection. + 'supported_vnic_types': ['*'], + + # Set list of available physical networks to be selected in the physical + # network field on the admin create network modal. If it's set to an empty + # list, the field will be a regular input field. + # e.g. ['default', 'test'] + 'physical_networks': [], + +} + +# The OPENSTACK_HEAT_STACK settings can be used to disable password +# field required while launching the stack. +OPENSTACK_HEAT_STACK = { + 'enable_user_pass': True, +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. 
+#OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', _('Select format')), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('docker', _('Docker')), +# ('iso', _('ISO - Optical Disk Image')), +# ('ova', _('OVA - Open Virtual Appliance')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI - Virtual Disk Image')), +# ('vhd', _('VHD - Virtual Hard Disk')), +# ('vhdx', _('VHDX - Large Virtual Hard Disk')), +# ('vmdk', _('VMDK - Virtual Machine Disk')), +# ], +#} + +# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for +# image custom property attributes that appear on image detail pages. +IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type"), +} + +# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image +# custom properties should not be displayed in the Image Custom Properties +# table. +IMAGE_RESERVED_CUSTOM_PROPERTIES = [] + +# Set to 'legacy' or 'direct' to allow users to upload images to glance via +# Horizon server. When enabled, a file form field will appear on the create +# image form. If set to 'off', there will be no file form field on the create +# image form. See documentation for deployment considerations. +#HORIZON_IMAGES_UPLOAD_MODE = 'legacy' + +# Allow a location to be set when creating or updating Glance images. +# If using Glance V2, this value should be False unless the Glance +# configuration and policies allow setting locations. +#IMAGES_ALLOW_LOCATION = False + +# A dictionary of default settings for create image modal. 
+#CREATE_IMAGE_DEFAULTS = { +# 'image_visibility': "public", +#} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = None + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The size of chunk in bytes for downloading objects from Swift +SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024 + +# The default number of lines displayed for instance console log. +INSTANCE_LOG_LENGTH = 35 + +# Specify a maximum number of items to display in a dropdown. +DROPDOWN_MAX_ITEMS = 30 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. If you would like a different sort order, +# you can provide another flavor attribute as sorting key. Alternatively, you +# can provide a custom callback method to use for sorting. You can also provide +# a flag for reverse sort. 
For more info, see +# http://docs.python.org/2/library/functions.html#sorted +#CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': 'name', +# # or +# 'key': my_awesome_callback_method, +# 'reverse': False, +#} + +# Set this to True to display an 'Admin Password' field on the Change Password +# form to verify that it is indeed the admin logged-in who wants to change +# the password. +#ENFORCE_PASSWORD_CHECK = False + +# Modules that provide /auth routes that can be used to handle different types +# of user authentication. Add auth plugins that require extra route handling to +# this list. +#AUTHENTICATION_URLS = [ +# 'openstack_auth.urls', +#] + +# The Horizon Policy Enforcement engine uses these values to load per service +# policy rule files. The content of these files should match the files the +# OpenStack services are using to determine role based access control in the +# target installation. + +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") + +# Map of local copy of service policy files. +# Please insure that your identity policy file matches the one being used on +# your keystone servers. There is an alternate policy file that may be used +# in the Keystone v3 multi-domain case, policy.v3cloudsample.json. +# This file is not included in the Horizon repository by default but can be +# found at +# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \ +# policy.v3cloudsample.json +# Having matching policy files on the Horizon and Keystone servers is essential +# for normal operation. This holds true for all services and their policy files. 
+#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +# 'network': 'neutron_policy.json', +#} + +# Change this patch to the appropriate list of tuples containing +# a key, label and static directory containing two files: +# _variables.scss and _styles.scss +AVAILABLE_THEMES = [ + ('default', 'Default', 'themes/default'), +# ('material', 'Material', 'themes/material'), +] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + # If apache2 mod_wsgi is used to deploy OpenStack dashboard + # timestamp is output by mod_wsgi. If WSGI framework you use does not + # output timestamp for logging, add %(asctime)s in the following + # format definitions. + 'formatters': { + 'console': { + 'format': '%(levelname)s %(name)s %(message)s' + }, + 'operation': { + # The format of "%(message)s" is defined by + # OPERATION_LOG_OPTIONS['format'] + 'format': '%(message)s' + }, + }, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'logging.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. 
+ 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'console', + }, + 'operation': { + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'operation', + }, + }, + 'loggers': { + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'horizon.operation_log': { + 'handlers': ['operation'], + 'level': 'INFO', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneauth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'oslo_policy': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'urllib3': { + 'handlers': ['null'], + 'propagate': False, + }, + 'chardet.charsetprober': { + 'handlers': ['null'], + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + 'scss': { + 'handlers': ['null'], + 'propagate': False, + }, + }, +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. +SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': _('All TCP'), + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': _('All UDP'), + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': _('All ICMP'), + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { 
+ 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +# Deprecation Notice: +# +# The setting FLAVOR_EXTRA_KEYS has been deprecated. +# Please load extra spec metadata into the Glance Metadata Definition Catalog. +# +# The sample quota definitions can be found in: +# /etc/metadefs/compute-quota.json +# +# The metadata definition catalog supports CLI and API: +# $glance --os-image-api-version 2 help md-namespace-import +# $glance-manage db_load_metadefs +# +# See Metadata Definitions on: +# https://docs.openstack.org/glance/latest/user/glancemetadefcatalogapi.html + +# The hash algorithm to use for authentication tokens. This must +# match the hash algorithm that the identity server and the +# auth_token middleware are using. Allowed values are the +# algorithms supported by Python's hashlib library. +#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' + +# AngularJS requires some settings to be made available to +# the client side. Some settings are required by in-tree / built-in horizon +# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the +# form of ['SETTING_1','SETTING_2'], etc. +# +# You may remove settings from this list for security purposes, but do so at +# the risk of breaking a built-in horizon feature. These settings are required +# for horizon to function properly. Only remove them if you know what you +# are doing. These settings may in the future be moved to be defined within +# the enabled panel configuration. +# You should not add settings to this list for out of tree extensions. 
+# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI +REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES', + 'LAUNCH_INSTANCE_DEFAULTS', + 'OPENSTACK_IMAGE_FORMATS', + 'OPENSTACK_KEYSTONE_BACKEND', + 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN', + 'CREATE_IMAGE_DEFAULTS', + 'ENFORCE_PASSWORD_CHECK'] + +# Additional settings can be made available to the client side for +# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS +# !! Please use extreme caution as the settings are transferred via HTTP/S +# and are not encrypted on the browser. This is an experimental API and +# may be deprecated in the future without notice. +#REST_API_ADDITIONAL_SETTINGS = [] + +############################################################################### +# Ubuntu Settings +############################################################################### + + # The default theme if no cookie is present +DEFAULT_THEME = 'default' + +# Default Ubuntu apache configuration uses /horizon as the application root. +WEBROOT='/horizon/' + +# By default, validation of the HTTP Host header is disabled. Production +# installations should have this set accordingly. For more information +# see https://docs.djangoproject.com/en/dev/ref/settings/. +ALLOWED_HOSTS = '*' + +# Compress all assets offline as part of packaging installation +COMPRESS_OFFLINE = True + +# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded +# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame +# Scripting (XFS) vulnerability, so this option allows extra security hardening +# where iframes are not used in deployment. Default setting is True. +# For more information see: +# http://tinyurl.com/anticlickjack +#DISALLOW_IFRAME_EMBED = True + +# Help URL can be made available for the client. To provide a help URL, edit the +# following attribute to the URL of your choice. 
+#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" + +# Settings for OperationLogMiddleware +# OPERATION_LOG_ENABLED is flag to use the function to log an operation on +# Horizon. +# mask_targets is arrangement for appointing a target to mask. +# method_targets is arrangement of HTTP method to output log. +# format is the log contents. +#OPERATION_LOG_ENABLED = False +#OPERATION_LOG_OPTIONS = { +# 'mask_fields': ['password'], +# 'target_methods': ['POST'], +# 'ignored_urls': ['/js/', '/static/', '^/api/'], +# 'format': ("[%(client_ip)s] [%(domain_name)s]" +# " [%(domain_id)s] [%(project_name)s]" +# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]" +# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]" +# " [%(http_status)s] [%(param)s]"), +#} + +# The default date range in the Overview panel meters - either minus N +# days (if the value is integer N), or from the beginning of the current month +# until today (if set to None). This setting should be used to limit the amount +# of data fetched by default when rendering the Overview panel. +#OVERVIEW_DAYS_RANGE = 1 + +# To allow operators to require users provide a search criteria first +# before loading any data into the views, set the following dict +# attributes to True in each one of the panels you want to enable this feature. +# Follow the convention . +#FILTER_DATA_FIRST = { +# 'admin.instances': False, +# 'admin.images': False, +# 'admin.networks': False, +# 'admin.routers': False, +# 'admin.volumes': False, +# 'identity.users': False, +# 'identity.projects': False, +# 'identity.groups': False, +# 'identity.roles': False +#} + +# Dict used to restrict user private subnet cidr range. +# An empty list means that user input will not be restricted +# for a corresponding IP version. By default, there is +# no restriction for IPv4 or IPv6. 
To restrict +# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR +# to something like +#ALLOWED_PRIVATE_SUBNET_CIDR = { +# 'ipv4': ['10.0.0.0/8', '192.168.0.0/16'], +# 'ipv6': ['fc00::/7'] +#} +ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []} + +# Projects and users can have extra attributes as defined by keystone v3. +# Horizon has the ability to display these extra attributes via this setting. +# If you'd like to display extra data in the project or user tables, set the +# corresponding dict key to the attribute name, followed by the display name. +# For more information, see horizon's customization +# (https://docs.openstack.org/horizon/latest/configuration/customizing.html#horizon-customization-module-overrides) +#PROJECT_TABLE_EXTRA_INFO = { +# 'phone_num': _('Phone Number'), +#} +#USER_TABLE_EXTRA_INFO = { +# 'phone_num': _('Phone Number'), +#} + +# Password will have an expiration date when using keystone v3 and enabling the +# feature. +# This setting allows you to set the number of days that the user will be alerted +# prior to the password expiration. +# Once the password expires keystone will deny the access and users must +# contact an admin to change their password. +#PASSWORD_EXPIRES_WARNING_THRESHOLD_DAYS = 0 diff --git a/compose_deployment/conf_wagent/iotronic.conf b/compose_deployment/conf_wagent/iotronic.conf new file mode 100644 index 0000000..c096c3c --- /dev/null +++ b/compose_deployment/conf_wagent/iotronic.conf @@ -0,0 +1,96 @@ +[DEFAULT] +transport_url = rabbit://openstack:unime@rabbitmq + +debug=True +proxy=nginx +log_file = /var/log/iotronic/iotronic-wagent.log + +# Authentication strategy used by iotronic-api: one of +# "keystone" or "noauth". "noauth" should not be used in a +# production environment because all authentication will be +# disabled. (string value) +auth_strategy=keystone + +# Enable pecan debug mode. WARNING: this is insecure and +# should not be used in a production environment. 
(boolean +# value) +#pecan_debug=false + + +[wamp] +wamp_transport_url = wss://iotronic-wagent:8181/ +wamp_realm = s4t +skip_cert_verify= True +register_agent = True + + + +[database] +connection = mysql+pymysql://iotronic:unime@iotronic-db/iotronic + +[keystone_authtoken] +www_authenticate_uri = http://keystone:5000 +auth_url = http://keystone:5000 +auth_plugin = password +auth_type = password +project_domain_id = default +user_domain_id = default +project_name = service +username = iotronic +password = unime + + +[neutron] +auth_url = http://controller:5000 +url = http://controller:9696 +auth_strategy = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = neutron +password = netrn_pwd +retries = 3 +project_domain_id= default + + +[designate] +auth_url = http://controller:35357 +url = http://controller:9001 +auth_strategy = password +project_domain_name = default +user_domain_name = default +region_name = RegionOne +project_name = service +username = designate +password = password +retries = 3 +project_domain_id= default + + +[cors] +# Indicate whether this resource may be shared with the domain +# received in the requests "origin" header. Format: +# "://[:]", no trailing slash. Example: +# https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user +# credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. +# Defaults to HTTP Simple Headers. (list value) +#expose_headers = + +# Maximum cache age of CORS preflight requests. (integer +# value) +#max_age = 3600 + +# Indicate which methods can be used during the actual +# request. (list value) +#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH + +# Indicate which header field names may be used during the +# actual request. 
(list value) +#allow_headers = diff --git a/compose_deployment/docker-compose.yml b/compose_deployment/docker-compose.yml new file mode 100644 index 0000000..b807a03 --- /dev/null +++ b/compose_deployment/docker-compose.yml @@ -0,0 +1,393 @@ +version: "3.8" + +services: + ca_service: + image: debian:buster + container_name: ca_service + networks: + - s4t + volumes: + - iotronic_ssl:/etc/ssl/iotronic # Condiviso con iotronic-wstun + entrypoint: ["/bin/bash", "-c"] + command: + - | + echo "[INFO] Installazione di OpenSSL..." + apt-get update && apt-get install -y openssl && + + echo "[INFO] Generazione della Root CA..." + mkdir -p /etc/ssl/iotronic && + cd /etc/ssl/iotronic && + + openssl genrsa -out iotronic_CA.key 2048 && + openssl req -x509 -new -nodes -key iotronic_CA.key -sha256 -days 18250 \ + -subj "/C=IT/O=iotronic" -out iotronic_CA.pem && + + echo "[INFO] Generazione della chiave privata e del certificato per Crossbar..." + openssl genrsa -out crossbar.key 2048 && + openssl req -new -key crossbar.key -subj "/C=IT/O=iotronic/CN=crossbar" -out crossbar.csr && + openssl x509 -req -in crossbar.csr -CA iotronic_CA.pem -CAkey iotronic_CA.key -CAcreateserial -out crossbar.pem -days 18250 -sha256 && + + echo "[INFO] Impostazione permessi certificati..." + chmod 644 iotronic_CA.key iotronic_CA.pem crossbar.key crossbar.pem + chmod 755 /etc/ssl/iotronic + + echo "[INFO] Certificati generati con successo." + tail -f /dev/null + + crossbar: + image: crossbario/crossbar + container_name: crossbar + restart: unless-stopped + networks: + - s4t + volumes: + - iotronic_ssl:/node/.crossbar/ssl # Condiviso con iotronic-wstun + - crossbar_data:/node/.crossbar + ports: + - "8181:8181" + entrypoint: ["/bin/sh", "-c"] + command: + - | + echo "[INFO] Attesa dei certificati..." + while [ ! -f /node/.crossbar/ssl/crossbar.pem ] || [ ! -f /node/.crossbar/ssl/crossbar.key ]; do + sleep 2 + done + echo "[INFO] Certificati trovati!" + + echo "[INFO] Scrittura configurazione Crossbar..." 
+ cat <<EOF > /node/.crossbar/config.json
+ {
+ "version": 2,
+ "controller": {},
+ "workers": [
+ {
+ "type": "router",
+ "realms": [
+ {
+ "name": "s4t",
+ "roles": [
+ {
+ "name": "anonymous",
+ "permissions": [
+ {
+ "uri": "*",
+ "allow": {
+ "publish": true,
+ "subscribe": true,
+ "call": true,
+ "register": true
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "transports": [
+ {
+ "type": "websocket",
+ "endpoint": {
+ "type": "tcp",
+ "port": 8181,
+ "tls": {
+ "chain_certificates": ["/node/.crossbar/ssl/iotronic_CA.pem"],
+ "key": "/node/.crossbar/ssl/crossbar.key",
+ "certificate": "/node/.crossbar/ssl/crossbar.pem"
+ }
+ },
+ "options":{
+ "enable_webstatus": true,
+ "fail_by_drop": true,
+ "open_handshake_timeout": 2500,
+ "close_handshake_timeout": 1000,
+ "auto_ping_interval": 30000,
+ "auto_ping_timeout": 5000,
+ "auto_ping_size": 13
+ }
+ }
+ ]
+ }
+ ]
+ }
+ EOF
+
+ echo "[INFO] Avvio di Crossbar..."
+ crossbar start
+
+ iotronic-wstun:
+ image: lucadagati/iotronic-wstun:latest
+ container_name: iotronic-wstun
+ restart: unless-stopped
+ networks:
+ - s4t
+ ports:
+ - "8080:8080"
+ - "50000-50100:50000-50100"
+ volumes:
+ - iotronic_ssl:/var/lib/iotronic/ssl
+ entrypoint: ["/bin/sh", "-c"]
+ command:
+ - |
+ set -x # DEBUG: Mostra i comandi eseguiti
+ echo "[INFO] Verifica permessi certificati..."
+ ls -l /var/lib/iotronic/ssl
+ while [ ! -e /var/lib/iotronic/ssl/iotronic_CA.pem ] || [ ! -e /var/lib/iotronic/ssl/crossbar.key ]; do
+ echo "[DEBUG] Certificati mancanti:"
+ ls -l /var/lib/iotronic/ssl
+ sleep 2
+ done
+
+ echo "[INFO] Certificati SSL trovati!"
+ ls -l /var/lib/iotronic/ssl
+
+ echo "[INFO] Avvio di iotronic-wstun..."
+ exec node /usr/local/lib/node_modules/@mdslab/wstun/bin/wstun.js -r -s 8080 --ssl=true --key=/var/lib/iotronic/ssl/iotronic_CA.key --cert=/var/lib/iotronic/ssl/iotronic_CA.pem + + iotronic-db: + image: mariadb:bionic + container_name: iotronic-db + restart: unless-stopped + networks: + - s4t + environment: + MYSQL_ROOT_PASSWORD: "s4t" + MYSQL_DATABASE: "unime" + MYSQL_USER: "admin" + MYSQL_PASSWORD: "s4t" + command: > + mysqld --bind-address=0.0.0.0 + --default-storage-engine=innodb + --innodb-file-per-table=on + --max-connections=4096 + --collation-server=utf8_general_ci + --character-set-server=utf8 + --max_allowed_packet=128M + --connect_timeout=120 + --wait_timeout=48800 + --interactive_timeout=48800 + ports: + - "3306:3306" + volumes: + - ./conf_mysql:/etc/mysql + - db_data:/var/lib/mysql + - ./init-db.sql:/docker-entrypoint-initdb.d/init-db.sql + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-ps4t"] + interval: 10s + retries: 5 + start_period: 20s + timeout: 5s + + rabbitmq: + image: rabbitmq:3 + container_name: rabbitmq + restart: unless-stopped + networks: + - s4t + ports: + - "5672:5672" + environment: + RABBIT_PASS: "unime" + healthcheck: + test: ["CMD", "rabbitmqctl", "status"] + interval: 10s + retries: 5 + start_period: 20s + timeout: 5s + entrypoint: ["/bin/bash", "-c"] + command: | + rabbitmq-server & + sleep 30 && + rabbitmqctl add_user openstack unime && + rabbitmqctl set_permissions openstack ".*" ".*" ".*" && + wait -n + + keystone: + image: lucadagati/iotronic-keystone + container_name: keystone + restart: unless-stopped + depends_on: + iotronic-db: + condition: service_healthy + rabbitmq: + condition: service_healthy + networks: + - s4t + environment: + # Credenziali admin e impostazioni Keystone + ADMIN_PASS: "s4t" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + OS_AUTH_URL: "http://keystone:5000/v3" + 
OS_IDENTITY_API_VERSION: "3" + + KEYSTONE_DB_NAME: "keystone" + KEYSTONE_DB_USER: "keystone" + KEYSTONE_DBPASS: "unime" + + DB_HOST: "iotronic-db" + RABBIT_PASS: "unime" + REGION_NAME: "RegionOne" + ports: + - "5000:5000" + volumes: + - ./conf_keystone:/etc/keystone + - keystone_data:/var/lib/keystone + - /var/log/keystone:/var/log/keystone + - /var/log/keystone-api:/var/log/apache2 + command: > + /bin/bash -c " + echo '[INFO] Attesa del database Keystone...'; + until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do + echo '[INFO] Database non ancora pronto, riprovo...'; + sleep 5; + done; + echo '[INFO] Database pronto!'; + mysql -u root -ps4t -h iotronic-db -e \"CREATE DATABASE IF NOT EXISTS keystone; + CREATE DATABASE IF NOT EXISTS iotronic; + DROP USER IF EXISTS 'keystone'@'localhost'; + DROP USER IF EXISTS 'keystone'@'%'; + CREATE USER 'keystone'@'localhost' IDENTIFIED BY 'unime'; + CREATE USER 'keystone'@'%' IDENTIFIED BY 'unime'; + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'; + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'; + DROP USER IF EXISTS 'iotronic'@'localhost'; + DROP USER IF EXISTS 'iotronic'@'%'; + CREATE USER 'iotronic'@'localhost' IDENTIFIED BY 'unime'; + CREATE USER 'iotronic'@'%' IDENTIFIED BY 'unime'; + GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'localhost'; + GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'%'; + FLUSH PRIVILEGES;\"; + echo '[INFO] Creazione delle cartelle per le chiavi Fernet e credenziali...'; + mkdir -p /etc/keystone/fernet-keys; + mkdir -p /etc/keystone/credential-keys; + chown -R keystone:keystone /etc/keystone; + + echo '[INFO] Verifica delle chiavi Fernet...'; + if [ ! 
-f /etc/keystone/fernet-keys/0 ]; then + echo '[INFO] Nessuna chiave Fernet trovata, eseguo fernet_setup...'; + su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; + else + echo '[INFO] Chiavi Fernet già presenti.'; + fi + + echo '[INFO] Verifica delle credenziali crittografate...'; + if [ ! -f /etc/keystone/credential-keys/0 ]; then + echo '[INFO] Nessuna chiave di credenziali trovata, eseguo credential_setup...'; + su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + else + echo '[INFO] Chiavi di credenziali già presenti.'; + fi + + echo '[INFO] Sincronizzazione delle tabelle di Keystone...'; + su -s /bin/sh -c 'keystone-manage db_sync' keystone; + echo '[INFO] Configurazione dei token Fernet...'; + su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Configurazione delle credenziali crittografate...'; + su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Bootstrap di Keystone...'; + su -s /bin/sh -c 'keystone-manage bootstrap --bootstrap-password s4t --bootstrap-admin-url http://keystone:5000/v3 --bootstrap-internal-url http://keystone:5000/v3 --bootstrap-public-url http://keystone:5000/v3 --bootstrap-region-id RegionOne' keystone; + exec apache2ctl -D FOREGROUND" + + iotronic-conductor: + image: lucadagati/iotronic-conductor:latest + container_name: iotronic-conductor + restart: unless-stopped + networks: + - s4t + environment: + # Credenziali DB Iotronic + MYSQL_ROOT_PASSWORD: "s4t" + DB_HOST: "iotronic-db" + IOTRONIC_DB_NAME: "iotronic" + IOTRONIC_DB_USER: "iotronic" + IOTRONIC_DBPASS: "unime" + + # Credenziali OpenStack + OS_AUTH_URL: "http://keystone:5000/v3" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" 
+ + # Stringa di connessione + DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@iotronic-db/iotronic" + ports: + - "8812:8812" + volumes: + - ./conf_conductor:/etc/iotronic + - iotronic_logs:/var/log/iotronic + command: > + /bin/bash -c " + echo '[INFO] Attesa del database MySQL...'; + until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do + echo '[INFO] Database non ancora pronto, riprovo...'; + sleep 5; + done; + iotronic-dbsync; + echo '[INFO] Configurazione dei permessi sui log...'; + chown -R iotronic:iotronic /var/log/iotronic; + echo '[INFO] Avvio di Iotronic Conductor...'; + iotronic-conductor" + + wagent: + image: lucadagati/iotronic-wagent:latest + container_name: iotronic-wagent + restart: unless-stopped + networks: + - s4t + environment: + # DB info + MYSQL_ROOT_PASSWORD: "s4t" + DB_HOST: "iotronic-db" + + # Stringa di connessione + DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@iotronic-db/iotronic" + + # Credenziali OpenStack + OS_AUTH_URL: "http://keystone:5000/v3" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + volumes: + - ./conf_wagent:/etc/iotronic + - iotronic_logs:/var/log/iotronic + command: > + /bin/bash -c " + echo '[INFO] Configurazione dei permessi sui log...'; + chown -R iotronic:iotronic /var/log/iotronic; + echo '[INFO] Avvio del Wagent...'; + exec /usr/local/bin/iotronic-wamp-agent --config-file /etc/iotronic/iotronic.conf" + + iotronic-ui: + image: lucadagati/iotronic-ui:latest + container_name: iotronic-ui + restart: unless-stopped + networks: + - s4t + ports: + - "8585:80" + volumes: + - iotronic-ui_config:/etc/openstack-dashboard + - iotronic-ui_logs:/var/log/apache2 + - ./conf_ui:/etc/openstack-dashboard # <--- Monta tutta la cartella + +networks: + s4t: + driver: bridge + +volumes: + db_data: + keystone_data: + iotronic_logs: + iotronic-ui_logs: + iotronic-ui_config: + crossbar_data: + ca_data: + 
iotronic_ssl: From 4db00eab5d663a7b4a8426452bf63d25da7ed4c3 Mon Sep 17 00:00:00 2001 From: trigiu Date: Sun, 23 Feb 2025 23:21:49 +0000 Subject: [PATCH 09/18] =?UTF-8?q?altro=20step=20intermedio.=20problemi=20a?= =?UTF-8?q?ncora=20con=20la=20parte=20di=20horizon-ui/iot=20che=20non=20ri?= =?UTF-8?q?esce=20a=20ritrovare=20<>=20le=20informazioni=20dal=20D?= =?UTF-8?q?B,=20che=20per=C3=B2=20sono=20presenti?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 2-new-mysql/1-mysql | 27 + 2-new-mysql/99-openstack.conf | 8 + 2-new-mysql/Dockerfile | 5 + 2-new-mysql/build | 4 + 2-new-mysql/create_dbs.sql | 9 + 2-new-mysql/create_dbs_FELOOCA_TEST.sql | 6 + 2-new-mysql/create_dbs_UNIME_TEST.sql | 12 + 2-new-mysql/initfile.sql | 20 + compose_deployment/99-openstack.conf | 8 + compose_deployment/conf-rabbit-script.sh | 23 + compose_deployment/conf_keystone/ck0 | 1 + compose_deployment/conf_keystone/ck1 | 1 + .../conf_keystone/credential-keys/0 | 1 + .../conf_keystone/credential-keys/1 | 1 + .../conf_keystone/fernet-keys/0 | 1 + .../conf_keystone/fernet-keys/1 | 1 + compose_deployment/conf_keystone/fk0 | 1 + compose_deployment/conf_keystone/fk1 | 1 + .../conf_mysql/99-openstack.conf | 2 +- compose_deployment/docker-compose.yml | 66 +- compose_deployment/docker-compose_test-db.yml | 358 +++++++ compose_deployment/index.html | 349 +++++++ compose_deployment/keystone.sql | 886 ++++++++++++++++++ compose_deployment/logs.txt | 407 ++++++++ 24 files changed, 2170 insertions(+), 28 deletions(-) create mode 100755 2-new-mysql/1-mysql create mode 100644 2-new-mysql/99-openstack.conf create mode 100644 2-new-mysql/Dockerfile create mode 100755 2-new-mysql/build create mode 100644 2-new-mysql/create_dbs.sql create mode 100644 2-new-mysql/create_dbs_FELOOCA_TEST.sql create mode 100644 2-new-mysql/create_dbs_UNIME_TEST.sql create mode 100644 2-new-mysql/initfile.sql create mode 100644 compose_deployment/99-openstack.conf create mode 100755 
compose_deployment/conf-rabbit-script.sh create mode 100644 compose_deployment/conf_keystone/ck0 create mode 100644 compose_deployment/conf_keystone/ck1 create mode 100644 compose_deployment/conf_keystone/credential-keys/0 create mode 100644 compose_deployment/conf_keystone/credential-keys/1 create mode 100644 compose_deployment/conf_keystone/fernet-keys/0 create mode 100644 compose_deployment/conf_keystone/fernet-keys/1 create mode 100644 compose_deployment/conf_keystone/fk0 create mode 100644 compose_deployment/conf_keystone/fk1 create mode 100644 compose_deployment/docker-compose_test-db.yml create mode 100644 compose_deployment/index.html create mode 100644 compose_deployment/keystone.sql create mode 100644 compose_deployment/logs.txt diff --git a/2-new-mysql/1-mysql b/2-new-mysql/1-mysql new file mode 100755 index 0000000..f279697 --- /dev/null +++ b/2-new-mysql/1-mysql @@ -0,0 +1,27 @@ +#! /bin/bash + + +#if [ "$EUID" -ne 0 ] +# then echo "Please run as root" +# exit +#fi + +MYSQL_ROOT_PASSWORD="unime" + + +docker create \ + --name=unime_test_iotronic_db\ + --network=iotronic_network\ + -p 53306:3306 \ + --restart unless-stopped\ + -e MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD \ + -v unime_test_iotronic_db_data:/var/lib/mysql \ + -v unime_test_iotronic_db_config:/etc/mysql \ +mariadb:focal + +docker cp create_dbs.sql unime_test_iotronic_db:/docker-entrypoint-initdb.d/create_dbs.sql +docker cp 99-openstack.conf unime_test_iotronic_db:/etc/mysql/mariadb.conf.d/99-openstack.cnf + +docker start unime_test_iotronic_db + +echo -e "\e[32mCompleted but wait mariadb to be ready using docker logs -f unime_test_iotronic_db\e[0m" diff --git a/2-new-mysql/99-openstack.conf b/2-new-mysql/99-openstack.conf new file mode 100644 index 0000000..8df566d --- /dev/null +++ b/2-new-mysql/99-openstack.conf @@ -0,0 +1,8 @@ +[mysqld] +bind-address = 0.0.0.0 + +default-storage-engine = innodb +innodb_file_per_table = on +max_connections = 4096 +collation-server = utf8_general_ci 
+character-set-server = utf8 \ No newline at end of file diff --git a/2-new-mysql/Dockerfile b/2-new-mysql/Dockerfile new file mode 100644 index 0000000..f695dae --- /dev/null +++ b/2-new-mysql/Dockerfile @@ -0,0 +1,5 @@ +#FROM mariadb:10.7.1-focal +FROM mariadb:10.3 + +COPY initfile.sql /docker-entrypoint-initdb.d/initfile.sql +COPY 99-openstack.conf /etc/mysql/mariadb.conf.d/99-openstack.conf diff --git a/2-new-mysql/build b/2-new-mysql/build new file mode 100755 index 0000000..eba856b --- /dev/null +++ b/2-new-mysql/build @@ -0,0 +1,4 @@ +#! /bin/bash + +VERSION=2.0 +docker build -t unime/mariadb_exp:focal . diff --git a/2-new-mysql/create_dbs.sql b/2-new-mysql/create_dbs.sql new file mode 100644 index 0000000..3abb251 --- /dev/null +++ b/2-new-mysql/create_dbs.sql @@ -0,0 +1,9 @@ +CREATE DATABASE keystone; +GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'UN1M3-KEYSTONE_DBPASS'; +GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'UN1M3-KEYSTONE_DBPASS'; +CREATE DATABASE iotronic; +GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'localhost' IDENTIFIED BY 'UN1M3-IOTRONIC_DBPASS'; +GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'%' IDENTIFIED BY 'UN1M3-IOTRONIC_DBPASS'; +-- CREATE DATABASE designate; +-- GRANT ALL PRIVILEGES ON designate.* TO 'designate'@'localhost' IDENTIFIED BY 'DESIGNATE_DBPASS'; +-- GRANT ALL PRIVILEGES ON designate.* TO 'designate'@'%' IDENTIFIED BY 'DESIGNATE_DBPASS'; diff --git a/2-new-mysql/create_dbs_FELOOCA_TEST.sql b/2-new-mysql/create_dbs_FELOOCA_TEST.sql new file mode 100644 index 0000000..bfdd93f --- /dev/null +++ b/2-new-mysql/create_dbs_FELOOCA_TEST.sql @@ -0,0 +1,6 @@ +CREATE DATABASE felooca_test_keystone; +GRANT ALL PRIVILEGES ON felooca_test_keystone.* TO 'fe_t_keystone'@'localhost' IDENTIFIED BY 'f3l00caTEST'; +GRANT ALL PRIVILEGES ON felooca_test_keystone.* TO 'fe_t_keystone'@'%' IDENTIFIED BY 'f3l00caTEST'; +CREATE DATABASE felooca_test_iotronic; +GRANT ALL PRIVILEGES ON 
felooca_test_iotronic.* TO 'fe_t_iotronic'@'localhost' IDENTIFIED BY 'f3l00caTEST'; +GRANT ALL PRIVILEGES ON felooca_test_iotronic.* TO 'fe_t_iotronic'@'%' IDENTIFIED BY 'f3l00caTEST'; \ No newline at end of file diff --git a/2-new-mysql/create_dbs_UNIME_TEST.sql b/2-new-mysql/create_dbs_UNIME_TEST.sql new file mode 100644 index 0000000..1452619 --- /dev/null +++ b/2-new-mysql/create_dbs_UNIME_TEST.sql @@ -0,0 +1,12 @@ +CREATE USER IF NOT EXISTS s4t_keystone@localhost IDENTIFIED BY 'Um3d3m0n'; +SET PASSWORD FOR s4t_keystone@localhost = PASSWORD('Um3d3m0n'); + +CREATE USER IF NOT EXISTS s4t_iotronic@localhost IDENTIFIED BY 'Um3d3m0n'; +SET PASSWORD FOR s4t_iotronic@localhost = PASSWORD('Um3d3m0n'); + +CREATE DATABASE s4t_keystone; +GRANT ALL PRIVILEGES ON s4t_keystone.* TO 's4t_keystone'@'localhost' IDENTIFIED BY 'Um3d3m0n'; +GRANT ALL PRIVILEGES ON s4t_keystone.* TO 's4t_keystone'@'%' IDENTIFIED BY 'Um3d3m0n'; +CREATE DATABASE s4t_iotronic; +GRANT ALL PRIVILEGES ON s4t_iotronic.* TO 's4t_iotronic'@'localhost' IDENTIFIED BY 'Um3d3m0n'; +GRANT ALL PRIVILEGES ON s4t_iotronic.* TO 's4t_iotronic'@'%' IDENTIFIED BY 'Um3d3m0n'; diff --git a/2-new-mysql/initfile.sql b/2-new-mysql/initfile.sql new file mode 100644 index 0000000..f1bacd2 --- /dev/null +++ b/2-new-mysql/initfile.sql @@ -0,0 +1,20 @@ +CREATE USER IF NOT EXISTS s4t_keystone@localhost IDENTIFIED BY 'Um3d3m0n'; +SET PASSWORD FOR s4t_keystone@localhost = PASSWORD('Um3d3m0n'); + +CREATE USER IF NOT EXISTS s4t_iotronic@localhost IDENTIFIED BY 'Um3d3m0n'; +SET PASSWORD FOR s4t_iotronic@localhost = PASSWORD('Um3d3m0n'); + +CREATE USER IF NOT EXISTS s4t_designate@localhost IDENTIFIED BY 'Um3d3m0n'; +SET PASSWORD FOR s4t_designate@localhost = PASSWORD('Um3d3m0n'); + +CREATE DATABASE s4t_keystone; +GRANT ALL PRIVILEGES ON s4t_keystone.* TO 's4t_keystone'@'localhost' IDENTIFIED BY 'Um3d3m0n'; +GRANT ALL PRIVILEGES ON s4t_keystone.* TO 's4t_keystone'@'%' IDENTIFIED BY 'Um3d3m0n'; + +CREATE DATABASE s4t_iotronic; +GRANT ALL 
PRIVILEGES ON s4t_iotronic.* TO 's4t_iotronic'@'localhost' IDENTIFIED BY 'Um3d3m0n'; +GRANT ALL PRIVILEGES ON s4t_iotronic.* TO 's4t_iotronic'@'%' IDENTIFIED BY 'Um3d3m0n'; + +--CREATE DATABASE s4t_designate; +--GRANT ALL PRIVILEGES ON s4t_designate.* TO 's4t_designate'@'localhost' IDENTIFIED BY 'sm3d3m0n'; +--GRANT ALL PRIVILEGES ON s4t_designate.* TO 's4t_designate'@'%' IDENTIFIED BY 'sm3d3m0n'; diff --git a/compose_deployment/99-openstack.conf b/compose_deployment/99-openstack.conf new file mode 100644 index 0000000..8df566d --- /dev/null +++ b/compose_deployment/99-openstack.conf @@ -0,0 +1,8 @@ +[mysqld] +bind-address = 0.0.0.0 + +default-storage-engine = innodb +innodb_file_per_table = on +max_connections = 4096 +collation-server = utf8_general_ci +character-set-server = utf8 \ No newline at end of file diff --git a/compose_deployment/conf-rabbit-script.sh b/compose_deployment/conf-rabbit-script.sh new file mode 100755 index 0000000..9d941d8 --- /dev/null +++ b/compose_deployment/conf-rabbit-script.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Avvia RabbitMQ in background +rabbitmq-server & + +# Attendi che RabbitMQ sia pronto +sleep 30 + +rabbitmqctl start_app + +# Aggiungi l'utente OpenStack +rabbitmqctl add_user openstack unime || echo "Utente già esistente" +echo "USER openstack added" + +# Attendi di nuovo per sicurezza +sleep 10 + +# Imposta i permessi +rabbitmqctl set_permissions openstack ".*" ".*" ".*" +rabbitmqctl set_permissions -p / openstack ".*" ".*" ".*" + +# Mantieni il container attivo +wait diff --git a/compose_deployment/conf_keystone/ck0 b/compose_deployment/conf_keystone/ck0 new file mode 100644 index 0000000..49623a7 --- /dev/null +++ b/compose_deployment/conf_keystone/ck0 @@ -0,0 +1 @@ +c4hrWpGpoNCcAg76fbJu2oR_wGbejRSigOgLV_tiN0Y= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/ck1 b/compose_deployment/conf_keystone/ck1 new file mode 100644 index 0000000..879e4a1 --- /dev/null +++ b/compose_deployment/conf_keystone/ck1 
@@ -0,0 +1 @@ +zjDDEToEprsRRoD250SzK1miVte-_GAHaDsHBysw7Lg= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/credential-keys/0 b/compose_deployment/conf_keystone/credential-keys/0 new file mode 100644 index 0000000..dc9e0d9 --- /dev/null +++ b/compose_deployment/conf_keystone/credential-keys/0 @@ -0,0 +1 @@ +61sERo_5ilNDrQoKTOlAWY41vl91OUywHl8nYWOUDgs= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/credential-keys/1 b/compose_deployment/conf_keystone/credential-keys/1 new file mode 100644 index 0000000..4c5369b --- /dev/null +++ b/compose_deployment/conf_keystone/credential-keys/1 @@ -0,0 +1 @@ +sO7eQ2RcUnA6jp7uiFXkQWnp0Rui7vqsMpEIqLdY0XE= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/fernet-keys/0 b/compose_deployment/conf_keystone/fernet-keys/0 new file mode 100644 index 0000000..1259f0f --- /dev/null +++ b/compose_deployment/conf_keystone/fernet-keys/0 @@ -0,0 +1 @@ +JfzPQSSw65t_HO5eNVrNKhVWX7k0_UNxA183dK1iNCo= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/fernet-keys/1 b/compose_deployment/conf_keystone/fernet-keys/1 new file mode 100644 index 0000000..fdaf82a --- /dev/null +++ b/compose_deployment/conf_keystone/fernet-keys/1 @@ -0,0 +1 @@ +83MyTt4X-OSRVGwIA4VrnJ3UxQAFR6MJptMo3z8-ntY= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/fk0 b/compose_deployment/conf_keystone/fk0 new file mode 100644 index 0000000..28a5594 --- /dev/null +++ b/compose_deployment/conf_keystone/fk0 @@ -0,0 +1 @@ +eHddVpZvjvHjnvcV0LApjlpKx3c5d3Iv0jVZ_hrkOPc= \ No newline at end of file diff --git a/compose_deployment/conf_keystone/fk1 b/compose_deployment/conf_keystone/fk1 new file mode 100644 index 0000000..2e55e5c --- /dev/null +++ b/compose_deployment/conf_keystone/fk1 @@ -0,0 +1 @@ +c3GGEqtpYrNihywHG5kEm7cRDMJvQroGw_oNsIZTRpA= \ No newline at end of file diff --git a/compose_deployment/conf_mysql/99-openstack.conf 
b/compose_deployment/conf_mysql/99-openstack.conf index 4cd6688..9d52d9f 100644 --- a/compose_deployment/conf_mysql/99-openstack.conf +++ b/compose_deployment/conf_mysql/99-openstack.conf @@ -10,4 +10,4 @@ wait_timeout = 600 interactive_timeout = 600 net_read_timeout = 600 net_write_timeout = 600 - +max_allowed_packet=67108864; diff --git a/compose_deployment/docker-compose.yml b/compose_deployment/docker-compose.yml index b807a03..bf498d9 100644 --- a/compose_deployment/docker-compose.yml +++ b/compose_deployment/docker-compose.yml @@ -144,35 +144,25 @@ services: exec node /usr/local/lib/node_modules/@mdslab/wstun/bin/wstun.js -r -s 8080 --ssl=true --key=/var/lib/iotronic/ssl/iotronic_CA.key --cert=/var/lib/iotronic/ssl/iotronic_CA.pem iotronic-db: - image: mariadb:bionic + image: mariadb:focal container_name: iotronic-db restart: unless-stopped networks: - s4t environment: - MYSQL_ROOT_PASSWORD: "s4t" + MYSQL_ROOT_PASSWORD: "unime" MYSQL_DATABASE: "unime" MYSQL_USER: "admin" MYSQL_PASSWORD: "s4t" - command: > - mysqld --bind-address=0.0.0.0 - --default-storage-engine=innodb - --innodb-file-per-table=on - --max-connections=4096 - --collation-server=utf8_general_ci - --character-set-server=utf8 - --max_allowed_packet=128M - --connect_timeout=120 - --wait_timeout=48800 - --interactive_timeout=48800 ports: - "3306:3306" volumes: - - ./conf_mysql:/etc/mysql - - db_data:/var/lib/mysql - - ./init-db.sql:/docker-entrypoint-initdb.d/init-db.sql + - unime_test_iotronic_db_data:/var/lib/mysql + - unime_test_iotronic_db_config:/etc/mysql + - ./create_dbs.sql:/docker-entrypoint-initdb.d/create_dbs.sql + - ./99-openstack.conf:/etc/mysql/99-openstack.cnf healthcheck: - test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-ps4t"] + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-punime"] interval: 10s retries: 5 start_period: 20s @@ -188,6 +178,8 @@ services: - "5672:5672" environment: RABBIT_PASS: "unime" + volumes: + - 
./conf-rabbit-script.sh:/docker-entrypoint-initRabbit.sh healthcheck: test: ["CMD", "rabbitmqctl", "status"] interval: 10s @@ -195,12 +187,11 @@ services: start_period: 20s timeout: 5s entrypoint: ["/bin/bash", "-c"] - command: | - rabbitmq-server & - sleep 30 && - rabbitmqctl add_user openstack unime && - rabbitmqctl set_permissions openstack ".*" ".*" ".*" && - wait -n + command: + - | + chmod +x /docker-entrypoint-initRabbit.sh + /bin/bash /docker-entrypoint-initRabbit.sh + keystone: image: lucadagati/iotronic-keystone @@ -241,12 +232,12 @@ services: command: > /bin/bash -c " echo '[INFO] Attesa del database Keystone...'; - until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do + until mysql -h iotronic-db -uroot -punime -e 'SELECT 1' >/dev/null 2>&1; do echo '[INFO] Database non ancora pronto, riprovo...'; sleep 5; done; echo '[INFO] Database pronto!'; - mysql -u root -ps4t -h iotronic-db -e \"CREATE DATABASE IF NOT EXISTS keystone; + mysql -u root -punime -h iotronic-db -e \"CREATE DATABASE IF NOT EXISTS keystone; CREATE DATABASE IF NOT EXISTS iotronic; DROP USER IF EXISTS 'keystone'@'localhost'; DROP USER IF EXISTS 'keystone'@'%'; @@ -290,7 +281,26 @@ services: su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; echo '[INFO] Bootstrap di Keystone...'; su -s /bin/sh -c 'keystone-manage bootstrap --bootstrap-password s4t --bootstrap-admin-url http://keystone:5000/v3 --bootstrap-internal-url http://keystone:5000/v3 --bootstrap-public-url http://keystone:5000/v3 --bootstrap-region-id RegionOne' keystone; - exec apache2ctl -D FOREGROUND" + exec apache2ctl -D FOREGROUND & + sleep 20; + echo '[INFO] Creazione dei servizi di Iotronic...'; + su -s /bin/sh -c 'openstack project create --domain default --description \"Service Project\" service' keystone; + su -s /bin/sh -c 'openstack service create iot --name Iotronic' keystone; + echo '[INFO] Iotronic User Create...'; + su -s /bin/sh -c 
'openstack user create --password unime iotronic' keystone; + echo '[INFO] Iotronic roles...'; + su -s /bin/sh -c 'openstack role add --project service --user iotronic admin' keystone; + su -s /bin/sh -c 'openstack role create admin_iot_project' keystone; + su -s /bin/sh -c 'openstack role create manager_iot_project' keystone; + su -s /bin/sh -c 'openstack role create admin_iot_project' keystone; + su -s /bin/sh -c 'openstack role create user_iot' keystone; + su -s /bin/sh -c 'openstack role add --project service --user iotronic admin_iot_project' keystone; + su -s /bin/sh -c 'openstack role add --project admin --user admin admin_iot_project' keystone; + echo '[INFO] Iotronic endpoints...'; + su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot public controller:8812' keystone; + su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot internal iotronic-conductor:8812' keystone; + su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot admin iotronic-conductor:8812' keystone; + su -s /bin/sh -c 'sleep infinity' keystone;" iotronic-conductor: image: lucadagati/iotronic-conductor:latest @@ -324,7 +334,7 @@ services: command: > /bin/bash -c " echo '[INFO] Attesa del database MySQL...'; - until mysql -h iotronic-db -uroot -ps4t -e 'SELECT 1' >/dev/null 2>&1; do + until mysql -h iotronic-db -uroot -punime -e 'SELECT 1' >/dev/null 2>&1; do echo '[INFO] Database non ancora pronto, riprovo...'; sleep 5; done; @@ -391,3 +401,5 @@ volumes: crossbar_data: ca_data: iotronic_ssl: + unime_test_iotronic_db_data: + unime_test_iotronic_db_config: diff --git a/compose_deployment/docker-compose_test-db.yml b/compose_deployment/docker-compose_test-db.yml new file mode 100644 index 0000000..02c8a56 --- /dev/null +++ b/compose_deployment/docker-compose_test-db.yml @@ -0,0 +1,358 @@ +version: "3.8" + +services: + ca_service: + image: debian:buster + container_name: ca_service + networks: + - iotronic_network + volumes: + - iotronic_ssl:/etc/ssl/iotronic 
# Condiviso con iotronic-wstun + entrypoint: ["/bin/bash", "-c"] + command: + - | + echo "[INFO] Installazione di OpenSSL..." + apt-get update && apt-get install -y openssl && + + echo "[INFO] Generazione della Root CA..." + mkdir -p /etc/ssl/iotronic && + cd /etc/ssl/iotronic && + + openssl genrsa -out iotronic_CA.key 2048 && + openssl req -x509 -new -nodes -key iotronic_CA.key -sha256 -days 18250 \ + -subj "/C=IT/O=iotronic" -out iotronic_CA.pem && + + echo "[INFO] Generazione della chiave privata e del certificato per Crossbar..." + openssl genrsa -out crossbar.key 2048 && + openssl req -new -key crossbar.key -subj "/C=IT/O=iotronic/CN=crossbar" -out crossbar.csr && + openssl x509 -req -in crossbar.csr -CA iotronic_CA.pem -CAkey iotronic_CA.key -CAcreateserial -out crossbar.pem -days 18250 -sha256 && + + echo "[INFO] Impostazione permessi certificati..." + chmod 644 iotronic_CA.key iotronic_CA.pem crossbar.key crossbar.pem + chmod 755 /etc/ssl/iotronic + + echo "[INFO] Certificati generati con successo." + tail -f /dev/null + + crossbar: + image: crossbario/crossbar + container_name: crossbar + restart: unless-stopped + networks: + - iotronic_network + volumes: + - iotronic_ssl:/node/.crossbar/ssl # Condiviso con iotronic-wstun + - crossbar_data:/node/.crossbar + ports: + - "8181:8181" + entrypoint: ["/bin/sh", "-c"] + command: + - | + echo "[INFO] Attesa dei certificati..." + while [ ! -f /node/.crossbar/ssl/crossbar.pem ] || [ ! -f /node/.crossbar/ssl/crossbar.key ]; do + sleep 2 + done + echo "[INFO] Certificati trovati!" + + echo "[INFO] Scrittura configurazione Crossbar..." 
+ cat < /node/.crossbar/config.json + { + "version": 2, + "controller": {}, + "workers": [ + { + "type": "router", + "realms": [ + { + "name": "s4t", + "roles": [ + { + "name": "anonymous", + "permissions": [ + { + "uri": "*", + "allow": { + "publish": true, + "subscribe": true, + "call": true, + "register": true + } + } + ] + } + ] + } + ], + "transports": [ + { + "type": "websocket", + "endpoint": { + "type": "tcp", + "port": 8181, + "tls": { + "chain_certificates": ["/node/.crossbar/ssl/iotronic_CA.pem"], + "key": "/node/.crossbar/ssl/crossbar.key", + "certificate": "/node/.crossbar/ssl/crossbar.pem" + } + }, + "options":{ + "enable_webstatus": true, + "fail_by_drop": true, + "open_handshake_timeout": 2500, + "close_handshake_timeout": 1000, + "auto_ping_interval": 30000, + "auto_ping_timeout": 5000, + "auto_ping_size": 13 + } + } + ] + } + ] + } + EOF + + echo "[INFO] Avvio di Crossbar..." + crossbar start + + iotronic-wstun: + image: lucadagati/iotronic-wstun:latest + container_name: iotronic-wstun + restart: unless-stopped + networks: + - iotronic_network + ports: + - "8080:8080" + - "50000-50100:50000-50100" + volumes: + - iotronic_ssl:/var/lib/iotronic/ssl + entrypoint: ["/bin/sh", "-c"] + command: + - | + set -x # DEBUG: Mostra i comandi eseguiti + echo "[INFO] Verifica permessi certificati..." + ls -l /var/lib/iotronic/ssl + while [ ! -e /var/lib/iotronic/ssl/iotronic_CA.pem ] || [ ! -e /var/lib/iotronic/ssl/crossbar.key ]; do + echo "[DEBUG] Certificati mancanti:" + ls -l /var/lib/iotronic/ssl + sleep 2 + done + + echo "[INFO] Certificati SSL trovati!" + ls -l /var/lib/iotronic/ssl + + echo "[INFO] Avvio di iotronic-wstun..." 
+ exec node /usr/local/lib/node_modules/@mdslab/wstun/bin/wstun.js -r -s 8080 --ssl=true --key=/var/lib/iotronic/ssl/iotronic_CA.key --cert=/var/lib/iotronic/ssl/iotronic_CA.pem + + + rabbitmq: + image: rabbitmq:3 + container_name: rabbitmq + restart: unless-stopped + networks: + - iotronic_network + ports: + - "5672:5672" + environment: + RABBIT_PASS: "unime" + volumes: + - ./conf-rabbit-script.sh:/docker-entrypoint-initRabbit.sh + healthcheck: + test: ["CMD", "rabbitmqctl", "status"] + interval: 10s + retries: 5 + start_period: 20s + timeout: 5s + entrypoint: ["/bin/bash", "-c"] + command: + - | + chmod +x /docker-entrypoint-initRabbit.sh + /bin/bash /docker-entrypoint-initRabbit.sh + + + keystone: + image: lucadagati/iotronic-keystone + container_name: keystone + restart: unless-stopped + depends_on: + rabbitmq: + condition: service_healthy + networks: + - iotronic_network + environment: + # Credenziali admin e impostazioni Keystone + ADMIN_PASS: "s4t" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + OS_AUTH_URL: "http://keystone:5000/v3" + OS_IDENTITY_API_VERSION: "3" + + KEYSTONE_DB_NAME: "keystone" + KEYSTONE_DB_USER: "keystone" + KEYSTONE_DBPASS: "unime" + + DB_HOST: "unime_test_iotronic_db" + RABBIT_PASS: "unime" + REGION_NAME: "RegionOne" + ports: + - "5000:5000" + volumes: + - ./conf_keystone:/etc/keystone + - keystone_data:/var/lib/keystone + - /var/log/keystone:/var/log/keystone + - /var/log/keystone-api:/var/log/apache2 + command: > + /bin/bash -c " + echo '[INFO] Attesa del database Keystone...'; + until mysql -h unime_test_iotronic_db -uroot -punime -e 'SELECT 1' >/dev/null 2>&1; do + echo '[INFO] Database non ancora pronto, riprovo...'; + sleep 5; + done; + echo '[INFO] Database pronto!'; + mysql -u root -punime -h unime_test_iotronic_db -e \"CREATE DATABASE IF NOT EXISTS keystone; + CREATE DATABASE IF NOT EXISTS iotronic; + DROP USER IF EXISTS 
'keystone'@'localhost'; + DROP USER IF EXISTS 'keystone'@'%'; + CREATE USER 'keystone'@'localhost' IDENTIFIED BY 'unime'; + CREATE USER 'keystone'@'%' IDENTIFIED BY 'unime'; + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'; + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'; + DROP USER IF EXISTS 'iotronic'@'localhost'; + DROP USER IF EXISTS 'iotronic'@'%'; + CREATE USER 'iotronic'@'localhost' IDENTIFIED BY 'unime'; + CREATE USER 'iotronic'@'%' IDENTIFIED BY 'unime'; + GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'localhost'; + GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'%'; + FLUSH PRIVILEGES;\"; + echo '[INFO] Creazione delle cartelle per le chiavi Fernet e credenziali...'; + mkdir -p /etc/keystone/fernet-keys; + mkdir -p /etc/keystone/credential-keys; + chown -R keystone:keystone /etc/keystone; + + echo '[INFO] Verifica delle chiavi Fernet...'; + if [ ! -f /etc/keystone/fernet-keys/0 ]; then + echo '[INFO] Nessuna chiave Fernet trovata, eseguo fernet_setup...'; + su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; + else + echo '[INFO] Chiavi Fernet già presenti.'; + fi + + echo '[INFO] Verifica delle credenziali crittografate...'; + if [ ! 
-f /etc/keystone/credential-keys/0 ]; then + echo '[INFO] Nessuna chiave di credenziali trovata, eseguo credential_setup...'; + su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + else + echo '[INFO] Chiavi di credenziali già presenti.'; + fi + + echo '[INFO] Sincronizzazione delle tabelle di Keystone...'; + su -s /bin/sh -c 'keystone-manage db_sync' keystone; + echo '[INFO] Configurazione dei token Fernet...'; + su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Configurazione delle credenziali crittografate...'; + su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Bootstrap di Keystone...'; + su -s /bin/sh -c 'keystone-manage bootstrap --bootstrap-password s4t --bootstrap-admin-url http://keystone:5000/v3 --bootstrap-internal-url http://keystone:5000/v3 --bootstrap-public-url http://keystone:5000/v3 --bootstrap-region-id RegionOne' keystone; + exec apache2ctl -D FOREGROUND" + + iotronic-conductor: + image: lucadagati/iotronic-conductor:latest + container_name: iotronic-conductor + restart: unless-stopped + networks: + - iotronic_network + environment: + # Credenziali DB Iotronic + MYSQL_ROOT_PASSWORD: "unime" + DB_HOST: "unime_test_iotronic_db" + IOTRONIC_DB_NAME: "iotronic" + IOTRONIC_DB_USER: "iotronic" + IOTRONIC_DBPASS: "unime" + + # Credenziali OpenStack + OS_AUTH_URL: "http://keystone:5000/v3" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + + # Stringa di connessione + DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@unime_test_iotronic_db/iotronic" + ports: + - "8812:8812" + volumes: + - ./conf_conductor:/etc/iotronic + - iotronic_logs:/var/log/iotronic + command: > + /bin/bash -c " + echo '[INFO] Attesa del database MySQL...'; + until mysql -h 
unime_test_iotronic_db -uroot -punime -e 'SELECT 1' >/dev/null 2>&1; do + echo '[INFO] Database non ancora pronto, riprovo...'; + sleep 5; + done; + iotronic-dbsync; + echo '[INFO] Configurazione dei permessi sui log...'; + chown -R iotronic:iotronic /var/log/iotronic; + echo '[INFO] Avvio di Iotronic Conductor...'; + iotronic-conductor" + + wagent: + image: lucadagati/iotronic-wagent:latest + container_name: iotronic-wagent + restart: unless-stopped + networks: + - iotronic_network + environment: + # DB info + MYSQL_ROOT_PASSWORD: "unime" + DB_HOST: "unime_test_iotronic_db" + + # Stringa di connessione + DB_CONNECTION_STRING: "mysql+pymysql://iotronic:unime@unime_test_iotronic_db/iotronic" + + # Credenziali OpenStack + OS_AUTH_URL: "http://keystone:5000/v3" + OS_USERNAME: "admin" + OS_PASSWORD: "s4t" + OS_PROJECT_NAME: "admin" + OS_USER_DOMAIN_NAME: "Default" + OS_PROJECT_DOMAIN_NAME: "Default" + volumes: + - ./conf_wagent:/etc/iotronic + - iotronic_logs:/var/log/iotronic + command: > + /bin/bash -c " + echo '[INFO] Configurazione dei permessi sui log...'; + chown -R iotronic:iotronic /var/log/iotronic; + echo '[INFO] Avvio del Wagent...'; + exec /usr/local/bin/iotronic-wamp-agent --config-file /etc/iotronic/iotronic.conf" + + iotronic-ui: + image: lucadagati/iotronic-ui:latest + container_name: iotronic-ui + restart: unless-stopped + networks: + - iotronic_network + ports: + - "8585:80" + volumes: + - iotronic-ui_config:/etc/openstack-dashboard + - iotronic-ui_logs:/var/log/apache2 + - ./conf_ui:/etc/openstack-dashboard # <--- Monta tutta la cartella + +networks: + iotronic_network: + driver: bridge + +volumes: + db_data: + keystone_data: + iotronic_logs: + iotronic-ui_logs: + iotronic-ui_config: + crossbar_data: + ca_data: + iotronic_ssl: diff --git a/compose_deployment/index.html b/compose_deployment/index.html new file mode 100644 index 0000000..e8d2ed0 --- /dev/null +++ b/compose_deployment/index.html @@ -0,0 +1,349 @@ + + + + + + + + + + + + Login - 
OpenStack Dashboard + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/compose_deployment/keystone.sql b/compose_deployment/keystone.sql new file mode 100644 index 0000000..c07e9a9 --- /dev/null +++ b/compose_deployment/keystone.sql @@ -0,0 +1,886 @@ +-- MariaDB dump 10.19 Distrib 10.7.1-MariaDB, for debian-linux-gnu (x86_64) +-- +-- Host: localhost Database: keystone +-- ------------------------------------------------------ +-- Server version 10.7.1-MariaDB-1:10.7.1+maria~focal + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8mb4 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Table structure for table `access_rule` +-- + +DROP TABLE IF EXISTS `access_rule`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `access_rule` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `service` varchar(64) DEFAULT NULL, + `path` varchar(128) DEFAULT NULL, + `method` varchar(16) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `access_token` +-- + +DROP TABLE IF EXISTS `access_token`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE 
`access_token` ( + `id` varchar(64) NOT NULL, + `access_secret` varchar(64) NOT NULL, + `authorizing_user_id` varchar(64) NOT NULL, + `project_id` varchar(64) NOT NULL, + `role_ids` text NOT NULL, + `consumer_id` varchar(64) NOT NULL, + `expires_at` varchar(64) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `ix_access_token_consumer_id` (`consumer_id`), + KEY `ix_access_token_authorizing_user_id` (`authorizing_user_id`), + CONSTRAINT `access_token_ibfk_1` FOREIGN KEY (`consumer_id`) REFERENCES `consumer` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `application_credential` +-- + +DROP TABLE IF EXISTS `application_credential`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `application_credential` ( + `internal_id` int(11) NOT NULL AUTO_INCREMENT, + `id` varchar(64) NOT NULL, + `name` varchar(255) NOT NULL, + `secret_hash` varchar(255) NOT NULL, + `description` text DEFAULT NULL, + `user_id` varchar(64) NOT NULL, + `project_id` varchar(64) DEFAULT NULL, + `expires_at` bigint(20) DEFAULT NULL, + `system` varchar(64) DEFAULT NULL, + `unrestricted` tinyint(1) DEFAULT NULL, + PRIMARY KEY (`internal_id`), + UNIQUE KEY `duplicate_app_cred_constraint` (`user_id`,`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `application_credential_access_rule` +-- + +DROP TABLE IF EXISTS `application_credential_access_rule`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `application_credential_access_rule` ( + `application_credential_id` int(11) NOT NULL, + `access_rule_id` int(11) NOT NULL, + PRIMARY KEY (`application_credential_id`,`access_rule_id`), + KEY `access_rule_id` (`access_rule_id`), + CONSTRAINT `application_credential_access_rule_ibfk_1` FOREIGN 
KEY (`application_credential_id`) REFERENCES `application_credential` (`internal_id`) ON DELETE CASCADE, + CONSTRAINT `application_credential_access_rule_ibfk_2` FOREIGN KEY (`access_rule_id`) REFERENCES `access_rule` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `application_credential_role` +-- + +DROP TABLE IF EXISTS `application_credential_role`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `application_credential_role` ( + `application_credential_id` int(11) NOT NULL, + `role_id` varchar(64) NOT NULL, + PRIMARY KEY (`application_credential_id`,`role_id`), + CONSTRAINT `application_credential_role_ibfk_1` FOREIGN KEY (`application_credential_id`) REFERENCES `application_credential` (`internal_id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `assignment` +-- + +DROP TABLE IF EXISTS `assignment`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `assignment` ( + `type` enum('UserProject','GroupProject','UserDomain','GroupDomain') NOT NULL, + `actor_id` varchar(64) NOT NULL, + `target_id` varchar(64) NOT NULL, + `role_id` varchar(64) NOT NULL, + `inherited` tinyint(1) NOT NULL, + PRIMARY KEY (`type`,`actor_id`,`target_id`,`role_id`,`inherited`), + KEY `ix_actor_id` (`actor_id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`inherited` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `config_register` +-- + +DROP TABLE IF EXISTS `config_register`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `config_register` ( + `type` varchar(64) NOT NULL, + 
`domain_id` varchar(64) NOT NULL, + PRIMARY KEY (`type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `consumer` +-- + +DROP TABLE IF EXISTS `consumer`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `consumer` ( + `id` varchar(64) NOT NULL, + `description` varchar(64) DEFAULT NULL, + `secret` varchar(64) NOT NULL, + `extra` text NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `credential` +-- + +DROP TABLE IF EXISTS `credential`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `credential` ( + `id` varchar(64) NOT NULL, + `user_id` varchar(64) NOT NULL, + `project_id` varchar(64) DEFAULT NULL, + `type` varchar(255) NOT NULL, + `extra` text DEFAULT NULL, + `key_hash` varchar(64) NOT NULL, + `encrypted_blob` text NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `endpoint` +-- + +DROP TABLE IF EXISTS `endpoint`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `endpoint` ( + `id` varchar(64) NOT NULL, + `legacy_endpoint_id` varchar(64) DEFAULT NULL, + `interface` varchar(8) NOT NULL, + `service_id` varchar(64) NOT NULL, + `url` text NOT NULL, + `extra` text DEFAULT NULL, + `enabled` tinyint(1) NOT NULL DEFAULT 1, + `region_id` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `service_id` (`service_id`), + KEY `fk_endpoint_region_id` (`region_id`), + CONSTRAINT `endpoint_service_id_fkey` FOREIGN KEY (`service_id`) REFERENCES `service` (`id`), + CONSTRAINT `fk_endpoint_region_id` FOREIGN KEY (`region_id`) REFERENCES `region` 
(`id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`enabled` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `endpoint_group` +-- + +DROP TABLE IF EXISTS `endpoint_group`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `endpoint_group` ( + `id` varchar(64) NOT NULL, + `name` varchar(255) NOT NULL, + `description` text DEFAULT NULL, + `filters` text NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `federated_user` +-- + +DROP TABLE IF EXISTS `federated_user`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `federated_user` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_id` varchar(64) NOT NULL, + `idp_id` varchar(64) NOT NULL, + `protocol_id` varchar(64) NOT NULL, + `unique_id` varchar(255) NOT NULL, + `display_name` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `idp_id` (`idp_id`,`protocol_id`,`unique_id`), + KEY `user_id` (`user_id`), + KEY `federated_user_protocol_id_fkey` (`protocol_id`,`idp_id`), + CONSTRAINT `federated_user_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE, + CONSTRAINT `federated_user_ibfk_2` FOREIGN KEY (`idp_id`) REFERENCES `identity_provider` (`id`) ON DELETE CASCADE, + CONSTRAINT `federated_user_protocol_id_fkey` FOREIGN KEY (`protocol_id`, `idp_id`) REFERENCES `federation_protocol` (`id`, `idp_id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `federation_protocol` +-- + +DROP TABLE IF EXISTS `federation_protocol`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE 
`federation_protocol` ( + `id` varchar(64) NOT NULL, + `idp_id` varchar(64) NOT NULL, + `mapping_id` varchar(64) NOT NULL, + PRIMARY KEY (`id`,`idp_id`), + KEY `idp_id` (`idp_id`), + CONSTRAINT `federation_protocol_ibfk_1` FOREIGN KEY (`idp_id`) REFERENCES `identity_provider` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `group` +-- + +DROP TABLE IF EXISTS `group`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `group` ( + `id` varchar(64) NOT NULL, + `domain_id` varchar(64) NOT NULL, + `name` varchar(64) NOT NULL, + `description` text DEFAULT NULL, + `extra` text DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `ixu_group_name_domain_id` (`domain_id`,`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `id_mapping` +-- + +DROP TABLE IF EXISTS `id_mapping`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `id_mapping` ( + `public_id` varchar(64) NOT NULL, + `domain_id` varchar(64) NOT NULL, + `local_id` varchar(64) NOT NULL, + `entity_type` enum('user','group') NOT NULL, + PRIMARY KEY (`public_id`), + UNIQUE KEY `domain_id` (`domain_id`,`local_id`,`entity_type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `identity_provider` +-- + +DROP TABLE IF EXISTS `identity_provider`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `identity_provider` ( + `id` varchar(64) NOT NULL, + `enabled` tinyint(1) NOT NULL, + `description` text DEFAULT NULL, + `domain_id` varchar(64) NOT NULL, + PRIMARY KEY (`id`), + KEY `domain_id` (`domain_id`), + CONSTRAINT 
`identity_provider_ibfk_1` FOREIGN KEY (`domain_id`) REFERENCES `project` (`id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`enabled` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `idp_remote_ids` +-- + +DROP TABLE IF EXISTS `idp_remote_ids`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `idp_remote_ids` ( + `idp_id` varchar(64) DEFAULT NULL, + `remote_id` varchar(255) NOT NULL, + PRIMARY KEY (`remote_id`), + KEY `idp_id` (`idp_id`), + CONSTRAINT `idp_remote_ids_ibfk_1` FOREIGN KEY (`idp_id`) REFERENCES `identity_provider` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `implied_role` +-- + +DROP TABLE IF EXISTS `implied_role`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `implied_role` ( + `prior_role_id` varchar(64) NOT NULL, + `implied_role_id` varchar(64) NOT NULL, + PRIMARY KEY (`prior_role_id`,`implied_role_id`), + KEY `implied_role_implied_role_id_fkey` (`implied_role_id`), + CONSTRAINT `implied_role_implied_role_id_fkey` FOREIGN KEY (`implied_role_id`) REFERENCES `role` (`id`) ON DELETE CASCADE, + CONSTRAINT `implied_role_prior_role_id_fkey` FOREIGN KEY (`prior_role_id`) REFERENCES `role` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `limit` +-- + +DROP TABLE IF EXISTS `limit`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `limit` ( + `id` varchar(64) NOT NULL, + `project_id` varchar(64) DEFAULT NULL, + `service_id` varchar(255) DEFAULT NULL, + `region_id` varchar(64) DEFAULT NULL, + `resource_name` varchar(255) DEFAULT 
NULL, + `resource_limit` int(11) NOT NULL, + `description` text DEFAULT NULL, + `internal_id` int(11) NOT NULL AUTO_INCREMENT, + `registered_limit_id` varchar(64) DEFAULT NULL, + `domain_id` varchar(64) DEFAULT NULL, + PRIMARY KEY (`internal_id`), + UNIQUE KEY `limit_id_key` (`id`), + KEY `limit_service_id_fkey` (`service_id`,`region_id`,`resource_name`), + KEY `registered_limit_id` (`registered_limit_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `local_user` +-- + +DROP TABLE IF EXISTS `local_user`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `local_user` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `user_id` varchar(64) NOT NULL, + `domain_id` varchar(64) NOT NULL, + `name` varchar(255) NOT NULL, + `failed_auth_count` int(11) DEFAULT NULL, + `failed_auth_at` datetime DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `domain_id` (`domain_id`,`name`), + UNIQUE KEY `user_id` (`user_id`), + KEY `local_user_user_id_fkey` (`user_id`,`domain_id`), + CONSTRAINT `local_user_user_id_fkey` FOREIGN KEY (`user_id`, `domain_id`) REFERENCES `user` (`id`, `domain_id`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `mapping` +-- + +DROP TABLE IF EXISTS `mapping`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `mapping` ( + `id` varchar(64) NOT NULL, + `rules` text NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `migrate_version` +-- + +DROP TABLE IF EXISTS `migrate_version`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE 
TABLE `migrate_version` ( + `repository_id` varchar(250) NOT NULL, + `repository_path` text DEFAULT NULL, + `version` int(11) DEFAULT NULL, + PRIMARY KEY (`repository_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `nonlocal_user` +-- + +DROP TABLE IF EXISTS `nonlocal_user`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `nonlocal_user` ( + `domain_id` varchar(64) NOT NULL, + `name` varchar(255) NOT NULL, + `user_id` varchar(64) NOT NULL, + PRIMARY KEY (`domain_id`,`name`), + UNIQUE KEY `ixu_nonlocal_user_user_id` (`user_id`), + KEY `nonlocal_user_user_id_fkey` (`user_id`,`domain_id`), + CONSTRAINT `nonlocal_user_user_id_fkey` FOREIGN KEY (`user_id`, `domain_id`) REFERENCES `user` (`id`, `domain_id`) ON DELETE CASCADE ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `password` +-- + +DROP TABLE IF EXISTS `password`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `password` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `local_user_id` int(11) NOT NULL, + `expires_at` datetime DEFAULT NULL, + `self_service` tinyint(1) NOT NULL DEFAULT 0, + `password_hash` varchar(255) DEFAULT NULL, + `created_at_int` bigint(20) NOT NULL DEFAULT 0, + `expires_at_int` bigint(20) DEFAULT NULL, + `created_at` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `local_user_id` (`local_user_id`), + CONSTRAINT `password_ibfk_1` FOREIGN KEY (`local_user_id`) REFERENCES `local_user` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `policy` +-- + +DROP TABLE IF EXISTS `policy`; +/*!40101 SET @saved_cs_client = @@character_set_client */; 
+/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `policy` ( + `id` varchar(64) NOT NULL, + `type` varchar(255) NOT NULL, + `blob` text NOT NULL, + `extra` text DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `policy_association` +-- + +DROP TABLE IF EXISTS `policy_association`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `policy_association` ( + `id` varchar(64) NOT NULL, + `policy_id` varchar(64) NOT NULL, + `endpoint_id` varchar(64) DEFAULT NULL, + `service_id` varchar(64) DEFAULT NULL, + `region_id` varchar(64) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `endpoint_id` (`endpoint_id`,`service_id`,`region_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `project` +-- + +DROP TABLE IF EXISTS `project`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `project` ( + `id` varchar(64) NOT NULL, + `name` varchar(64) NOT NULL, + `extra` text DEFAULT NULL, + `description` text DEFAULT NULL, + `enabled` tinyint(1) DEFAULT NULL, + `domain_id` varchar(64) NOT NULL, + `parent_id` varchar(64) DEFAULT NULL, + `is_domain` tinyint(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`id`), + UNIQUE KEY `ixu_project_name_domain_id` (`domain_id`,`name`), + KEY `project_parent_id_fkey` (`parent_id`), + CONSTRAINT `project_domain_id_fkey` FOREIGN KEY (`domain_id`) REFERENCES `project` (`id`), + CONSTRAINT `project_parent_id_fkey` FOREIGN KEY (`parent_id`) REFERENCES `project` (`id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`enabled` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `project_endpoint` +-- + +DROP TABLE IF EXISTS 
`project_endpoint`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `project_endpoint` ( + `endpoint_id` varchar(64) NOT NULL, + `project_id` varchar(64) NOT NULL, + PRIMARY KEY (`endpoint_id`,`project_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `project_endpoint_group` +-- + +DROP TABLE IF EXISTS `project_endpoint_group`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `project_endpoint_group` ( + `endpoint_group_id` varchar(64) NOT NULL, + `project_id` varchar(64) NOT NULL, + PRIMARY KEY (`endpoint_group_id`,`project_id`), + CONSTRAINT `project_endpoint_group_ibfk_1` FOREIGN KEY (`endpoint_group_id`) REFERENCES `endpoint_group` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `project_tag` +-- + +DROP TABLE IF EXISTS `project_tag`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `project_tag` ( + `project_id` varchar(64) NOT NULL, + `name` varchar(255) NOT NULL, + PRIMARY KEY (`project_id`,`name`), + UNIQUE KEY `project_id` (`project_id`,`name`), + CONSTRAINT `project_tag_ibfk_1` FOREIGN KEY (`project_id`) REFERENCES `project` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `region` +-- + +DROP TABLE IF EXISTS `region`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `region` ( + `id` varchar(255) NOT NULL, + `description` varchar(255) NOT NULL, + `parent_region_id` varchar(255) DEFAULT NULL, + `extra` text DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; 
+/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `registered_limit` +-- + +DROP TABLE IF EXISTS `registered_limit`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `registered_limit` ( + `id` varchar(64) NOT NULL, + `service_id` varchar(255) DEFAULT NULL, + `region_id` varchar(64) DEFAULT NULL, + `resource_name` varchar(255) DEFAULT NULL, + `default_limit` int(11) NOT NULL, + `description` text DEFAULT NULL, + `internal_id` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`internal_id`), + UNIQUE KEY `registered_limit_id_key` (`id`), + KEY `region_id` (`region_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `request_token` +-- + +DROP TABLE IF EXISTS `request_token`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `request_token` ( + `id` varchar(64) NOT NULL, + `request_secret` varchar(64) NOT NULL, + `verifier` varchar(64) DEFAULT NULL, + `authorizing_user_id` varchar(64) DEFAULT NULL, + `requested_project_id` varchar(64) NOT NULL, + `role_ids` text DEFAULT NULL, + `consumer_id` varchar(64) NOT NULL, + `expires_at` varchar(64) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `ix_request_token_consumer_id` (`consumer_id`), + CONSTRAINT `request_token_ibfk_1` FOREIGN KEY (`consumer_id`) REFERENCES `consumer` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `revocation_event` +-- + +DROP TABLE IF EXISTS `revocation_event`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `revocation_event` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `domain_id` varchar(64) DEFAULT NULL, + `project_id` varchar(64) DEFAULT NULL, + `user_id` varchar(64) 
DEFAULT NULL, + `role_id` varchar(64) DEFAULT NULL, + `trust_id` varchar(64) DEFAULT NULL, + `consumer_id` varchar(64) DEFAULT NULL, + `access_token_id` varchar(64) DEFAULT NULL, + `issued_before` datetime NOT NULL, + `expires_at` datetime DEFAULT NULL, + `revoked_at` datetime NOT NULL, + `audit_id` varchar(32) DEFAULT NULL, + `audit_chain_id` varchar(32) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `ix_revocation_event_new_revoked_at` (`revoked_at`), + KEY `ix_revocation_event_issued_before` (`issued_before`), + KEY `ix_revocation_event_project_id_issued_before` (`project_id`,`issued_before`), + KEY `ix_revocation_event_user_id_issued_before` (`user_id`,`issued_before`), + KEY `ix_revocation_event_audit_id_issued_before` (`audit_id`,`issued_before`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `role` +-- + +DROP TABLE IF EXISTS `role`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `role` ( + `id` varchar(64) NOT NULL, + `name` varchar(255) NOT NULL, + `extra` text DEFAULT NULL, + `domain_id` varchar(64) NOT NULL DEFAULT '<>', + `description` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `ixu_role_name_domain_id` (`name`,`domain_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `sensitive_config` +-- + +DROP TABLE IF EXISTS `sensitive_config`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `sensitive_config` ( + `domain_id` varchar(64) NOT NULL, + `group` varchar(255) NOT NULL, + `option` varchar(255) NOT NULL, + `value` text NOT NULL, + PRIMARY KEY (`domain_id`,`group`,`option`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `service` +-- + 
+DROP TABLE IF EXISTS `service`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `service` ( + `id` varchar(64) NOT NULL, + `type` varchar(255) DEFAULT NULL, + `enabled` tinyint(1) NOT NULL DEFAULT 1, + `extra` text DEFAULT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`enabled` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `service_provider` +-- + +DROP TABLE IF EXISTS `service_provider`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `service_provider` ( + `auth_url` varchar(256) NOT NULL, + `id` varchar(64) NOT NULL, + `enabled` tinyint(1) NOT NULL, + `description` text DEFAULT NULL, + `sp_url` varchar(256) NOT NULL, + `relay_state_prefix` varchar(256) NOT NULL DEFAULT 'ss:mem:', + PRIMARY KEY (`id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`enabled` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `system_assignment` +-- + +DROP TABLE IF EXISTS `system_assignment`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `system_assignment` ( + `type` varchar(64) NOT NULL, + `actor_id` varchar(64) NOT NULL, + `target_id` varchar(64) NOT NULL, + `role_id` varchar(64) NOT NULL, + `inherited` tinyint(1) NOT NULL, + PRIMARY KEY (`type`,`actor_id`,`target_id`,`role_id`,`inherited`), + CONSTRAINT `CONSTRAINT_1` CHECK (`inherited` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `token` +-- + +DROP TABLE IF EXISTS `token`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `token` ( + `id` 
varchar(64) NOT NULL, + `expires` datetime DEFAULT NULL, + `extra` text DEFAULT NULL, + `valid` tinyint(1) NOT NULL, + `trust_id` varchar(64) DEFAULT NULL, + `user_id` varchar(64) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `ix_token_expires` (`expires`), + KEY `ix_token_expires_valid` (`expires`,`valid`), + KEY `ix_token_user_id` (`user_id`), + KEY `ix_token_trust_id` (`trust_id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`valid` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `trust` +-- + +DROP TABLE IF EXISTS `trust`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `trust` ( + `id` varchar(64) NOT NULL, + `trustor_user_id` varchar(64) NOT NULL, + `trustee_user_id` varchar(64) NOT NULL, + `project_id` varchar(64) DEFAULT NULL, + `impersonation` tinyint(1) NOT NULL, + `deleted_at` datetime DEFAULT NULL, + `expires_at` datetime DEFAULT NULL, + `remaining_uses` int(11) DEFAULT NULL, + `extra` text DEFAULT NULL, + `expires_at_int` bigint(20) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `duplicate_trust_constraint_expanded` (`trustor_user_id`,`trustee_user_id`,`project_id`,`impersonation`,`expires_at`,`expires_at_int`), + CONSTRAINT `CONSTRAINT_1` CHECK (`impersonation` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `trust_role` +-- + +DROP TABLE IF EXISTS `trust_role`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `trust_role` ( + `trust_id` varchar(64) NOT NULL, + `role_id` varchar(64) NOT NULL, + PRIMARY KEY (`trust_id`,`role_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `user` +-- + +DROP TABLE IF EXISTS `user`; +/*!40101 SET 
@saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `user` ( + `id` varchar(64) NOT NULL, + `extra` text DEFAULT NULL, + `enabled` tinyint(1) DEFAULT NULL, + `default_project_id` varchar(64) DEFAULT NULL, + `created_at` datetime DEFAULT NULL, + `last_active_at` date DEFAULT NULL, + `domain_id` varchar(64) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `ixu_user_id_domain_id` (`id`,`domain_id`), + KEY `domain_id` (`domain_id`), + KEY `ix_default_project_id` (`default_project_id`), + CONSTRAINT `user_ibfk_1` FOREIGN KEY (`domain_id`) REFERENCES `project` (`id`), + CONSTRAINT `CONSTRAINT_1` CHECK (`enabled` in (0,1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `user_group_membership` +-- + +DROP TABLE IF EXISTS `user_group_membership`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `user_group_membership` ( + `user_id` varchar(64) NOT NULL, + `group_id` varchar(64) NOT NULL, + PRIMARY KEY (`user_id`,`group_id`), + KEY `group_id` (`group_id`), + CONSTRAINT `fk_user_group_membership_group_id` FOREIGN KEY (`group_id`) REFERENCES `group` (`id`), + CONSTRAINT `fk_user_group_membership_user_id` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `user_option` +-- + +DROP TABLE IF EXISTS `user_option`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `user_option` ( + `user_id` varchar(64) NOT NULL, + `option_id` varchar(4) NOT NULL, + `option_value` text DEFAULT NULL, + PRIMARY KEY (`user_id`,`option_id`), + CONSTRAINT `user_option_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET 
character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `whitelisted_config` +-- + +DROP TABLE IF EXISTS `whitelisted_config`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `whitelisted_config` ( + `domain_id` varchar(64) NOT NULL, + `group` varchar(255) NOT NULL, + `option` varchar(255) NOT NULL, + `value` text NOT NULL, + PRIMARY KEY (`domain_id`,`group`,`option`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; diff --git a/compose_deployment/logs.txt b/compose_deployment/logs.txt new file mode 100644 index 0000000..01605a5 --- /dev/null +++ b/compose_deployment/logs.txt @@ -0,0 +1,407 @@ +Attaching to ca_service, crossbar, iotronic-conductor, iotronic-db, iotronic-ui, iotronic-wagent, iotronic-wstun, keystone, rabbitmq +iotronic-wagent | [INFO] Configurazione dei permessi sui log... +ca_service | [INFO] Installazione di OpenSSL... +iotronic-wagent | [INFO] Avvio del Wagent... +crossbar | [INFO] Attesa dei certificati... +crossbar | [INFO] Certificati trovati! +crossbar | [INFO] Scrittura configurazione Crossbar... +crossbar | [INFO] Avvio di Crossbar... +iotronic-conductor | [INFO] Attesa del database MySQL... +iotronic-conductor | [INFO] Database non ancora pronto, riprovo... +iotronic-db | 2025-02-22 08:42:42+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 1:10.4.13+maria~bionic started. 
+ca_service | Hit:1 http://deb.debian.org/debian buster InRelease +ca_service | Hit:2 http://deb.debian.org/debian-security buster/updates InRelease +ca_service | Hit:3 http://deb.debian.org/debian buster-updates InRelease +iotronic-db | 2025-02-22 08:42:42+00:00 [Note] [Entrypoint]: Switching to dedicated user 'mysql' +iotronic-db | 2025-02-22 08:42:42+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 1:10.4.13+maria~bionic started. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::::::::::::::: +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::: _____ __ +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::: : ::::: / ___/____ ___ ___ ___ / / ___ _ ____ +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::::: ::::::: / /__ / __// _ \ (_-< (_-< / _ \/ _ `// __/ +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::: : ::::: \___//_/ \___//___//___//_.__/\_,_//_/ +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::: +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::::::::::::::::: Crossbar v20.12.3 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Copyright (c) 2013-2025 Crossbar.io Technologies GmbH, licensed under AGPL 3.0. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Booting standalone node .. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Node key files exist and are valid. Node public key is 0x2782f5c114863d92983afd174d670a218b011f93a743800ffac6eb8f629cd612 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Node key loaded from /node/.crossbar/key.priv +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Node configuration loaded [config_source=localfile, config_path=/node/.crossbar/config.json] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Entering event reactor ... +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Starting node .. 
[] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Node ID bc117ba41e8e-8 set from hostname/pid +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] RouterFactory.start_realm: router created for realm "crossbar" +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] node-wide role "controller" added on node management router realm "crossbar" +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] attached session 4319007479234252 to realm "crossbar" (authid="serviceagent", authrole="trusted") +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] : realm service session attached to realm "crossbar" [session_id=4319007479234252, authid="serviceagent", authrole="trusted", on_ready=None] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] router service agent session attached [] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] attached session 3935604607560076 to realm "crossbar" (authid="nodecontroller", authrole="controller") +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] : joined realm="crossbar" on local node management router [authid="nodecontroller", authrole="controller"] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Signal handler installed on process 8 thread 123475127084864 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] node controller session attached [] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Using default node shutdown triggers ['shutdown_on_worker_exit'] +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::NODE_BOOT_BEGIN +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Booting node +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Booting node from local configuration [parallel_worker_start=False] .. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Will start 1 worker .. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Order node to start "Router worker001" .. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Starting router-worker "worker001" .. 
+crossbar | 2025-02-22T08:42:43+0000 [Controller 8] worker-specific role "crossbar.worker.worker001" added on node management router realm "crossbar" +ca_service | Reading package lists... +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Starting router-worker "worker001" on node "bc117ba41e8e-8" (personality "standalone") and local node management realm "crossbar" .. +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Running as PID 43 on CPython-EPollReactor +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Entering event reactor ... +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] attached session 8667144518419379 to realm "crossbar" (authid="crossbar.process.43", authrole="crossbar.worker.worker001") +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Router worker session for "worker001" joined realm "crossbar" on node router +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Router worker session for "worker001" ready +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Ok, node has started Router worker001 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Configuring Router worker001 .. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Order Router worker001 to start Realm realm001 +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Starting router realm realm001 +crossbar | 2025-02-22T08:42:43+0000 [Router 43] RouterFactory.start_realm: router created for realm "s4t" +crossbar | 2025-02-22T08:42:43+0000 [Router 43] attached session 5253035339889822 to realm "s4t" (authid="routerworker-worker001-realm-realm001-serviceagent", authrole="trusted") +iotronic-wstun | [INFO] Verifica permessi certificati... 
+crossbar | 2025-02-22T08:42:43+0000 [Router 43] : realm service session attached to realm "s4t" [session_id=5253035339889822, authid="routerworker-worker001-realm-realm001-serviceagent", authrole="trusted", on_ready=] +crossbar | 2025-02-22T08:42:43+0000 [Router 43] (session=, realm="s4t", authrole="trusted") +crossbar | 2025-02-22T08:42:43+0000 [Router 43] RouterServiceAgent started on realm="s4t" with authrole="trusted", authid="routerworker-worker001-realm-realm001-serviceagent" +iotronic-wstun | total 24 +iotronic-wstun | -rw-r--r-- 1 root root 932 Feb 22 08:38 crossbar.csr +iotronic-wstun | -rw-r--r-- 1 root root 1679 Feb 22 08:38 crossbar.key +iotronic-wstun | -rw-r--r-- 1 root root 1054 Feb 22 08:38 crossbar.pem +iotronic-wstun | -rw-r--r-- 1 root root 1675 Feb 22 08:38 iotronic_CA.key +iotronic-wstun | -rw-r--r-- 1 root root 1147 Feb 22 08:38 iotronic_CA.pem +iotronic-wstun | -rw-r--r-- 1 root root 41 Feb 22 08:38 iotronic_CA.srl +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Realm "realm001" (name="s4t", authrole="trusted", authid="routerworker-worker001-realm-realm001-serviceagent") started +iotronic-wstun | [INFO] Certificati SSL trovati! +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Ok, Router worker001 has started Realm realm001 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Order Realm realm001 to start Role role001 +iotronic-wstun | total 24 +iotronic-wstun | -rw-r--r-- 1 root root 932 Feb 22 08:38 crossbar.csr +iotronic-wstun | -rw-r--r-- 1 root root 1679 Feb 22 08:38 crossbar.key +iotronic-wstun | -rw-r--r-- 1 root root 1054 Feb 22 08:38 crossbar.pem +iotronic-wstun | -rw-r--r-- 1 root root 1675 Feb 22 08:38 iotronic_CA.key +iotronic-wstun | -rw-r--r-- 1 root root 1147 Feb 22 08:38 iotronic_CA.pem +iotronic-wstun | -rw-r--r-- 1 root root 41 Feb 22 08:38 iotronic_CA.srl +iotronic-wstun | [INFO] Avvio di iotronic-wstun... 
+crossbar | 2025-02-22T08:42:43+0000 [Router 43] Role role001 named "anonymous" started on realm "s4t" +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Ok, Realm realm001 has started Role role001 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Order Router worker001 to start Transport transport001 +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Starting router transport "transport001" +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Creating router transport for "transport001" .. +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Router transport created for "transport001" [transport_class=] +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Loading server TLS key from /node/.crossbar/ssl/crossbar.key +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Loading server TLS certificate from /node/.crossbar/ssl/crossbar.pem +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Loading server TLS chain certificate from /node/.crossbar/ssl/iotronic_CA.pem +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Using secure default TLS ciphers +crossbar | 2025-02-22T08:42:43+0000 [Router 43] WampWebSocketServerFactory (TLS) starting on 8181 +crossbar | 2025-02-22T08:42:43+0000 [Router 43] Router TCP/8181 transport started as transport "transport001" and listening on TCP port 8181 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Ok, Router worker001 has started Transport transport001 +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Ok, worker "Router worker001" configured and ready! +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] Ok, local node configuration ran successfully. +crossbar | 2025-02-22T08:42:43+0000 [Controller 8] ::NODE_BOOT_COMPLETE +iotronic-wstun | [2025-02-22 08:42:43.945] [INFO] wstun - WSTUN STARTED! 
+iotronic-wstun | [2025-02-22 08:42:43.947] [INFO] wstun - [SYSTEM] - WS Reverse Tunnel Server starting with these paramters: +iotronic-wstun | { +iotronic-wstun | "ssl": "true", +iotronic-wstun | "key": "/var/lib/iotronic/ssl/iotronic_CA.key", +iotronic-wstun | "cert": "/var/lib/iotronic/ssl/iotronic_CA.pem" +iotronic-wstun | } +iotronic-wstun | [2025-02-22 08:42:43.947] [INFO] wstun - [SYSTEM] - WS Reverse Tunnel Server over HTTPS. +iotronic-wstun | [2025-02-22 08:42:43.955] [INFO] wstun - [SYSTEM] - WS Reverse Tunnel Server starting on: wss://localhost:8080 - CERT: +iotronic-wstun | -----BEGIN CERTIFICATE----- +iotronic-wstun | MIIDIzCCAgugAwIBAgIUGO21rDqpzdelmHKczIznTz+/FEkwDQYJKoZIhvcNAQEL +iotronic-wstun | BQAwIDELMAkGA1UEBhMCSVQxETAPBgNVBAoMCGlvdHJvbmljMCAXDTI1MDIyMjA4 +iotronic-wstun | MzgxMVoYDzIwNzUwMjEwMDgzODExWjAgMQswCQYDVQQGEwJJVDERMA8GA1UECgwI +iotronic-wstun | aW90cm9uaWMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAWNZPSpg2 +iotronic-wstun | QsrhBPI+4BxS71ZiWn+uv/OwSZcb/EtSnD5LFcTiIKqq7ssKISl6Y2nDIdN/v06D +iotronic-wstun | t/xvXbcIpKXsCfpmW6JiTSBLS4bi4JQkJynoUeq/c3gC3WXk47icr/cjqIHfutG9 +iotronic-wstun | GS2fodQWRB1Kr/UWuPDUkF+ZMQtR7nwIfyfvhM4QLMLTs9dk7I8nAqroGtq+Il8f +iotronic-wstun | c050ou5THeLmTE4kSkwcY2H2fzb+mcMXtsJ1TG9ZKZdHbwcGe0gDqtlD71+OJ5qy +iotronic-wstun | cf3sNWGG/U8FDC1unE92YEVmJg3nC6Z7npfZLhwl4OvVgHqmGs1zJSuxnOXUsJLG +iotronic-wstun | vzjxuKik2heHAgMBAAGjUzBRMB0GA1UdDgQWBBSeFP6MgzixKhwElZCW2exXRZNR +iotronic-wstun | wjAfBgNVHSMEGDAWgBSeFP6MgzixKhwElZCW2exXRZNRwjAPBgNVHRMBAf8EBTAD +iotronic-wstun | AQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBNZzBwWCssL+mhoqgAZrPtMAPAE4HpSStt +iotronic-wstun | myjjk1YeFH7ucZUgB63B1CD1Vrrza4PFoniK08XfWtaOg/fLawKIDZODT25pAqdd +iotronic-wstun | 5N2a2XUGkwExf3fRIreslA98XOjnA8DuMA+RTqj6MtD9wm1LbmnvqdUBfYAweC8/ +iotronic-wstun | AIglzhB5F9mX7Vx7Vnl2CUBJV42nfeFzK3gUtv8eXDrGLdkcfVJLfksMSBXj4MhP +iotronic-wstun | vDtTek1xhkx4/Ugtfk7xpjjqLJaNBsQaXJaY8vCMc8Po3pq5NDCfp0Ay4a0sD0xA +iotronic-wstun | 
zVHyd655AaZeRqTitd9Hn5gJdROWLVl1o+Y8nGvdeDML4H/ik+wI +iotronic-wstun | -----END CERTIFICATE----- +iotronic-wstun | +iotronic-wstun | [2025-02-22 08:42:43.960] [INFO] wstun - [SYSTEM] - WS Reverse Tunnel Server is listening... +ca_service | Reading package lists... +ca_service | Building dependency tree... +ca_service | Reading state information... +ca_service | openssl is already the newest version (1.1.1n-0+deb10u6). +ca_service | 0 upgraded, 0 newly installed, 0 to remove and 2 not upgraded. +ca_service | [INFO] Generazione della Root CA... +ca_service | [INFO] Generazione della chiave privata e del certificato per Crossbar... +ca_service | [INFO] Impostazione permessi certificati... +ca_service | [INFO] Certificati generati con successo. +rabbitmq | 2025-02-22 08:42:45.645035+00:00 [notice] <0.44.0> Application syslog exited with reason: stopped +rabbitmq | 2025-02-22 08:42:45.651758+00:00 [notice] <0.254.0> Logging: switching to configured handler(s); following messages may not be visible in this log output +rabbitmq | 2025-02-22 08:42:45.652463+00:00 [notice] <0.254.0> Logging: configured log handlers are now ACTIVE +rabbitmq | 2025-02-22 08:42:45.662678+00:00 [info] <0.254.0> ra: starting system quorum_queues +rabbitmq | 2025-02-22 08:42:45.662766+00:00 [info] <0.254.0> starting Ra system: quorum_queues in directory: /var/lib/rabbitmq/mnesia/rabbit@6e754e1ac08a/quorum/rabbit@6e754e1ac08a +rabbitmq | 2025-02-22 08:42:45.721633+00:00 [info] <0.268.0> ra system 'quorum_queues' running pre init for 0 registered servers +rabbitmq | 2025-02-22 08:42:45.731845+00:00 [info] <0.269.0> ra: meta data store initialised for system quorum_queues. 
0 record(s) recovered +rabbitmq | 2025-02-22 08:42:45.745659+00:00 [notice] <0.274.0> WAL: ra_log_wal init, open tbls: ra_log_open_mem_tables, closed tbls: ra_log_closed_mem_tables +rabbitmq | 2025-02-22 08:42:45.760952+00:00 [info] <0.254.0> ra: starting system coordination +rabbitmq | 2025-02-22 08:42:45.761025+00:00 [info] <0.254.0> starting Ra system: coordination in directory: /var/lib/rabbitmq/mnesia/rabbit@6e754e1ac08a/coordination/rabbit@6e754e1ac08a +rabbitmq | 2025-02-22 08:42:45.762090+00:00 [info] <0.282.0> ra system 'coordination' running pre init for 1 registered servers +rabbitmq | 2025-02-22 08:42:45.773460+00:00 [info] <0.283.0> ra: meta data store initialised for system coordination. 1 record(s) recovered +rabbitmq | 2025-02-22 08:42:45.773618+00:00 [notice] <0.288.0> WAL: ra_coordination_log_wal init, open tbls: ra_coordination_log_open_mem_tables, closed tbls: ra_coordination_log_closed_mem_tables +rabbitmq | 2025-02-22 08:42:45.777036+00:00 [info] <0.254.0> ra: starting system coordination +rabbitmq | 2025-02-22 08:42:45.777076+00:00 [info] <0.254.0> starting Ra system: coordination in directory: /var/lib/rabbitmq/mnesia/rabbit@6e754e1ac08a/coordination/rabbit@6e754e1ac08a +rabbitmq | 2025-02-22 08:42:45.855500+00:00 [info] <0.254.0> Waiting for Khepri leader for 30000 ms, 9 retries left +rabbitmq | 2025-02-22 08:42:45.880464+00:00 [info] <0.254.0> Khepri leader elected +rabbitmq | 2025-02-22 08:42:45.880570+00:00 [info] <0.254.0> Waiting for Khepri projections for 30000 ms, 9 retries left +rabbitmq | 2025-02-22 08:42:46.246075+00:00 [notice] <0.293.0> RabbitMQ metadata store: candidate -> leader in term: 3 machine version: 1 +rabbitmq | 2025-02-22 08:42:46.554327+00:00 [info] <0.254.0>  +rabbitmq | 2025-02-22 08:42:46.554327+00:00 [info] <0.254.0> Starting RabbitMQ 3.13.7 on Erlang 26.2.5.8 [jit] +rabbitmq | 2025-02-22 08:42:46.554327+00:00 [info] <0.254.0> Copyright (c) 2007-2024 Broadcom Inc and/or its subsidiaries +rabbitmq | 2025-02-22 
08:42:46.554327+00:00 [info] <0.254.0> Licensed under the MPL 2.0. Website: https://rabbitmq.com +rabbitmq | +rabbitmq | ## ## RabbitMQ 3.13.7 +rabbitmq | ## ## +rabbitmq | ########## Copyright (c) 2007-2024 Broadcom Inc and/or its subsidiaries +rabbitmq | ###### ## +rabbitmq | ########## Licensed under the MPL 2.0. Website: https://rabbitmq.com +rabbitmq | +rabbitmq | Erlang: 26.2.5.8 [jit] +rabbitmq | TLS Library: OpenSSL - OpenSSL 3.1.8 11 Feb 2025 +rabbitmq | Release series support status: see https://www.rabbitmq.com/release-information +rabbitmq | +rabbitmq | Doc guides: https://www.rabbitmq.com/docs +rabbitmq | Support: https://www.rabbitmq.com/docs/contact +rabbitmq | Tutorials: https://www.rabbitmq.com/tutorials +rabbitmq | Monitoring: https://www.rabbitmq.com/docs/monitoring +rabbitmq | Upgrading: https://www.rabbitmq.com/docs/upgrade +rabbitmq | +rabbitmq | Logs: +rabbitmq | +rabbitmq | Config file(s): /etc/rabbitmq/conf.d/10-defaults.conf +rabbitmq | /etc/rabbitmq/conf.d/20-management_agent.disable_metrics_collector.conf +rabbitmq | +rabbitmq | Starting broker...2025-02-22 08:42:46.556018+00:00 [info] <0.254.0>  +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> node : rabbit@6e754e1ac08a +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> home dir : /var/lib/rabbitmq +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> config file(s) : /etc/rabbitmq/conf.d/10-defaults.conf +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> : /etc/rabbitmq/conf.d/20-management_agent.disable_metrics_collector.conf +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> cookie hash : 8b8OzmpUQESudlbZud9LlQ== +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> log(s) :  +rabbitmq | 2025-02-22 08:42:46.556018+00:00 [info] <0.254.0> data dir : /var/lib/rabbitmq/mnesia/rabbit@6e754e1ac08a +rabbitmq | 2025-02-22 08:42:46.758321+00:00 [info] <0.254.0> Running boot step pre_boot defined by app rabbit +rabbitmq | 2025-02-22 
08:42:46.758448+00:00 [info] <0.254.0> Running boot step rabbit_global_counters defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.758896+00:00 [info] <0.254.0> Running boot step rabbit_osiris_metrics defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.759140+00:00 [info] <0.254.0> Running boot step rabbit_core_metrics defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.760264+00:00 [info] <0.254.0> Running boot step rabbit_alarm defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.772856+00:00 [info] <0.385.0> Memory high watermark set to 25718 MiB (26967456153 bytes) of 64295 MiB (67418640384 bytes) total +rabbitmq | 2025-02-22 08:42:46.775904+00:00 [info] <0.387.0> Enabling free disk space monitoring (disk free space: 72704069632, total memory: 67418640384) +rabbitmq | 2025-02-22 08:42:46.775961+00:00 [info] <0.387.0> Disk free limit set to 50MB +rabbitmq | 2025-02-22 08:42:46.777781+00:00 [info] <0.254.0> Running boot step code_server_cache defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.777872+00:00 [info] <0.254.0> Running boot step file_handle_cache defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.785567+00:00 [info] <0.390.0> Limiting to approx 1048479 file handles (943629 sockets) +rabbitmq | 2025-02-22 08:42:46.785754+00:00 [info] <0.391.0> FHC read buffering: OFF +rabbitmq | 2025-02-22 08:42:46.785809+00:00 [info] <0.391.0> FHC write buffering: ON +rabbitmq | 2025-02-22 08:42:46.786217+00:00 [info] <0.254.0> Running boot step worker_pool defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.786308+00:00 [info] <0.339.0> Will use 32 processes for default worker pool +rabbitmq | 2025-02-22 08:42:46.786373+00:00 [info] <0.339.0> Starting worker pool 'worker_pool' with 32 processes in it +rabbitmq | 2025-02-22 08:42:46.787915+00:00 [info] <0.254.0> Running boot step database defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.788319+00:00 [info] <0.254.0> Peer discovery: configured backend: rabbit_peer_discovery_classic_config +rabbitmq 
| 2025-02-22 08:42:46.789749+00:00 [info] <0.254.0> Waiting for Mnesia tables for 30000 ms, 9 retries left +rabbitmq | 2025-02-22 08:42:46.792528+00:00 [info] <0.254.0> Successfully synced tables from a peer +rabbitmq | 2025-02-22 08:42:46.798485+00:00 [info] <0.254.0> Waiting for Mnesia tables for 30000 ms, 9 retries left +rabbitmq | 2025-02-22 08:42:46.798595+00:00 [info] <0.254.0> Successfully synced tables from a peer +rabbitmq | 2025-02-22 08:42:46.798783+00:00 [info] <0.254.0> Running boot step tracking_metadata_store defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.798867+00:00 [info] <0.433.0> Setting up a table for connection tracking on this node: tracked_connection +rabbitmq | 2025-02-22 08:42:46.798922+00:00 [info] <0.433.0> Setting up a table for per-vhost connection counting on this node: tracked_connection_per_vhost +rabbitmq | 2025-02-22 08:42:46.799013+00:00 [info] <0.433.0> Setting up a table for per-user connection counting on this node: tracked_connection_per_user +rabbitmq | 2025-02-22 08:42:46.799084+00:00 [info] <0.433.0> Setting up a table for channel tracking on this node: tracked_channel +rabbitmq | 2025-02-22 08:42:46.799166+00:00 [info] <0.433.0> Setting up a table for channel tracking on this node: tracked_channel_per_user +rabbitmq | 2025-02-22 08:42:46.799215+00:00 [info] <0.254.0> Running boot step networking_metadata_store defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799261+00:00 [info] <0.254.0> Running boot step feature_flags defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799369+00:00 [info] <0.254.0> Running boot step codec_correctness_check defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799397+00:00 [info] <0.254.0> Running boot step external_infrastructure defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799433+00:00 [info] <0.254.0> Running boot step rabbit_event defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799537+00:00 [info] <0.254.0> Running boot step rabbit_registry defined by app 
rabbit +rabbitmq | 2025-02-22 08:42:46.799594+00:00 [info] <0.254.0> Running boot step rabbit_auth_mechanism_amqplain defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799654+00:00 [info] <0.254.0> Running boot step rabbit_auth_mechanism_cr_demo defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799697+00:00 [info] <0.254.0> Running boot step rabbit_auth_mechanism_plain defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799740+00:00 [info] <0.254.0> Running boot step rabbit_exchange_type_direct defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799831+00:00 [info] <0.254.0> Running boot step rabbit_exchange_type_fanout defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.799927+00:00 [info] <0.254.0> Running boot step rabbit_exchange_type_headers defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800028+00:00 [info] <0.254.0> Running boot step rabbit_exchange_type_topic defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800132+00:00 [info] <0.254.0> Running boot step rabbit_mirror_queue_mode_all defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800270+00:00 [info] <0.254.0> Running boot step rabbit_mirror_queue_mode_exactly defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800354+00:00 [info] <0.254.0> Running boot step rabbit_mirror_queue_mode_nodes defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800441+00:00 [info] <0.254.0> Running boot step rabbit_priority_queue defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800480+00:00 [info] <0.254.0> Priority queues enabled, real BQ is rabbit_variable_queue +rabbitmq | 2025-02-22 08:42:46.800581+00:00 [info] <0.254.0> Running boot step rabbit_queue_location_client_local defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800654+00:00 [info] <0.254.0> Running boot step rabbit_queue_location_min_masters defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800729+00:00 [info] <0.254.0> Running boot step rabbit_queue_location_random defined by app rabbit +rabbitmq | 2025-02-22 
08:42:46.800804+00:00 [info] <0.254.0> Running boot step kernel_ready defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800840+00:00 [info] <0.254.0> Running boot step rabbit_sysmon_minder defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.800984+00:00 [info] <0.254.0> Running boot step rabbit_epmd_monitor defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.801904+00:00 [info] <0.441.0> epmd monitor knows us, inter-node communication (distribution) port: 25672 +rabbitmq | 2025-02-22 08:42:46.802082+00:00 [info] <0.254.0> Running boot step guid_generator defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.804566+00:00 [info] <0.254.0> Running boot step rabbit_node_monitor defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.804889+00:00 [info] <0.445.0> Starting rabbit_node_monitor (in ignore mode) +rabbitmq | 2025-02-22 08:42:46.805040+00:00 [info] <0.254.0> Running boot step delegate_sup defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.805672+00:00 [info] <0.254.0> Running boot step rabbit_memory_monitor defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.805880+00:00 [info] <0.254.0> Running boot step rabbit_fifo_dlx_sup defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.805996+00:00 [info] <0.254.0> Running boot step core_initialized defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.806059+00:00 [info] <0.254.0> Running boot step rabbit_channel_tracking_handler defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.806141+00:00 [info] <0.254.0> Running boot step rabbit_connection_tracking_handler defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.806260+00:00 [info] <0.254.0> Running boot step rabbit_definitions_hashing defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.806370+00:00 [info] <0.254.0> Running boot step rabbit_exchange_parameters defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.818790+00:00 [info] <0.254.0> Running boot step rabbit_mirror_queue_misc defined by app rabbit +rabbitmq | 2025-02-22 
08:42:46.819206+00:00 [info] <0.254.0> Running boot step rabbit_policies defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.819455+00:00 [info] <0.254.0> Running boot step rabbit_policy defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.819517+00:00 [info] <0.254.0> Running boot step rabbit_queue_location_validator defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.819567+00:00 [info] <0.254.0> Running boot step rabbit_quorum_memory_manager defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.819631+00:00 [info] <0.254.0> Running boot step rabbit_quorum_queue defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.819819+00:00 [info] <0.254.0> Running boot step rabbit_stream_coordinator defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.819991+00:00 [info] <0.254.0> Running boot step rabbit_vhost_limit defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.820063+00:00 [info] <0.254.0> Running boot step rabbit_federation_parameters defined by app rabbitmq_federation +rabbitmq | 2025-02-22 08:42:46.820217+00:00 [info] <0.254.0> Running boot step rabbit_federation_supervisor defined by app rabbitmq_federation +rabbitmq | 2025-02-22 08:42:46.827541+00:00 [info] <0.254.0> Running boot step rabbit_federation_queue defined by app rabbitmq_federation +rabbitmq | 2025-02-22 08:42:46.827687+00:00 [info] <0.254.0> Running boot step rabbit_federation_upstream_exchange defined by app rabbitmq_federation +rabbitmq | 2025-02-22 08:42:46.827804+00:00 [info] <0.254.0> Running boot step rabbit_mgmt_db_handler defined by app rabbitmq_management_agent +rabbitmq | 2025-02-22 08:42:46.827883+00:00 [info] <0.254.0> Management plugin: using rates mode 'basic' +rabbitmq | 2025-02-22 08:42:46.828138+00:00 [info] <0.254.0> Running boot step recovery defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.840299+00:00 [info] <0.495.0> Making sure data directory '/var/lib/rabbitmq/mnesia/rabbit@6e754e1ac08a/msg_stores/vhosts/628WB79CIFDYO9LJI6DKMI09L' for vhost '/' exists +rabbitmq | 
2025-02-22 08:42:46.851436+00:00 [info] <0.495.0> Starting message stores for vhost '/' +rabbitmq | 2025-02-22 08:42:46.851662+00:00 [info] <0.504.0> Message store "628WB79CIFDYO9LJI6DKMI09L/msg_store_transient": using rabbit_msg_store_ets_index to provide index +rabbitmq | 2025-02-22 08:42:46.853993+00:00 [info] <0.495.0> Started message store of type transient for vhost '/' +rabbitmq | 2025-02-22 08:42:46.854201+00:00 [info] <0.508.0> Message store "628WB79CIFDYO9LJI6DKMI09L/msg_store_persistent": using rabbit_msg_store_ets_index to provide index +rabbitmq | 2025-02-22 08:42:46.856364+00:00 [info] <0.495.0> Started message store of type persistent for vhost '/' +rabbitmq | 2025-02-22 08:42:46.856475+00:00 [info] <0.495.0> Recovering 0 queues of type rabbit_classic_queue took 15ms +rabbitmq | 2025-02-22 08:42:46.856506+00:00 [info] <0.495.0> Recovering 0 queues of type rabbit_quorum_queue took 0ms +rabbitmq | 2025-02-22 08:42:46.856551+00:00 [info] <0.495.0> Recovering 0 queues of type rabbit_stream_queue took 0ms +rabbitmq | 2025-02-22 08:42:46.858904+00:00 [info] <0.254.0> Running boot step empty_db_check defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.858957+00:00 [info] <0.254.0> Will not seed default virtual host and user: have definitions to load... 
+rabbitmq | 2025-02-22 08:42:46.859002+00:00 [info] <0.254.0> Running boot step rabbit_observer_cli defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859072+00:00 [info] <0.254.0> Running boot step rabbit_looking_glass defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859126+00:00 [info] <0.254.0> Running boot step rabbit_core_metrics_gc defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859285+00:00 [info] <0.254.0> Running boot step background_gc defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859445+00:00 [info] <0.254.0> Running boot step routing_ready defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859493+00:00 [info] <0.254.0> Running boot step pre_flight defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859526+00:00 [info] <0.254.0> Running boot step notify_cluster defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859550+00:00 [info] <0.254.0> Running boot step networking defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859640+00:00 [info] <0.254.0> Running boot step rabbit_quorum_queue_periodic_membership_reconciliation defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859788+00:00 [info] <0.254.0> Running boot step definition_import_worker_pool defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.859858+00:00 [info] <0.339.0> Starting worker pool 'definition_import_pool' with 32 processes in it +rabbitmq | 2025-02-22 08:42:46.861106+00:00 [info] <0.254.0> Running boot step cluster_name defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.861165+00:00 [info] <0.254.0> Running boot step virtual_host_reconciliation defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.861479+00:00 [info] <0.254.0> Running boot step direct_client defined by app rabbit +rabbitmq | 2025-02-22 08:42:46.861617+00:00 [info] <0.254.0> Running boot step rabbit_federation_exchange defined by app rabbitmq_federation +rabbitmq | 2025-02-22 08:42:46.861901+00:00 [info] <0.574.0> Resetting node maintenance status +iotronic-conductor | [INFO] 
Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:42:49.107759+00:00 [info] <0.616.0> Prometheus metrics: HTTP (non-TLS) listener started on port 15692 +rabbitmq | 2025-02-22 08:42:49.108045+00:00 [info] <0.574.0> Ready to start client connection listeners +rabbitmq | 2025-02-22 08:42:49.109786+00:00 [info] <0.660.0> started TCP listener on [::]:5672 +rabbitmq | completed with 4 plugins. +rabbitmq | 2025-02-22 08:42:49.202329+00:00 [info] <0.574.0> Server startup complete; 4 plugins started. +rabbitmq | 2025-02-22 08:42:49.202329+00:00 [info] <0.574.0> * rabbitmq_prometheus +rabbitmq | 2025-02-22 08:42:49.202329+00:00 [info] <0.574.0> * rabbitmq_federation +rabbitmq | 2025-02-22 08:42:49.202329+00:00 [info] <0.574.0> * rabbitmq_management_agent +rabbitmq | 2025-02-22 08:42:49.202329+00:00 [info] <0.574.0> * rabbitmq_web_dispatch +rabbitmq | 2025-02-22 08:42:49.274498+00:00 [info] <0.9.0> Time to start RabbitMQ: 6425 ms +rabbitmq | 2025-02-22 08:42:49.353612+00:00 [info] <0.664.0> accepting AMQP connection <0.664.0> (172.19.0.8:33954 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:42:49.355731+00:00 [info] <0.664.0> connection <0.664.0> (172.19.0.8:33954 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:9902c495-03e2-4b14-8f25-0db4ca1654c6 +rabbitmq | 2025-02-22 08:42:49.355965+00:00 [error] <0.664.0> Error on AMQP connection <0.664.0> (172.19.0.8:33954 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:42:49.355965+00:00 [error] <0.664.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:42:49.356112+00:00 [info] <0.664.0> closing AMQP connection <0.664.0> (172.19.0.8:33954 -> 172.19.0.5:5672 - iotronic-conductor:1:9902c495-03e2-4b14-8f25-0db4ca1654c6) + iotronic-conductor exited with code 0 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... 
+rabbitmq | 2025-02-22 08:42:51.585973+00:00 [info] <0.670.0> accepting AMQP connection <0.670.0> (172.19.0.8:33968 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:42:51.587628+00:00 [info] <0.670.0> connection <0.670.0> (172.19.0.8:33968 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:9023b6a1-0240-4feb-86e0-95e8b42b925c +rabbitmq | 2025-02-22 08:42:51.587978+00:00 [error] <0.670.0> Error on AMQP connection <0.670.0> (172.19.0.8:33968 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:42:51.587978+00:00 [error] <0.670.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:42:51.588368+00:00 [info] <0.670.0> closing AMQP connection <0.670.0> (172.19.0.8:33968 -> 172.19.0.5:5672 - iotronic-conductor:1:9023b6a1-0240-4feb-86e0-95e8b42b925c) + iotronic-conductor exited with code 1 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:42:53.443398+00:00 [info] <0.675.0> accepting AMQP connection <0.675.0> (172.19.0.3:52666 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:42:53.449499+00:00 [info] <0.675.0> connection <0.675.0> (172.19.0.3:52666 -> 172.19.0.5:5672) has a client-provided name: iotronic-wamp-agent:1:048e68c0-9b30-4a1e-a969-e78c9bca7af0 +rabbitmq | 2025-02-22 08:42:53.449757+00:00 [error] <0.675.0> Error on AMQP connection <0.675.0> (172.19.0.3:52666 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:42:53.449757+00:00 [error] <0.675.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:42:53.450033+00:00 [info] <0.675.0> closing AMQP connection <0.675.0> (172.19.0.3:52666 -> 172.19.0.5:5672 - iotronic-wamp-agent:1:048e68c0-9b30-4a1e-a969-e78c9bca7af0) +rabbitmq | 2025-02-22 08:42:53.894870+00:00 [info] <0.687.0> accepting AMQP connection <0.687.0> (172.19.0.8:33972 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:42:53.896893+00:00 [info] 
<0.687.0> connection <0.687.0> (172.19.0.8:33972 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:1e09e797-666c-47b9-a461-2117ab028cbc +rabbitmq | 2025-02-22 08:42:53.897273+00:00 [error] <0.687.0> Error on AMQP connection <0.687.0> (172.19.0.8:33972 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:42:53.897273+00:00 [error] <0.687.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:42:53.897469+00:00 [info] <0.687.0> closing AMQP connection <0.687.0> (172.19.0.8:33972 -> 172.19.0.5:5672 - iotronic-conductor:1:1e09e797-666c-47b9-a461-2117ab028cbc) + iotronic-conductor exited with code 1 +keystone | [INFO] Attesa del database Keystone... +keystone | [INFO] Database pronto! +keystone | [INFO] Creazione delle cartelle per le chiavi Fernet e credenziali... +keystone | [INFO] Verifica delle chiavi Fernet... +keystone | [INFO] Chiavi Fernet già presenti. +keystone | [INFO] Verifica delle credenziali crittografate... +keystone | [INFO] Chiavi di credenziali già presenti. +keystone | [INFO] Sincronizzazione delle tabelle di Keystone... +keystone | [INFO] Configurazione dei token Fernet... +iotronic-conductor | [INFO] Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +keystone | [INFO] Configurazione delle credenziali crittografate... 
+rabbitmq | 2025-02-22 08:42:56.360686+00:00 [info] <0.692.0> accepting AMQP connection <0.692.0> (172.19.0.8:33984 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:42:56.365414+00:00 [info] <0.692.0> connection <0.692.0> (172.19.0.8:33984 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:74c1bf76-4240-4184-ae65-2fb237ff012f +rabbitmq | 2025-02-22 08:42:56.365722+00:00 [error] <0.692.0> Error on AMQP connection <0.692.0> (172.19.0.8:33984 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:42:56.365722+00:00 [error] <0.692.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:42:56.365949+00:00 [info] <0.692.0> closing AMQP connection <0.692.0> (172.19.0.8:33984 -> 172.19.0.5:5672 - iotronic-conductor:1:74c1bf76-4240-4184-ae65-2fb237ff012f) + iotronic-conductor exited with code 1 +keystone | [INFO] Bootstrap di Keystone... +iotronic-conductor | [INFO] Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:42:59.233449+00:00 [info] <0.697.0> accepting AMQP connection <0.697.0> (172.19.0.8:56728 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:42:59.235547+00:00 [info] <0.697.0> connection <0.697.0> (172.19.0.8:56728 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:4d703fe5-cb36-4e38-8dfe-f05cd6f346fd +rabbitmq | 2025-02-22 08:42:59.235753+00:00 [error] <0.697.0> Error on AMQP connection <0.697.0> (172.19.0.8:56728 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:42:59.235753+00:00 [error] <0.697.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:42:59.236248+00:00 [info] <0.697.0> closing AMQP connection <0.697.0> (172.19.0.8:56728 -> 172.19.0.5:5672 - iotronic-conductor:1:4d703fe5-cb36-4e38-8dfe-f05cd6f346fd) + iotronic-conductor exited with code 1 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... 
+iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:43:02.954381+00:00 [info] <0.702.0> accepting AMQP connection <0.702.0> (172.19.0.8:56742 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:43:02.956014+00:00 [info] <0.702.0> connection <0.702.0> (172.19.0.8:56742 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:f077e793-b6cb-448f-babf-30b8ff7db628 +rabbitmq | 2025-02-22 08:43:02.956227+00:00 [error] <0.702.0> Error on AMQP connection <0.702.0> (172.19.0.8:56742 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:43:02.956227+00:00 [error] <0.702.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:43:02.956442+00:00 [info] <0.702.0> closing AMQP connection <0.702.0> (172.19.0.8:56742 -> 172.19.0.5:5672 - iotronic-conductor:1:f077e793-b6cb-448f-babf-30b8ff7db628) + iotronic-conductor exited with code 1 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:43:08.322628+00:00 [info] <0.714.0> accepting AMQP connection <0.714.0> (172.19.0.8:56756 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:43:08.324236+00:00 [info] <0.714.0> connection <0.714.0> (172.19.0.8:56756 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:6d761db6-fee6-42ee-855e-8f4802e5b45b +rabbitmq | 2025-02-22 08:43:08.324481+00:00 [error] <0.714.0> Error on AMQP connection <0.714.0> (172.19.0.8:56756 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:43:08.324481+00:00 [error] <0.714.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:43:08.324687+00:00 [info] <0.714.0> closing AMQP connection <0.714.0> (172.19.0.8:56756 -> 172.19.0.5:5672 - iotronic-conductor:1:6d761db6-fee6-42ee-855e-8f4802e5b45b) + iotronic-conductor exited with code 1 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... 
+iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:43:16.780454+00:00 [info] <0.726.0> accepting AMQP connection <0.726.0> (172.19.0.8:39898 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:43:16.782015+00:00 [info] <0.726.0> connection <0.726.0> (172.19.0.8:39898 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:fc358612-c7ed-40fd-9f06-6b55f270c754 +rabbitmq | 2025-02-22 08:43:16.782274+00:00 [error] <0.726.0> Error on AMQP connection <0.726.0> (172.19.0.8:39898 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:43:16.782274+00:00 [error] <0.726.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:43:16.782758+00:00 [info] <0.726.0> closing AMQP connection <0.726.0> (172.19.0.8:39898 -> 172.19.0.5:5672 - iotronic-conductor:1:fc358612-c7ed-40fd-9f06-6b55f270c754) + iotronic-conductor exited with code 1 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... +iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:43:31.596451+00:00 [info] <0.739.0> accepting AMQP connection <0.739.0> (172.19.0.8:35006 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:43:31.598133+00:00 [info] <0.739.0> connection <0.739.0> (172.19.0.8:35006 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:0b0daa19-5194-4929-846d-537b452d2786 +rabbitmq | 2025-02-22 08:43:31.598312+00:00 [error] <0.739.0> Error on AMQP connection <0.739.0> (172.19.0.8:35006 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:43:31.598312+00:00 [error] <0.739.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:43:31.598449+00:00 [info] <0.739.0> closing AMQP connection <0.739.0> (172.19.0.8:35006 -> 172.19.0.5:5672 - iotronic-conductor:1:0b0daa19-5194-4929-846d-537b452d2786) + iotronic-conductor exited with code 1 +iotronic-conductor | [INFO] Configurazione dei permessi sui log... 
+iotronic-conductor | [INFO] Avvio di Iotronic Conductor... +rabbitmq | 2025-02-22 08:43:59.415043+00:00 [info] <0.766.0> accepting AMQP connection <0.766.0> (172.19.0.8:52420 -> 172.19.0.5:5672) +rabbitmq | 2025-02-22 08:43:59.416840+00:00 [info] <0.766.0> connection <0.766.0> (172.19.0.8:52420 -> 172.19.0.5:5672) has a client-provided name: iotronic-conductor:1:59cec0ce-6c8d-4327-baeb-beb659e9a700 +rabbitmq | 2025-02-22 08:43:59.417043+00:00 [error] <0.766.0> Error on AMQP connection <0.766.0> (172.19.0.8:52420 -> 172.19.0.5:5672, state: starting): +rabbitmq | 2025-02-22 08:43:59.417043+00:00 [error] <0.766.0> AMQPLAIN login refused: user 'openstack' - invalid credentials +rabbitmq | 2025-02-22 08:43:59.417313+00:00 [info] <0.766.0> closing AMQP connection <0.766.0> (172.19.0.8:52420 -> 172.19.0.5:5672 - iotronic-conductor:1:59cec0ce-6c8d-4327-baeb-beb659e9a700) + iotronic-conductor exited with code 1 From 2094e728f1e39b8af73e50a49d1cf5aa758d564c Mon Sep 17 00:00:00 2001 From: trigiu Date: Mon, 24 Feb 2025 17:10:39 +0000 Subject: [PATCH 10/18] new update --- compose_deployment/docker-compose.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/compose_deployment/docker-compose.yml b/compose_deployment/docker-compose.yml index bf498d9..f165946 100644 --- a/compose_deployment/docker-compose.yml +++ b/compose_deployment/docker-compose.yml @@ -98,10 +98,10 @@ services: "options":{ "enable_webstatus": true, "fail_by_drop": true, - "open_handshake_timeout": 2500, + "open_handshake_timeout": 5500, "close_handshake_timeout": 1000, "auto_ping_interval": 30000, - "auto_ping_timeout": 5000, + "auto_ping_timeout": 10000, "auto_ping_size": 13 } } @@ -165,7 +165,7 @@ services: test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-punime"] interval: 10s retries: 5 - start_period: 20s + start_period: 40s timeout: 5s rabbitmq: @@ -297,9 +297,9 @@ services: su -s /bin/sh -c 'openstack role add --project service --user iotronic 
admin_iot_project' keystone; su -s /bin/sh -c 'openstack role add --project admin --user admin admin_iot_project' keystone; echo '[INFO] Iotronic endpoints...'; - su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot public controller:8812' keystone; - su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot internal iotronic-conductor:8812' keystone; - su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot admin iotronic-conductor:8812' keystone; + su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot public http://iotronic-conductor:8812' keystone; + su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot internal http://iotronic-conductor:8812' keystone; + su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot admin http://iotronic-conductor:8812' keystone; su -s /bin/sh -c 'sleep infinity' keystone;" iotronic-conductor: From 2680613b2b0d8da91c131270a081974180541529 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Mon, 24 Feb 2025 22:20:00 +0100 Subject: [PATCH 11/18] Deployment functional with some issues Deployment is fully functional, but issues occur when bringing down and then up the Docker Compose stack --- compose_deployment/docker-compose.yml | 149 +++++++++++++++++--------- 1 file changed, 97 insertions(+), 52 deletions(-) diff --git a/compose_deployment/docker-compose.yml b/compose_deployment/docker-compose.yml index f165946..c09ab81 100644 --- a/compose_deployment/docker-compose.yml +++ b/compose_deployment/docker-compose.yml @@ -98,11 +98,11 @@ services: "options":{ "enable_webstatus": true, "fail_by_drop": true, - "open_handshake_timeout": 5500, - "close_handshake_timeout": 1000, - "auto_ping_interval": 30000, + "open_handshake_timeout": 5000, + "close_handshake_timeout": 2000, + "auto_ping_interval": 50000, "auto_ping_timeout": 10000, - "auto_ping_size": 13 + "auto_ping_size": 15 } } ] @@ -157,40 +157,67 @@ services: ports: - 
"3306:3306" volumes: - - unime_test_iotronic_db_data:/var/lib/mysql - - unime_test_iotronic_db_config:/etc/mysql - - ./create_dbs.sql:/docker-entrypoint-initdb.d/create_dbs.sql - - ./99-openstack.conf:/etc/mysql/99-openstack.cnf + - unime_iotronic_db_data:/var/lib/mysql healthcheck: test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-punime"] interval: 10s retries: 5 start_period: 40s timeout: 5s + command: > + mysqld + --bind-address=0.0.0.0 + --max_connections=4096 + --collation-server=utf8_general_ci + --character-set-server=utf8 + --innodb-buffer-pool-size=1G + --innodb-flush-log-at-trx-commit=1 + --innodb-file-per-table=1 + rabbitmq: - image: rabbitmq:3 + image: rabbitmq:3.7-management container_name: rabbitmq - restart: unless-stopped - networks: - - s4t + restart: always + environment: + RABBITMQ_DEFAULT_USER: openstack + RABBITMQ_DEFAULT_PASS: unime ports: - "5672:5672" - environment: - RABBIT_PASS: "unime" + - "15672:15672" volumes: - - ./conf-rabbit-script.sh:/docker-entrypoint-initRabbit.sh + - rabbitmq_data:/var/lib/rabbitmq + networks: + - s4t healthcheck: test: ["CMD", "rabbitmqctl", "status"] interval: 10s retries: 5 - start_period: 20s - timeout: 5s - entrypoint: ["/bin/bash", "-c"] - command: - - | - chmod +x /docker-entrypoint-initRabbit.sh - /bin/bash /docker-entrypoint-initRabbit.sh + start_period: 10s + + rabbitmq-setup: + image: rabbitmq:3.7-management + depends_on: + rabbitmq: + condition: service_healthy + networks: + - s4t + entrypoint: ["/bin/sh", "-c"] + command: > + echo 'Aspettando RabbitMQ...'; + for i in {1..30}; do + if rabbitmqctl status; then + echo 'RabbitMQ è pronto!'; + break; + fi + echo 'RabbitMQ non ancora pronto, riprovo in 5 secondi...'; + sleep 5; + done; + echo "Configurazione dell'utente openstack..."; + rabbitmqctl add_user openstack unime; + rabbitmqctl set_permissions openstack '.*' '.*' '.*'; + rabbitmqctl set_user_tags openstack administrator; + echo 'Utente openstack creato con successo.'; 
keystone: @@ -205,7 +232,6 @@ services: networks: - s4t environment: - # Credenziali admin e impostazioni Keystone ADMIN_PASS: "s4t" OS_USERNAME: "admin" OS_PASSWORD: "s4t" @@ -237,75 +263,82 @@ services: sleep 5; done; echo '[INFO] Database pronto!'; + + echo '[INFO] Configurazione del database Keystone...'; mysql -u root -punime -h iotronic-db -e \"CREATE DATABASE IF NOT EXISTS keystone; CREATE DATABASE IF NOT EXISTS iotronic; - DROP USER IF EXISTS 'keystone'@'localhost'; - DROP USER IF EXISTS 'keystone'@'%'; - CREATE USER 'keystone'@'localhost' IDENTIFIED BY 'unime'; - CREATE USER 'keystone'@'%' IDENTIFIED BY 'unime'; - GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'; + CREATE USER IF NOT EXISTS 'keystone'@'%' IDENTIFIED BY 'unime'; + CREATE USER IF NOT EXISTS 'iotronic'@'%' IDENTIFIED BY 'unime'; GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'; - DROP USER IF EXISTS 'iotronic'@'localhost'; - DROP USER IF EXISTS 'iotronic'@'%'; - CREATE USER 'iotronic'@'localhost' IDENTIFIED BY 'unime'; - CREATE USER 'iotronic'@'%' IDENTIFIED BY 'unime'; - GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'localhost'; GRANT ALL PRIVILEGES ON iotronic.* TO 'iotronic'@'%'; - FLUSH PRIVILEGES;\"; + FLUSH PRIVILEGES;\" + + echo '[INFO] Creazione delle cartelle per le chiavi Fernet e credenziali...'; mkdir -p /etc/keystone/fernet-keys; mkdir -p /etc/keystone/credential-keys; chown -R keystone:keystone /etc/keystone; - echo '[INFO] Verifica delle chiavi Fernet...'; + echo '[INFO] Controllo chiavi Fernet...'; if [ ! 
-f /etc/keystone/fernet-keys/0 ]; then echo '[INFO] Nessuna chiave Fernet trovata, eseguo fernet_setup...'; su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; else - echo '[INFO] Chiavi Fernet già presenti.'; + echo '[INFO] Chiavi Fernet già presenti, salto la generazione.'; fi - echo '[INFO] Verifica delle credenziali crittografate...'; + echo '[INFO] Controllo chiavi di credenziali crittografate...'; if [ ! -f /etc/keystone/credential-keys/0 ]; then echo '[INFO] Nessuna chiave di credenziali trovata, eseguo credential_setup...'; su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; else - echo '[INFO] Chiavi di credenziali già presenti.'; + echo '[INFO] Chiavi di credenziali già presenti, salto la generazione.'; fi echo '[INFO] Sincronizzazione delle tabelle di Keystone...'; su -s /bin/sh -c 'keystone-manage db_sync' keystone; - echo '[INFO] Configurazione dei token Fernet...'; - su -s /bin/sh -c 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone' keystone; - echo '[INFO] Configurazione delle credenziali crittografate...'; - su -s /bin/sh -c 'keystone-manage credential_setup --keystone-user keystone --keystone-group keystone' keystone; + echo '[INFO] Bootstrap di Keystone...'; su -s /bin/sh -c 'keystone-manage bootstrap --bootstrap-password s4t --bootstrap-admin-url http://keystone:5000/v3 --bootstrap-internal-url http://keystone:5000/v3 --bootstrap-public-url http://keystone:5000/v3 --bootstrap-region-id RegionOne' keystone; - exec apache2ctl -D FOREGROUND & - sleep 20; + + echo '[INFO] Verifica e avvio di Apache...'; + if ! 
pgrep -x "apache2" > /dev/null; then + echo '[INFO] Apache non è in esecuzione, lo avvio...'; + exec apache2ctl -D FOREGROUND & + else + echo '[INFO] Apache è già in esecuzione.'; + fi + echo '[INFO] Creazione dei servizi di Iotronic...'; su -s /bin/sh -c 'openstack project create --domain default --description \"Service Project\" service' keystone; su -s /bin/sh -c 'openstack service create iot --name Iotronic' keystone; echo '[INFO] Iotronic User Create...'; su -s /bin/sh -c 'openstack user create --password unime iotronic' keystone; + echo '[INFO] Iotronic roles...'; su -s /bin/sh -c 'openstack role add --project service --user iotronic admin' keystone; su -s /bin/sh -c 'openstack role create admin_iot_project' keystone; su -s /bin/sh -c 'openstack role create manager_iot_project' keystone; - su -s /bin/sh -c 'openstack role create admin_iot_project' keystone; su -s /bin/sh -c 'openstack role create user_iot' keystone; su -s /bin/sh -c 'openstack role add --project service --user iotronic admin_iot_project' keystone; su -s /bin/sh -c 'openstack role add --project admin --user admin admin_iot_project' keystone; + echo '[INFO] Iotronic endpoints...'; su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot public http://iotronic-conductor:8812' keystone; su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot internal http://iotronic-conductor:8812' keystone; su -s /bin/sh -c 'openstack endpoint create --region RegionOne iot admin http://iotronic-conductor:8812' keystone; - su -s /bin/sh -c 'sleep infinity' keystone;" + + exec tail -f /dev/null" iotronic-conductor: image: lucadagati/iotronic-conductor:latest container_name: iotronic-conductor restart: unless-stopped + depends_on: + iotronic-db: + condition: service_healthy + rabbitmq: + condition: service_healthy networks: - s4t environment: @@ -333,8 +366,9 @@ services: - iotronic_logs:/var/log/iotronic command: > /bin/bash -c " + apt-get update && apt-get install -y mysql-client nano; echo 
'[INFO] Attesa del database MySQL...'; - until mysql -h iotronic-db -uroot -punime -e 'SELECT 1' >/dev/null 2>&1; do + until mysql -h iotronic-db -uiotronic -punime -e 'SELECT 1' >/dev/null 2>&1; do echo '[INFO] Database non ancora pronto, riprovo...'; sleep 5; done; @@ -342,12 +376,17 @@ services: echo '[INFO] Configurazione dei permessi sui log...'; chown -R iotronic:iotronic /var/log/iotronic; echo '[INFO] Avvio di Iotronic Conductor...'; - iotronic-conductor" + exec /usr/local/bin/startConductor" - wagent: + iotronic-wagent: image: lucadagati/iotronic-wagent:latest container_name: iotronic-wagent restart: unless-stopped + depends_on: + iotronic-db: + condition: service_healthy + rabbitmq: + condition: service_healthy networks: - s4t environment: @@ -379,10 +418,15 @@ services: image: lucadagati/iotronic-ui:latest container_name: iotronic-ui restart: unless-stopped + depends_on: + iotronic-db: + condition: service_healthy + rabbitmq: + condition: service_healthy networks: - s4t ports: - - "8585:80" + - "80:80" volumes: - iotronic-ui_config:/etc/openstack-dashboard - iotronic-ui_logs:/var/log/apache2 @@ -401,5 +445,6 @@ volumes: crossbar_data: ca_data: iotronic_ssl: - unime_test_iotronic_db_data: - unime_test_iotronic_db_config: + unime_iotronic_db_data: + rabbitmq_data: + From d871cb1b4a282831ffc528cb5bd9136278f598d1 Mon Sep 17 00:00:00 2001 From: Luca D'Agati <66645997+lucadagati@users.noreply.github.com> Date: Mon, 24 Feb 2025 22:20:53 +0100 Subject: [PATCH 12/18] Deployment functional with some issues Deployment is fully functional, but issues occur when bringing down and then up the Docker Compose stack --- compose_deployment/conf_conductor/iotronic.conf | 4 ++-- compose_deployment/conf_wagent/iotronic.conf | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/compose_deployment/conf_conductor/iotronic.conf b/compose_deployment/conf_conductor/iotronic.conf index 30371a1..d32ed14 100644 --- a/compose_deployment/conf_conductor/iotronic.conf +++ 
b/compose_deployment/conf_conductor/iotronic.conf @@ -23,10 +23,10 @@ service_port_min=50000 service_port_max=50100 [wamp] -wamp_transport_url = ws://iotronic-wagent:8181/ +wamp_transport_url = wss://crossbar:8181/ wamp_realm = s4t #skip_cert_verify= False -register_agent = True +#register_agent = True diff --git a/compose_deployment/conf_wagent/iotronic.conf b/compose_deployment/conf_wagent/iotronic.conf index c096c3c..f107130 100644 --- a/compose_deployment/conf_wagent/iotronic.conf +++ b/compose_deployment/conf_wagent/iotronic.conf @@ -18,7 +18,7 @@ auth_strategy=keystone [wamp] -wamp_transport_url = wss://iotronic-wagent:8181/ +wamp_transport_url = wss://crossbar:8181/ wamp_realm = s4t skip_cert_verify= True register_agent = True From a1ab46078eb002ed5c7d56eb8824a4635d5c69ac Mon Sep 17 00:00:00 2001 From: Fabio Orazio Mirto Date: Tue, 4 Mar 2025 11:10:46 +0100 Subject: [PATCH 13/18] Deleted folder of previous version, keep just compose folder; added chmod on keystone container --- 0-CA/0-CA_create | 141 -- 0-CA/ca_create | 22 - 0-CA/client_cert | 41 - 0-docker | 35 - 1-iotronic-crossbar/1-crossbar | 38 - 1-iotronic-crossbar/confs/config.json | 56 - 1-iotronic-wstun/1-wstun | 30 - 1-iotronic-wstun/Dockerfile | 44 - 1-iotronic-wstun/confs/bin/wstun.js | 111 -- 1-iotronic-wstun/confs/wstun-1.0.11.tar.gz | Bin 12818 -> 0 bytes 1-rabbitmq | 22 - 2-mysql/2-mysql | 110 -- 2-mysql/99-openstack.conf | 8 - 2-mysql/mysql-config.env | 21 - 2-new-mysql/1-mysql | 27 - 2-new-mysql/99-openstack.conf | 8 - 2-new-mysql/Dockerfile | 5 - 2-new-mysql/build | 4 - 2-new-mysql/create_dbs.sql | 9 - 2-new-mysql/create_dbs_FELOOCA_TEST.sql | 6 - 2-new-mysql/create_dbs_UNIME_TEST.sql | 12 - 2-new-mysql/initfile.sql | 20 - 3-keystone/3-keystone | 104 -- 3-keystone/3.5-keystone | 79 - 3-keystone/build/Dockerfile | 38 - 3-keystone/build/build | 4 - 3-keystone/conf/adminrc | 8 - 3-keystone/keystone-config.env | 26 - 4-conductor/4-conductor | 109 -- 4-conductor/build/Dockerfile | 95 -- 
4-conductor/conductor-config.env | 22 - 4-conductor/conf/iotronic.conf | 102 -- 4-conductor/create_IotronicDB_if_needed | 18 - 5-wagent/5-wagent | 70 - 5-wagent/Dockerfile | 72 - 5-wagent/build/Dockerfile | 72 - 5-wagent/conf/iotronic.conf | 95 -- 5-wagent/wagent-config.env | 22 - 6-ui/6-ui | 56 - 6-ui/build/Dockerfile | 66 - 6-ui/build/bin/startUI | 32 - 6-ui/conf/local_settings.py | 916 ----------- README.md | 157 -- compose_deployment/docker-compose.yml | 1 + iotronic/CONTRIBUTING.rst | 17 - iotronic/HACKING.rst | 4 - iotronic/LICENSE | 176 --- iotronic/MANIFEST.in | 6 - iotronic/README.rst | 223 --- iotronic/babel.cfg | 2 - iotronic/doc/source/conf.py | 75 - iotronic/doc/source/contributing.rst | 4 - iotronic/doc/source/index.rst | 25 - iotronic/doc/source/installation.rst | 12 - iotronic/doc/source/readme.rst | 1 - iotronic/doc/source/usage.rst | 7 - iotronic/etc/apache2/iotronic-ssl.conf | 39 - iotronic/etc/apache2/iotronic.conf | 37 - iotronic/etc/iotronic/iotronic.conf | 98 -- iotronic/etc/iotronic/policy.json | 2 - .../systemd/system/iotronic-conductor.service | 9 - .../system/iotronic-wamp-agent.service | 9 - iotronic/iotronic/__init__.py | 0 iotronic/iotronic/api/__init__.py | 0 iotronic/iotronic/api/app.py | 156 -- iotronic/iotronic/api/config.py | 33 - iotronic/iotronic/api/controllers/__init__.py | 0 iotronic/iotronic/api/controllers/base.py | 115 -- iotronic/iotronic/api/controllers/link.py | 51 - iotronic/iotronic/api/controllers/root.py | 115 -- .../iotronic/api/controllers/v1/__init__.py | 222 --- iotronic/iotronic/api/controllers/v1/board.py | 1032 ------------- .../iotronic/api/controllers/v1/collection.py | 48 - .../api/controllers/v1/enabledwebservice.py | 162 -- iotronic/iotronic/api/controllers/v1/fleet.py | 351 ----- .../iotronic/api/controllers/v1/location.py | 45 - .../iotronic/api/controllers/v1/plugin.py | 360 ----- iotronic/iotronic/api/controllers/v1/port.py | 202 --- .../iotronic/api/controllers/v1/request.py | 296 ---- 
.../iotronic/api/controllers/v1/result.py | 214 --- .../iotronic/api/controllers/v1/service.py | 362 ----- iotronic/iotronic/api/controllers/v1/types.py | 360 ----- iotronic/iotronic/api/controllers/v1/utils.py | 270 ---- .../iotronic/api/controllers/v1/versions.py | 25 - .../iotronic/api/controllers/v1/webservice.py | 277 ---- iotronic/iotronic/api/expose.py | 24 - iotronic/iotronic/api/hooks.py | 145 -- iotronic/iotronic/api/middleware/__init__.py | 23 - .../iotronic/api/middleware/auth_token.py | 64 - .../iotronic/api/middleware/parsable_error.py | 96 -- iotronic/iotronic/cmd/__init__.py | 0 iotronic/iotronic/cmd/conductor.py | 28 - iotronic/iotronic/cmd/dbsync.py | 330 ---- iotronic/iotronic/cmd/wamp_agent.py | 27 - iotronic/iotronic/common/__init__.py | 0 iotronic/iotronic/common/config.py | 31 - iotronic/iotronic/common/context.py | 94 -- iotronic/iotronic/common/designate.py | 92 -- iotronic/iotronic/common/exception.py | 680 --------- iotronic/iotronic/common/hash_ring.py | 200 --- iotronic/iotronic/common/i18n.py | 31 - iotronic/iotronic/common/keystone.py | 139 -- iotronic/iotronic/common/neutron.py | 245 --- iotronic/iotronic/common/paths.py | 64 - iotronic/iotronic/common/policy.py | 364 ----- iotronic/iotronic/common/rpc.py | 152 -- iotronic/iotronic/common/service.py | 148 -- iotronic/iotronic/common/states.py | 22 - iotronic/iotronic/common/utils.py | 617 -------- iotronic/iotronic/conductor/__init__.py | 0 iotronic/iotronic/conductor/endpoints.py | 1260 ---------------- iotronic/iotronic/conductor/manager.py | 131 -- iotronic/iotronic/conductor/provisioner.py | 82 - iotronic/iotronic/conductor/rpcapi.py | 433 ------ iotronic/iotronic/db/__init__.py | 0 iotronic/iotronic/db/api.py | 786 ---------- iotronic/iotronic/db/migration.py | 56 - iotronic/iotronic/db/sqlalchemy/__init__.py | 0 iotronic/iotronic/db/sqlalchemy/alembic.ini | 54 - .../iotronic/db/sqlalchemy/alembic/README | 12 - .../iotronic/db/sqlalchemy/alembic/env.py | 61 - 
.../db/sqlalchemy/alembic/script.py.mako | 18 - ...60765f337_extended_board_code_attribute.py | 23 - .../57a99337e843_hard_device_delete.py | 64 - .../76c628d60004_add_results_requests.py | 60 - .../9c5c34dfd9f1_add_boards_in_fleet.py | 27 - .../versions/b578199e4e64_add_fleets.py | 37 - .../versions/b98819997377_boards_lr_data.py | 31 - .../versions/df35e9cbeaff_init_schema.py | 177 --- .../versions/f28f3f4494b3_add_webservices.py | 61 - iotronic/iotronic/db/sqlalchemy/api.py | 1341 ----------------- iotronic/iotronic/db/sqlalchemy/migration.py | 113 -- iotronic/iotronic/db/sqlalchemy/models.py | 352 ----- iotronic/iotronic/objects/__init__.py | 63 - iotronic/iotronic/objects/base.py | 598 -------- iotronic/iotronic/objects/board.py | 271 ---- iotronic/iotronic/objects/conductor.py | 80 - .../iotronic/objects/enabledwebservice.py | 226 --- iotronic/iotronic/objects/exposedservice.py | 176 --- iotronic/iotronic/objects/fleet.py | 189 --- iotronic/iotronic/objects/injectionplugin.py | 165 -- iotronic/iotronic/objects/location.py | 240 --- iotronic/iotronic/objects/plugin.py | 210 --- iotronic/iotronic/objects/port.py | 202 --- iotronic/iotronic/objects/request.py | 198 --- iotronic/iotronic/objects/result.py | 155 -- iotronic/iotronic/objects/service.py | 199 --- iotronic/iotronic/objects/sessionwp.py | 164 -- iotronic/iotronic/objects/utils.py | 135 -- iotronic/iotronic/objects/wampagent.py | 114 -- iotronic/iotronic/objects/webservice.py | 190 --- iotronic/iotronic/openstack/__init__.py | 0 .../iotronic/openstack/common/__init__.py | 0 iotronic/iotronic/openstack/common/_i18n.py | 45 - .../openstack/common/eventlet_backdoor.py | 152 -- .../iotronic/openstack/common/fileutils.py | 149 -- .../iotronic/openstack/common/imageutils.py | 152 -- .../iotronic/openstack/common/loopingcall.py | 149 -- .../openstack/common/periodic_task.py | 234 --- iotronic/iotronic/openstack/common/service.py | 520 ------- iotronic/iotronic/openstack/common/systemd.py | 105 -- 
.../iotronic/openstack/common/threadgroup.py | 151 -- .../iotronic/openstack/common/versionutils.py | 262 ---- iotronic/iotronic/version.py | 18 - iotronic/iotronic/wamp/__init__.py | 0 iotronic/iotronic/wamp/agent.py | 418 ----- iotronic/iotronic/wamp/functions.py | 206 --- iotronic/iotronic/wamp/proxies/__init__.py | 0 iotronic/iotronic/wamp/proxies/nginx.py | 151 -- iotronic/iotronic/wamp/proxies/proxy.py | 34 - iotronic/iotronic/wamp/wampmessage.py | 58 - iotronic/iotronic/wsgi/app.wsgi | 36 - iotronic/requirements.txt | 22 - iotronic/setup.cfg | 70 - iotronic/setup.py | 29 - iotronic/test-requirements.txt | 21 - iotronic/tox.ini | 62 - iotronic/utils/all.sql | 84 -- iotronic/utils/iotronic.sql | 288 ---- iotronic/utils/loaddb | 3 - iotronic/utils/remove_board.sql | 11 - iotronic/utils/test_plugins/runner.py | 36 - iotronic/utils/test_plugins/zero.py | 33 - iotronic/zuul.d/projects.yaml | 7 - 184 files changed, 1 insertion(+), 24251 deletions(-) delete mode 100755 0-CA/0-CA_create delete mode 100755 0-CA/ca_create delete mode 100755 0-CA/client_cert delete mode 100755 0-docker delete mode 100755 1-iotronic-crossbar/1-crossbar delete mode 100644 1-iotronic-crossbar/confs/config.json delete mode 100755 1-iotronic-wstun/1-wstun delete mode 100644 1-iotronic-wstun/Dockerfile delete mode 100755 1-iotronic-wstun/confs/bin/wstun.js delete mode 100644 1-iotronic-wstun/confs/wstun-1.0.11.tar.gz delete mode 100755 1-rabbitmq delete mode 100755 2-mysql/2-mysql delete mode 100644 2-mysql/99-openstack.conf delete mode 100644 2-mysql/mysql-config.env delete mode 100755 2-new-mysql/1-mysql delete mode 100644 2-new-mysql/99-openstack.conf delete mode 100644 2-new-mysql/Dockerfile delete mode 100755 2-new-mysql/build delete mode 100644 2-new-mysql/create_dbs.sql delete mode 100644 2-new-mysql/create_dbs_FELOOCA_TEST.sql delete mode 100644 2-new-mysql/create_dbs_UNIME_TEST.sql delete mode 100644 2-new-mysql/initfile.sql delete mode 100755 3-keystone/3-keystone delete mode 100755 
3-keystone/3.5-keystone delete mode 100644 3-keystone/build/Dockerfile delete mode 100755 3-keystone/build/build delete mode 100644 3-keystone/conf/adminrc delete mode 100644 3-keystone/keystone-config.env delete mode 100755 4-conductor/4-conductor delete mode 100644 4-conductor/build/Dockerfile delete mode 100644 4-conductor/conductor-config.env delete mode 100644 4-conductor/conf/iotronic.conf delete mode 100755 4-conductor/create_IotronicDB_if_needed delete mode 100755 5-wagent/5-wagent delete mode 100644 5-wagent/Dockerfile delete mode 100644 5-wagent/build/Dockerfile delete mode 100644 5-wagent/conf/iotronic.conf delete mode 100644 5-wagent/wagent-config.env delete mode 100755 6-ui/6-ui delete mode 100644 6-ui/build/Dockerfile delete mode 100755 6-ui/build/bin/startUI delete mode 100644 6-ui/conf/local_settings.py delete mode 100644 README.md delete mode 100644 iotronic/CONTRIBUTING.rst delete mode 100644 iotronic/HACKING.rst delete mode 100644 iotronic/LICENSE delete mode 100644 iotronic/MANIFEST.in delete mode 100644 iotronic/README.rst delete mode 100644 iotronic/babel.cfg delete mode 100755 iotronic/doc/source/conf.py delete mode 100644 iotronic/doc/source/contributing.rst delete mode 100644 iotronic/doc/source/index.rst delete mode 100644 iotronic/doc/source/installation.rst delete mode 100644 iotronic/doc/source/readme.rst delete mode 100644 iotronic/doc/source/usage.rst delete mode 100644 iotronic/etc/apache2/iotronic-ssl.conf delete mode 100644 iotronic/etc/apache2/iotronic.conf delete mode 100644 iotronic/etc/iotronic/iotronic.conf delete mode 100644 iotronic/etc/iotronic/policy.json delete mode 100644 iotronic/etc/systemd/system/iotronic-conductor.service delete mode 100644 iotronic/etc/systemd/system/iotronic-wamp-agent.service delete mode 100644 iotronic/iotronic/__init__.py delete mode 100644 iotronic/iotronic/api/__init__.py delete mode 100644 iotronic/iotronic/api/app.py delete mode 100644 iotronic/iotronic/api/config.py delete mode 100644 
iotronic/iotronic/api/controllers/__init__.py delete mode 100644 iotronic/iotronic/api/controllers/base.py delete mode 100644 iotronic/iotronic/api/controllers/link.py delete mode 100644 iotronic/iotronic/api/controllers/root.py delete mode 100644 iotronic/iotronic/api/controllers/v1/__init__.py delete mode 100644 iotronic/iotronic/api/controllers/v1/board.py delete mode 100644 iotronic/iotronic/api/controllers/v1/collection.py delete mode 100644 iotronic/iotronic/api/controllers/v1/enabledwebservice.py delete mode 100644 iotronic/iotronic/api/controllers/v1/fleet.py delete mode 100644 iotronic/iotronic/api/controllers/v1/location.py delete mode 100644 iotronic/iotronic/api/controllers/v1/plugin.py delete mode 100644 iotronic/iotronic/api/controllers/v1/port.py delete mode 100644 iotronic/iotronic/api/controllers/v1/request.py delete mode 100644 iotronic/iotronic/api/controllers/v1/result.py delete mode 100644 iotronic/iotronic/api/controllers/v1/service.py delete mode 100644 iotronic/iotronic/api/controllers/v1/types.py delete mode 100644 iotronic/iotronic/api/controllers/v1/utils.py delete mode 100644 iotronic/iotronic/api/controllers/v1/versions.py delete mode 100644 iotronic/iotronic/api/controllers/v1/webservice.py delete mode 100644 iotronic/iotronic/api/expose.py delete mode 100644 iotronic/iotronic/api/hooks.py delete mode 100644 iotronic/iotronic/api/middleware/__init__.py delete mode 100644 iotronic/iotronic/api/middleware/auth_token.py delete mode 100644 iotronic/iotronic/api/middleware/parsable_error.py delete mode 100644 iotronic/iotronic/cmd/__init__.py delete mode 100755 iotronic/iotronic/cmd/conductor.py delete mode 100644 iotronic/iotronic/cmd/dbsync.py delete mode 100755 iotronic/iotronic/cmd/wamp_agent.py delete mode 100644 iotronic/iotronic/common/__init__.py delete mode 100644 iotronic/iotronic/common/config.py delete mode 100644 iotronic/iotronic/common/context.py delete mode 100644 iotronic/iotronic/common/designate.py delete mode 100644 
iotronic/iotronic/common/exception.py delete mode 100644 iotronic/iotronic/common/hash_ring.py delete mode 100644 iotronic/iotronic/common/i18n.py delete mode 100644 iotronic/iotronic/common/keystone.py delete mode 100644 iotronic/iotronic/common/neutron.py delete mode 100644 iotronic/iotronic/common/paths.py delete mode 100644 iotronic/iotronic/common/policy.py delete mode 100644 iotronic/iotronic/common/rpc.py delete mode 100644 iotronic/iotronic/common/service.py delete mode 100644 iotronic/iotronic/common/states.py delete mode 100644 iotronic/iotronic/common/utils.py delete mode 100644 iotronic/iotronic/conductor/__init__.py delete mode 100644 iotronic/iotronic/conductor/endpoints.py delete mode 100644 iotronic/iotronic/conductor/manager.py delete mode 100644 iotronic/iotronic/conductor/provisioner.py delete mode 100644 iotronic/iotronic/conductor/rpcapi.py delete mode 100644 iotronic/iotronic/db/__init__.py delete mode 100644 iotronic/iotronic/db/api.py delete mode 100644 iotronic/iotronic/db/migration.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/__init__.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic.ini delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/README delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/env.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/script.py.mako delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/10460765f337_extended_board_code_attribute.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/57a99337e843_hard_device_delete.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/76c628d60004_add_results_requests.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/9c5c34dfd9f1_add_boards_in_fleet.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/b578199e4e64_add_fleets.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/b98819997377_boards_lr_data.py delete mode 100644 
iotronic/iotronic/db/sqlalchemy/alembic/versions/df35e9cbeaff_init_schema.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/alembic/versions/f28f3f4494b3_add_webservices.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/api.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/migration.py delete mode 100644 iotronic/iotronic/db/sqlalchemy/models.py delete mode 100644 iotronic/iotronic/objects/__init__.py delete mode 100644 iotronic/iotronic/objects/base.py delete mode 100644 iotronic/iotronic/objects/board.py delete mode 100644 iotronic/iotronic/objects/conductor.py delete mode 100644 iotronic/iotronic/objects/enabledwebservice.py delete mode 100644 iotronic/iotronic/objects/exposedservice.py delete mode 100644 iotronic/iotronic/objects/fleet.py delete mode 100644 iotronic/iotronic/objects/injectionplugin.py delete mode 100644 iotronic/iotronic/objects/location.py delete mode 100644 iotronic/iotronic/objects/plugin.py delete mode 100644 iotronic/iotronic/objects/port.py delete mode 100644 iotronic/iotronic/objects/request.py delete mode 100644 iotronic/iotronic/objects/result.py delete mode 100644 iotronic/iotronic/objects/service.py delete mode 100644 iotronic/iotronic/objects/sessionwp.py delete mode 100644 iotronic/iotronic/objects/utils.py delete mode 100644 iotronic/iotronic/objects/wampagent.py delete mode 100644 iotronic/iotronic/objects/webservice.py delete mode 100644 iotronic/iotronic/openstack/__init__.py delete mode 100644 iotronic/iotronic/openstack/common/__init__.py delete mode 100644 iotronic/iotronic/openstack/common/_i18n.py delete mode 100644 iotronic/iotronic/openstack/common/eventlet_backdoor.py delete mode 100644 iotronic/iotronic/openstack/common/fileutils.py delete mode 100644 iotronic/iotronic/openstack/common/imageutils.py delete mode 100644 iotronic/iotronic/openstack/common/loopingcall.py delete mode 100644 iotronic/iotronic/openstack/common/periodic_task.py delete mode 100644 iotronic/iotronic/openstack/common/service.py delete 
mode 100644 iotronic/iotronic/openstack/common/systemd.py delete mode 100644 iotronic/iotronic/openstack/common/threadgroup.py delete mode 100644 iotronic/iotronic/openstack/common/versionutils.py delete mode 100644 iotronic/iotronic/version.py delete mode 100644 iotronic/iotronic/wamp/__init__.py delete mode 100644 iotronic/iotronic/wamp/agent.py delete mode 100644 iotronic/iotronic/wamp/functions.py delete mode 100644 iotronic/iotronic/wamp/proxies/__init__.py delete mode 100644 iotronic/iotronic/wamp/proxies/nginx.py delete mode 100644 iotronic/iotronic/wamp/proxies/proxy.py delete mode 100644 iotronic/iotronic/wamp/wampmessage.py delete mode 100644 iotronic/iotronic/wsgi/app.wsgi delete mode 100644 iotronic/requirements.txt delete mode 100644 iotronic/setup.cfg delete mode 100644 iotronic/setup.py delete mode 100644 iotronic/test-requirements.txt delete mode 100644 iotronic/tox.ini delete mode 100644 iotronic/utils/all.sql delete mode 100644 iotronic/utils/iotronic.sql delete mode 100755 iotronic/utils/loaddb delete mode 100644 iotronic/utils/remove_board.sql delete mode 100644 iotronic/utils/test_plugins/runner.py delete mode 100644 iotronic/utils/test_plugins/zero.py delete mode 100644 iotronic/zuul.d/projects.yaml diff --git a/0-CA/0-CA_create b/0-CA/0-CA_create deleted file mode 100755 index 17ff804..0000000 --- a/0-CA/0-CA_create +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash - -# Configurazione globale -BIT_ENC=2048 -CA_NAME="S4T" -EXPIRATION=18250 -BASE_DIR=$(pwd) -TARGET_DIR="/etc/ssl/iotronic" -CLIENTS=() - -# Funzione per stampare messaggi di errore -function error_message { - echo -e "\e[31m[ERROR]\e[0m $1" -} - -# Funzione per stampare messaggi di informazione -function info { - echo -e "\e[34m[INFO]\e[0m $1" -} - -# Funzione per stampare messaggi di successo -function success { - echo -e "\e[32m[SUCCESS]\e[0m $1" -} - -# Funzione per chiedere azione tramite scelta numerica -function numeric_choice { - echo -e "\e[33m[WARNING]\e[0m Il $2 '$1' esiste 
già. Scegli un'opzione:" - echo "1) Sovrascrivere" - echo "2) Saltare" - echo "3) Visualizzare" - while true; do - read -p "Inserisci il numero dell'opzione desiderata: " choice - case $choice in - 1) return 0 ;; # Sovrascrivere - 2) return 1 ;; # Saltare - 3) - if [ "$2" == "file" ]; then - cat "$1" - else - ls -l "$1" - fi - ;; - *) echo "Opzione non valida. Scegli 1, 2 o 3." ;; - esac - done -} - -# Funzione per creare la root CA -function create_ca { - info "Creazione della Root CA..." - CA_DIR="${BASE_DIR}/CA" - - if [ -d "$CA_DIR" ]; then - numeric_choice "$CA_DIR" "directory" || return - rm -rf "$CA_DIR" - fi - - mkdir -p "$CA_DIR" - - # Genera la chiave della root CA - openssl genrsa -out "${CA_DIR}/${CA_NAME}_CA.key" $BIT_ENC || error_message "Errore nella generazione della chiave CA" - openssl rsa -in "${CA_DIR}/${CA_NAME}_CA.key" -check || error_message "La verifica della chiave CA è fallita" - - # Genera il certificato della root CA - openssl req -x509 -new -nodes -key "${CA_DIR}/${CA_NAME}_CA.key" -sha256 -days $EXPIRATION \ - -subj "/C=IT/O=$CA_NAME" -out "${CA_DIR}/${CA_NAME}_CA.pem" || error_message "Errore nella generazione del certificato CA" - openssl x509 -in "${CA_DIR}/${CA_NAME}_CA.pem" -text -noout || error_message "Errore nella verifica del certificato CA" - - # Copia la CA nella directory di destinazione - mkdir -p "${TARGET_DIR}/CA" - cp "${CA_DIR}/${CA_NAME}_CA.key" "${CA_DIR}/${CA_NAME}_CA.pem" "${TARGET_DIR}/CA/" || error_message "Errore nella copia della root CA" - - success "Root CA creata e copiata in ${TARGET_DIR}/CA/" -} - -# Funzione per creare un certificato client -function create_client_cert { - CLIENT_CN="$1" - info "Creazione del certificato per il client: $CLIENT_CN..." 
- CLIENT_DIR="${BASE_DIR}/client_${CLIENT_CN}" - CA_DIR="${BASE_DIR}/CA" - - if [ -d "$CLIENT_DIR" ]; then - numeric_choice "$CLIENT_DIR" "directory" || return - rm -rf "$CLIENT_DIR" - fi - - mkdir -p "$CLIENT_DIR" - - # Genera la chiave del client - openssl genrsa -out "${CLIENT_DIR}/${CLIENT_CN}.key" $BIT_ENC || error_message "Errore nella generazione della chiave del client" - - # Genera la richiesta di certificato del client - openssl req -new -days $EXPIRATION -subj "/C=IT/O=$CA_NAME/CN=$CLIENT_CN" \ - -key "${CLIENT_DIR}/${CLIENT_CN}.key" -out "${CLIENT_DIR}/${CLIENT_CN}.csr" || error_message "Errore nella generazione della CSR del client" - openssl req -text -noout -verify -in "${CLIENT_DIR}/${CLIENT_CN}.csr" || error_message "Errore nella verifica della CSR del client" - - # Firma la richiesta con la root CA - openssl x509 -req -in "${CLIENT_DIR}/${CLIENT_CN}.csr" -CA "${CA_DIR}/${CA_NAME}_CA.pem" \ - -CAkey "${CA_DIR}/${CA_NAME}_CA.key" -CAcreateserial -out "${CLIENT_DIR}/${CLIENT_CN}.pem" \ - -days $EXPIRATION -sha256 || error_message "Errore nella firma del certificato client" - openssl x509 -in "${CLIENT_DIR}/${CLIENT_CN}.pem" -text -noout || error_message "Errore nella verifica del certificato client" - - chmod 644 "${CLIENT_DIR}/${CLIENT_CN}.key" || error_message "Errore nell'impostazione dei permessi della chiave" - - # Copia i file nella directory di destinazione - CLIENT_TARGET_DIR="${TARGET_DIR}/client_${CLIENT_CN}" - mkdir -p "$CLIENT_TARGET_DIR" - cp "${CLIENT_DIR}/${CLIENT_CN}.key" "${CLIENT_DIR}/${CLIENT_CN}.pem" "$CLIENT_TARGET_DIR/" || error_message "Errore nella copia dei file del client" - cp "${CA_DIR}/${CA_NAME}_CA.pem" "$CLIENT_TARGET_DIR/CA.pem" || error_message "Errore nella copia del certificato CA per il client" - - success "Certificato client per $CLIENT_CN creato e copiato in $CLIENT_TARGET_DIR/" -} - -# Funzione principale -function main { - info "Inizio del processo..." 
- - # Creazione della root CA - create_ca - - # Creazione dei certificati client - for CLIENT_CN in "${CLIENTS[@]}"; do - create_client_cert "$CLIENT_CN" - done - - success "Processo completato." -} - -# Parsing dei parametri -if [ "$#" -lt 1 ]; then - echo -e "\e[33m[USO]\e[0m $0 [ALTRI-CLIENT-CN...]" - exit 1 -fi - -# Aggiungi tutti i client specificati alla lista -CLIENTS=("$@") - -# Esegui lo script principale -main diff --git a/0-CA/ca_create b/0-CA/ca_create deleted file mode 100755 index 3ec620e..0000000 --- a/0-CA/ca_create +++ /dev/null @@ -1,22 +0,0 @@ -#! /bin/bash - -BIT_ENC=2048 -CA_NAME="Tecnokar" #same for Organization Name -EXPIRATION=18250 - - - -mkdir -p ./CA -cd ./CA - -## Generate root CA key -openssl genrsa -out $CA_NAME"_CA.key" $BIT_ENC - -#check -openssl rsa -in $CA_NAME"_CA.key" -check - -## Generate root CA certificate -openssl req -x509 -new -nodes -key $CA_NAME"_CA.key" -sha256 -days $EXPIRATION -subj "/C=IT/O="$CA_NAME -out $CA_NAME"_CA.pem" - -#check -openssl x509 -in $CA_NAME"_CA.pem" -text -noout diff --git a/0-CA/client_cert b/0-CA/client_cert deleted file mode 100755 index 389ac36..0000000 --- a/0-CA/client_cert +++ /dev/null @@ -1,41 +0,0 @@ -#! 
/bin/bash - -if [ "$#" -ne 1 ]; then - echo "Usage: ./client_cert " - exit -fi - -#$1 -> CN == certificate name -BIT_ENC=2048 -CA_NAME="Tecnokar" #same for Organization Name -EXPIRATION=18250 - - - -mkdir -p "./client_"$1 -cd "./client_"$1 - -## Generate client key -openssl genrsa -out $1".key" $BIT_ENC - -## Generate client certificate request -openssl req -new -days $EXPIRATION -subj "/C=IT/O="$CA_NAME"/CN="$1 -key $1".key" -out $1".csr" - -#check -openssl req -text -noout -verify -in $1".csr" - - -## Generate client certificate -openssl x509 -req -in $1".csr" -CA "../CA/"$CA_NAME"_CA.pem" -CAkey "../CA/"$CA_NAME"_CA.key" -CAcreateserial -out $1".pem" -days $EXPIRATION -sha256 - -#check -openssl x509 -in $1".pem" -text -noout - -chmod 644 $1".key" - -cp ../CA/$CA_NAME"_CA.pem" CA.pem - -mkdir /etc/ssl/iotronic - -cp ../CA/$CA_NAME/* /etc/ssl/iotronic/ - diff --git a/0-docker b/0-docker deleted file mode 100755 index 5a6f5a1..0000000 --- a/0-docker +++ /dev/null @@ -1,35 +0,0 @@ -#! /bin/bash - -if [ "$EUID" -ne 0 ] - then echo "Please run as root" - exit -fi - -apt-get update && apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - gnupg-agent \ - software-properties-common - -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - - -add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - -apt-get update && apt-get install -y docker-ce docker-ce-cli containerd.io - -usermod -aG docker $USER -usermod -aG docker iotronic - -systemctl enable docker - -curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose -ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose -chmod +x /usr/local/bin/docker-compose - -#docker network create iotronic_network - -echo -e "\e[32mCompleted - Log out and log back in so that your group membership is re-evaluated.\e[0m" diff --git 
a/1-iotronic-crossbar/1-crossbar b/1-iotronic-crossbar/1-crossbar deleted file mode 100755 index eab54a4..0000000 --- a/1-iotronic-crossbar/1-crossbar +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Configurazione -CERT_PATH="/etc/ssl/iotronic/" -CONTAINER_NAME="iotronic-crossbar" -IMAGE_NAME="crossbario/crossbar:cpy3-20.2.1" -CONFIG_FILE="confs/config.json" - -# Funzione per gestire errori -function error_exit { - echo "Errore: $1" - exit 1 -} - -# Creazione del container -echo "Creazione del container Docker..." -docker create \ - --name="$CONTAINER_NAME" \ - --net=host \ - --restart unless-stopped \ - -p 8181:8181 \ - -v crossbar_config:/node/.crossbar \ - -v "$CERT_PATH:/node/.crossbar/ssl:ro" \ - "$IMAGE_NAME" || error_exit "Impossibile creare il container Docker." - -# Copia del file di configurazione -if [ -f "$CONFIG_FILE" ]; then - echo "Copia del file di configurazione nel container..." - docker cp "$CONFIG_FILE" "$CONTAINER_NAME:/node/.crossbar/" || error_exit "Errore durante la copia del file di configurazione." -else - error_exit "Il file di configurazione $CONFIG_FILE non esiste." -fi - -# Avvio del container -echo "Avvio del container Docker..." -docker start "$CONTAINER_NAME" || error_exit "Errore durante l'avvio del container Docker." - -echo "Container $CONTAINER_NAME creato e avviato con successo." 
diff --git a/1-iotronic-crossbar/confs/config.json b/1-iotronic-crossbar/confs/config.json deleted file mode 100644 index b462fba..0000000 --- a/1-iotronic-crossbar/confs/config.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "version": 2, - "controller": { - }, - "workers": [ - { - "type": "router", - "realms": [ - { - "name": "s4t", - "roles": [ - { - "name": "anonymous", - "permissions": [ - { - "uri": "*", - "allow": { - "publish": true, - "subscribe": true, - "call": true, - "register": true - } - } - ] - } - ] - } - ], - "transports": [ - { - "type": "websocket", - "endpoint": { - "type": "tcp", - "port":8181, - "tls": { - "chain_certificates": ["/node/.crossbar/ssl/CA.pem"], - "key": "/node/.crossbar/ssl/iotronic.key", - "certificate": "/node/.crossbar/ssl/iotronic.pem" - } - - }, - "options":{ - "enable_webstatus":true, - "fail_by_drop": true, - "open_handshake_timeout": 2500, - "close_handshake_timeout": 1000, - "auto_ping_interval": 30000, - "auto_ping_timeout": 5000, - "auto_ping_size": 13 - } - } - ] - } - ] -} - diff --git a/1-iotronic-wstun/1-wstun b/1-iotronic-wstun/1-wstun deleted file mode 100755 index c0f8ae1..0000000 --- a/1-iotronic-wstun/1-wstun +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# Configurazione -VERSION=1.0.11 -CERT_PATH="/etc/ssl/iotronic/" -DOCKER_IMAGE="s4t/iotronic-wstun:$VERSION" -RANGE="50000-50100:50000-50100" - -# Funzione per gestire errori -function error_exit { - echo "Errore: $1" - exit 1 -} - -# Costruisce l'immagine Docker -echo "Costruzione dell'immagine Docker..." -docker build -t "$DOCKER_IMAGE" . || error_exit "Errore durante la build dell'immagine Docker." - -# Avvio del container Docker -echo "Avvio del container Docker..." -docker run -d \ - --name=iotronic-wstun \ - --net=host \ - --restart unless-stopped \ - -p 8080:8080 \ - -p "$RANGE" \ - -v "$CERT_PATH:/var/lib/iotronic/ssl/:ro" \ - "$DOCKER_IMAGE" || error_exit "Errore durante l'avvio del container Docker." 
- -echo "Container iotronic-wstun avviato con successo." diff --git a/1-iotronic-wstun/Dockerfile b/1-iotronic-wstun/Dockerfile deleted file mode 100644 index dbbe525..0000000 --- a/1-iotronic-wstun/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# Dockerfile per wstun -FROM node:carbon-alpine - -# Versione del pacchetto wstun -ENV VERSION=1.0.11 - -# Installazione dipendenze necessarie -RUN npm install -g --unsafe websocket@1.0.26 \ - optimist@0.6.1 \ - node-uuid@1.4.7 \ - under_score \ - log4js@1.1.1 \ - && npm cache --force clean - -# Imposta la directory di lavoro per l'installazione di wstun -WORKDIR /usr/local/lib/node_modules/@mdslab/ - -# Copia il pacchetto wstun dalla cartella `confs` e lo estrae -COPY ./confs/wstun-$VERSION.tar.gz ./wstun-$VERSION.tar.gz -RUN tar zxfv wstun-$VERSION.tar.gz \ - && mv wstun-$VERSION wstun \ - && rm wstun-$VERSION.tar.gz - -# Configura il percorso NODE_PATH per npm -ENV NODE_PATH=/usr/local/lib/node_modules - -# Imposta la directory di lavoro finale -WORKDIR /usr/bin/ - -# Esponi porte utilizzate da wstun -EXPOSE 40001-40050 -EXPOSE 8080 - -# Crea directory per i log di wstun -RUN mkdir -p /var/log/wstun/ - -# Copia il file binario wstun.js nel percorso corretto -COPY ./confs/bin/wstun.js /usr/local/lib/node_modules/@mdslab/wstun/bin/ - -# Imposta i certificati SSL nella directory corretta -RUN mkdir -p /var/lib/iotronic/ssl/ - -# Entry point del container, configurato per usare SSL -ENTRYPOINT ["node", "/usr/local/lib/node_modules/@mdslab/wstun/bin/wstun.js", "-r", "-s", "8080", "--ssl=true", "--key=/var/lib/iotronic/ssl/iotronic.key", "--cert=/var/lib/iotronic/ssl/iotronic.pem"] diff --git a/1-iotronic-wstun/confs/bin/wstun.js b/1-iotronic-wstun/confs/bin/wstun.js deleted file mode 100755 index 11c11f0..0000000 --- a/1-iotronic-wstun/confs/bin/wstun.js +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env node - -//############################################################################### -//## -//# Copyright (C) 2014-2015 
Andrea Rocco Lotronto, 2017 Nicola Peditto -//## -//# Licensed under the Apache License, Version 2.0 (the "License"); -//# you may not use this file except in compliance with the License. -//# You may obtain a copy of the License at -//## -//# http://www.apache.org/licenses/LICENSE-2.0 -//## -//# Unless required by applicable law or agreed to in writing, software -//# distributed under the License is distributed on an "AS IS" BASIS, -//# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//# See the License for the specific language governing permissions and -//# limitations under the License. -//## -//############################################################################### - - -log4js = require('log4js'); - -try{ - //log4js.loadAppender('file'); - //logfile = '/var/log/wstun/wstun.log'; - loglevel = 'debug'; - //log4js.addAppender(log4js.appenders.file(logfile)); -} -catch (err) { - console.log("error log folder creation") -} - -var logger = log4js.getLogger('main'); -logger.setLevel(loglevel); - -var portTunnel , argv, client, host, localport, optimist, port, server, wsHost, wst, _, _ref, _ref1; - -var _ = require("under_score"); - -optimist = require('optimist').usage("Tunnels and reverse tunnels over WebSocket.\n" + - "\nUsage: https://github.com/MDSLab/wstun/blob/master/readme.md") - .string("s").alias('s', "server").describe('s', 'run as server, specify listening port') - .string("t").alias('t', "tunnel").describe('t', 'run as tunnel client, specify localport:host:port') - .boolean("r").alias('r', "reverse").describe('r', 'run in reverse tunneling mode') - .string("ssl").describe('ssl', '\"true\" | \"false\" to enable|disable HTTPS communication.') - .string("key").describe('key', '[only with --ssl="true"] path to private key certificate.') - .string("cert").describe('cert', '[only with --ssl="true"] path to public key certificate.'); - -argv = optimist.argv; - -wst = require("../lib/wrapper"); - -if (argv.s && !argv.r) { - - // 
WS tunnel server side - if (argv.t) { - _ref = argv.t.split(":"), host = _ref[0], port = _ref[1]; - server_opts = {dstHost:dstHost, dstPort:dstPort, ssl:https_flag, key:key, cert:cert}; - } - else { - server_opts = {ssl:argv.ssl, key:argv.key, cert:argv.cert}; - } - - server = new wst.server(server_opts); - server.start(argv.s); - -}else if (argv.t) { - - // WS tunnel client side - - client = new wst.client; - - wsHost = _.last(argv._); - _ref1 = argv.t.split(":"), localport = _ref1[0], host = _ref1[1], port = _ref1[2]; - - if (host && port) { - client.start(localport, wsHost, "" + host + ":" + port); - } else { - client.start(localport, wsHost); - } - - -}else if (argv.r) { - - // WS reverse tunnel - - if (argv.s){ - - // Server side - server_opts = {ssl:argv.ssl, key:argv.key, cert:argv.cert}; - server = new wst.server_reverse(server_opts); - server.start(argv.s); - - } - else{ - - // Client side - client = new wst.client_reverse; - wsHost = _.last(argv._); - _ref1 = argv.r.split(":"), portTunnel = _ref1[0], host = _ref1[1], port =_ref1[2]; - client.start(portTunnel, wsHost, "" + host + ":" + port); - - } - -} else { - - // Wrong options - return console.log(optimist.help()); - -} \ No newline at end of file diff --git a/1-iotronic-wstun/confs/wstun-1.0.11.tar.gz b/1-iotronic-wstun/confs/wstun-1.0.11.tar.gz deleted file mode 100644 index 255473a97e9006e6cd7b0ce58ee322065b73d931..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12818 zcmV+tGVRSDiwFP!000001MEEgciT3S{h5CSs`S=!S0Db2vu@pYZmcxAIJM?!sd z_s13-j~_p#zi=%7g`0a{ZSU?K>>TWFZ5`~u_3gd=?SrqzUx1dCBi9*MfKsJtcH6j5 z-Cv&E|B+*VhlS~MDw6DIw8i;;BzD97$NS-vC)>klwEZO7KiJ+2w?`r0J=opZ+8RB1 z68zaTKOA>=%ypJ0&F$7!YkRx>g>axf931T5Z2$0m$^Lf^_7A?=|3dUsj(>CeugJeO zmYEzUsSP6j1Oq&)0x!2a?V8XNL(B4F4k zRiR*EEO-(Go!&33cXrabJU_m8e$>vjYR75F<8~~EZQuO_!K<_W&nMkJqd`NNwBhOh zK0h0D*psa%TL|i+|N8u_+q+APmP~Aj?S9k%8o`n)}r<#{lgK>9z@@QViw09-rodl?c)4Lg;n+y(Ovz!6*BRF=LX;w6!v!0N&;Z#6 
z_$tlE6ZSWjj^ISXxHQbM{9dLjRV##r>1?6oc#^Sno(RQ&BzRC}3&!(ok}CNR!e`;E zdm@|gi~-WI;_zTHra_hu9sy#^W7Z+iD@5c8PLQY$LdS6yin&D`P;AN*J{BlE#H;ggVhL-o z`9#q4LHdN36KtO|bBWpnQ0fvWA@S&moFSx<9050H0)i-juJ3RC$0nhJy38`(0m?J* zUO+F%0K}w(b`SyNP$a;FP=Y8c&^gmF%+03(EyCS$RjO2iIw)afXjqk3sV2ttW~`_A?VkOgA~G@Sj9Q>BpOF@r%-eywx=V;nPCrsYg8v_ zVJuA(%w{Ihpi&~vPIC;J0CRxLl?nUa3uby{ythU;#&)M7lAL9WnLq7Usd~TS6y|V| zC?-FInpT>COx!dI12C+!)0*-K9LW`zkWt3Y{yo3cKyQi)7;>vhyzmRoy8#OD>!UC9i;s zSAro!bZuD!5XX8R*x6f1h6j%DA=fCpgiJ9)9H{-EJ;qldDiRmk!h9mbiRTPJSWvU0 zxhrucNtCF?z#3~OOaO;d<<0IzZF9g&VosXs+mZe#pfi7(qgVX_{+f`QiHTr4fbE;WbCf*lr86FZb7@$hQ88=W0LmrbJ%@sbE zQ1X!{mir8?z-PKB8HIexbZIQaf8Dq>6cxV9#{vIJn!qE<7$1%yr37eR$SP7Hz0jGM zYTt!J%*_RckdR`e9bv?ekM!rFg8|Ip#JYk%YzqD!OEAn_{ zXcE_s3DvneeT1p#nGEwZ*I>L;uHK{TQ)PDKQZAw8I3eE;s)#Zo#;(;wbXK(<@S8K= zR$H}I6JE-%i)lLx{A9}GGZ6jbbO|Y&05L-W`UEMXAWsfN`*=zt)Z*_s=ueDj!xZ>! z!Zjwmp7|L^w9{fgVOoV~pBB@1nH3x4Cb(K%U0cj}X6Ack0a2293}Wa)fM&|#C@+BI z5M~J(-b`e`8K-{W`r>FVG1o}aq)C$3Fk?JzLe`_kSU9B%9%qZ@NC`NTkl0=}LD#%ZKnRa1j)B7IRSa;tJ4gLanANHnH&QHb2cTd@&`< zD;U>=bMh@lK)arR&N({W{|C~k!((EWp@Bikk+~EJ2-Kz^+GH~`S5JD7t^=?MzY>(? zIfAH;PDdlm^B@R_7~HS<1do!c%m}9N12$2%Ss}R@H#1~`k&(m7XR{c~%`^d_6PM9< zS<=FoOW?N|>rD>0NI-n<7Jf4U>(!boN!B@1;C)<0BczLczTwyPCX~TxB5Xti_XinC zk=4^<%MZ8-nF`lNTOi*gc~#;piaCnH#kv;jj!^zZEu_Iyp-vY<$>i9Oz{ebKlasTB zd%X-;MQ*K9t((L!oJN@Dm_M0wkUVEGpX*#^IMrB;O%MQ%IpWKlV96`q=HMtM7{awJ zwM#f*DV&9ymXpVnGF1R$Qn9Kgx|GQkqt;Sfp;6kH4S6oBGSLA|6s6~Mg=?3vMR3t6 zU%_2~hIL|OT)|$8^@ZOVY7y!wUzDD0*$aUmlrFca_@i60JQ4|}jesl%Cr5gO84dhR zi-=q;n@tG6!IL%0QivE*s@7BpBjr&VLrrAjyYpR#F3#5ZrkQCDnl?sykvvmxfLJ9= z5uKKw`WDq89xD^&6d#r=iSHRRvYqWiQ{1LM2Sh~H>-76+!>9vc|^19?uMXU6!G<@ef^NN!Bc3o{iF*z#z42H~c839+}r>e2aW$to4ughg?gEd?l zEm;aB5KsXcVF5rDHF1)QLN*EZ=b&K5#6oZ=LN6w!3Pb<3%4SdAlsuSHx@hu24N(@a zn4wh3v9?7XYfQL(tVp%n4EbA>4pJVfnv z^GX6%R;wyexX8M(mrSWN>QSXFQ&cKTl|V%x`&8`Ml>k$#mRX=~v6l%%QB6V<*MKOL zSpE`3Z-J!fZ!DIxNUxdYwXtq&U6q+WVq9+Im{f1*cc1^6(#)n=M5d=O282mnBe(i! 
zp6{g@K2)sUP<$DtrYJ+R9aDLSVT4H2IfMf(B4HMS&54FdI@!fJ$-u$G+7-sgPqbkik+sEWvUW?&!t^G!ho(pCK%F5q!9Si zDK1YVHNa)aJ3^SC6-7~WV5fGaKkKZ?$<0QCH6g%o>Y&9=q^8md7i~xE73815mqlR& z1*yY@DPXDc#uBS^10+Q%Udk?5qZF96eO;1TM_RFIuw0`0qp^Ki5$-1TwZP@qjt8te zsIec92i<{V?yK&_^Rt&1?A39}#b%>e?QT?Bxkcyallopai30ZYV>zURj))s$KcZN-a40j}fuL!1 ze7$`b?^y{S*pzW1izOkp9RU(8pYn0FhmTLYEdk}`06i19w(hMygF!)JYnBo)fiXQ@ zT#&OsorlJr8_cn&~TEbrVC1)&dOpt^;{uu8wv6!;OdA%o`k|oVrJT)*843;@5D60 zttDkc%&;|fw6k;=+b$I4Ep2o=nE9He7MKs(i>o@4{$j@ z+R>84-fBpZbK7MRYU0|<=txU9O7v2uQ?8=W0rx$R*bW7fW-_l`2=uZ08t!GF0VQtePfjW3_KQT-aozoX`R} zQ1Y}$=I5R^dTC=pi&5ZC?*!xOx@~ZGu>Y^J{}Z>|-T}z8wgP$!fidF9D5`~ zm|Zf?A*X}<6mpbgd7CM=?hsB{J?hn|wF*|=v2F)JoR0T?)9lCzr@mpXY=DvlnOgi# zxq!LWic=n$eT72RH&E4Zm>ocxcf>Z@R~%oH85?(Ooidzn6eDBE)?yqQiDB+tM--4u z6d2qkd#+oEqHa-b0_KlF$TP@B>O!eaW=O?^5i&N!Sg*l#O4P@=h(MdO!BS}p+$KC6 z0Ht_y3^$G(0b`M!(xv(aZr#HPnjtL>caZYBn*y>3L14yvktYdE3b%)kub|q9B`tJK zQp~jIr6H3Sr;td2@Nq4S&aqd7sq0)nr=fHFxP*U7jLhe@#d7HhsYWVtsl!x}I|v+D zUy$zF#%3$mpn>(8AyV5`5N_fF<+ayPJJ~BS9N>#|S?g_5V}B)v*4`#B@$o}4eGRrh z29wB#E$}Vv7bk;L_cm2~7^lPbltZ?z+PDlg6|HGhGaCe2XqX9zMAtT399)vFZ|Du& z^F|DSi?)v?%=HbG)4K?cKgOOGw6RPWD+2y)ECJ6-;28opldAyFJmBVbA*=$26W1Y% z@6bmyOjCR{tX@+kCWV+RJ1eM^LnV_XcH5ocVwrxetHi=N!gyQ56@|Ao_JO^v(VJoL z9UP^gw%`ZwdgMr-UtF9Is$14t%NX8^MFj(#BZfCro=}J|uaObP+B;maogk_i6^(+y zc!n>JnSQ@mfsgkp2=Kz^VW8bjYY>2XqfvosJB@DPIb5Ds6u#DK(TjO=g&sqZT|mKA z$H6U)FZz8=U#+Vky4!8`YT)$1=o%}Ql8T2j=UnhWn*^-pMoU91$m+Gj+NLpfxPt@U zY`rrU?#{R0eOEv*crIZ!=y-pRbcVKnX#X}Cd^tz$vwuM`b$m$1cR7kV1V!M{A^dL; zqYm-oqk;aBKEa%YNAMCl1ZG&GV*`rLN_ccy0UDHG074?>=%!lcLfr#zgMqoy!t6A& z+(%h_B)Y0h4VFY;Owot{?%C54r@8PQ-ZLH(`P+)Y=NO=uL;|_Z^)f1K+X`lzB;`qN z8$?wR-S(TEcZ3irr-7hJc)y4^M3bGa=jxqHuYo5u82-wD2>7I@k-HfB!%Ha3F>HJ_x)BjZ!xj{EBEjgdBBSwXBh9$92J)J1|w4xLanSpYHR`e|A}R zZ%3v6kJ3Hi#TJ@kL><5-n{L_Y!nN#~v6xc&X_t?jM7mEZr`-#d8t{fGNF z9LGWsD@v?`D-KnM*%NB>2H8GcQ$*?5nPC z#yKu*m!MInec1N*EG+1sL{{wKgE>FN%2t$D=%qThVC!#qolec$0YkH0883d1E6K+% zIv1V(fc@Y)MfB!OBDCvenC&ueYE%ufI_;iv;Gm)ro3)bk8%o4SRERe5(WZU0Nsr?0 
zI=3Ca75w_^uYs92!}5tWBd8g0@5GV$*^cInepTZTKUn1lJ-#m|@&khTvvu_0_;N?Z z|BEdAU$g$RyYmqL@8MXHKfXxWitbbm{($`V4z?fizxz0Dl0RL)BOIT||8{qGR(}8Q z@z&16`rmyVkG^KK_&4Nw5(GU6`zAdIo}HbZboxi%6hGb7uKem(aB|#van`%>eq}A$ z4!%4)zZe{CG!<)V$~`uGgr{deJ?ox!j%vl)hfK{2(dF_-EjaI+Z?;A5ElZ<_{z47r^pf2Q)4c5Ndt}$6|*#DpQp0&Ge8`=A{ z{1ph3t*je8{ET}Y%Uj1*TJJWA*RpR-c{qfPbScV_S)z_%ZD(k{`S zm?H5Q3!f~@HA8CNu&JJr2-0SvaHX5;S*;DrHvl!b2`xZMl65s|= zmEV5Le(O7-dhr|UbTT@laI-r?`^cOl&>cLnsqVm4nPmALRc&GUi=m|kMy8i9ElV3M zZ<;+=-gFupEHyTF>+HuNhTi+XSbqHCkM&*|{YF@Z6O7Gt?aE%|cFK2iiSUSj{)2i3 zv(1@(k`staAo;Ut6=zcd1<*;L^pOksSFEp#=8H(m31>kT@;SzN9ux0bvpt;#5)=X@ zx0szEtgIOzpZ~X>`24n`a}ES>A`dld3mfROx9l^G#qT&izYFsFop*9!vz>2OlNI1H zzLE->8#sOVm#_0*dPDt_r3C-S0vHqeO z|3^qggLB?F8t?=UOK~A00%H^@H!tru?yoXB!=Va4?z9!VWS< z>eahhi0?jlGk@Ylue<1IrrvfZ*FzBh=xP!>9zZ%4I@-3nz@3q>)KUJdLd~tDO{N8f zww{(RfG#$SM}oGu|JnUz$7Uk-v%S67Jv!d8Ex{pooY9Rl2TAf&7**a|KU2(+SM$yo zV?j1&fGLU0FFo{>jQNizDLy@{?>DA``MMeRaK>|BsNq7FX}etGn{*{<^%n z`PR~c-g$m#ICmQ!lC}EFFFl9vlZ^EryY&aOaR>f?ZDVz#;{P8ZCB`3v=W_S;;GsBy zg8hGEc`b4O!;7`G7uEj%27`q1M0JW+ z(U}Z%aThOJ81kQWIvrMMxTSG(4inujdfG zw(aQUB7Q=ILrKa41Tt2s2~r#=4EiySL8V&F9OfwJ!iqofzY9@gj38{BS{Ebl#?~kz zf>5+8(QIHWtwzxata#wVs#vO4v&&h4(%D13KJAcVIx*e^jOdZo*mH)ta~r7jM?kLn z#Nfdj_W1oEkyb2fRBz2b?fud_?!I}?n#~2})~EdzhYdxscQ*HJ6S)~^%%4@Z*MgGWB_4Q%PLn*Pyy_g0*r8(z$1vl)E~+{xh8 zJahw@ub@-6r$Q*5Nrq7yvEjw0h#u2NEt~KVae}Cj1EM-wc1GvvYYGW1@Z3wi)ptgt zC1j1@%a>+OtMrGbJo!%^2D`5uSS0^#tj6`ftMIqd|2{^l26)yWTLj{(!NQ&WD6B$TOXwI+m0(dR_$f%0-?u~bg^ zy_Ym=XGycC_VcLS=k|N5Bq`wLiMOhAR!OlokM^^Mm*GJx8f@b7;Du*e zgsxZ#-4E%;J?Gp`t*&BvVQhROo*Vq++ny0?0*g)VB%;FElwL};Qg`yQLQr79R3TQXHSQ0rq%)SW% zJ33|+56#De*x~H(hXdEHtHW6)rdmA8W7N|qV=mJ#g2xs;(8st zIPX$puL3b(F{_mc4l#AA2yk{09i5TAA;P$-Lgj#J_H@&Cgx{G@vBCBf{mYa^b#L7p z4RjN7PCt8kY_!iYteKp%=f^F;xq~38VlJ2Z1TXFRtS#GIUZ-9Q5{S~izd#WX`{ zsr~ms5O(l&RMu!DX~R#i%(S(qX+;DkMgNzGDr4#DQ|G&AmlJNiETv@K-KRiaN`2ULkulWD6v|XP6Phs|p zSa&Rv&b(VO@4-y|{Y*vB=7F8h%+*|HfcYLuJeu6SUVA>>WaEuS+04%(wdOolI~l|^ 
zo!4%+e{e%$Gw}jLyr#+zEXCv*f!qBNXD`IJpQ+Y&=N}*w{A@w_z+F;psjM<&ZPc_8 zwwAyCQIArKVQcy8Lp_-B3J+#nlE2U3?Mw0X#jK6q{}#r@#Tb9jq50<#;)Q-SizVI@ z>@Ws8^lWXE2y@vC9}0+gf0?b1W`o(VlwjMo9toItr`_I*l(K*kMQ^}6aycP~C`)ly znT1UzH!gg9Hh8WW3(VowDk~)&##GRnK=niuIWk%(YwX3;Zok8@@Mwiq1}m~$z{X%8 zn5MESM$`*41&6&O3%0Npw2x|J9C5!|lW&}fbIGs&yNP!d{@2GdV&O$|FfX_b_w8P- zZb)?P_5t3ar!K<9o%LW}t2-wB4F?)Mb!2zs{FKR{3G7_<@|IIbk)N6#&tV!42Oayl z4n{gYc^n&ysC70p?WyymmNvJ=>xF|hk8cyX!GZaq#v)tcBPN{bkO91;!v?K1H zp&L%<--T|l{Je-OUQ<*amKtDPLuNO$YJ|=5h>X?hI$|~19E{W;Zz9(AR&)OGmeb7R ziv$!kLSwCwTrtuX3gly8bE_BoDI=Zpye}1fnS|JGCwWT?ZyCx_Nvk^xzO*d;XG{tG zkI98ThW=-DbtQ5BcXMT>ivRu?>1)ydRH~jz)$=D+^`ss3q*b)RywTu{FQ{VUqfL(9 z==uzLA!Nct5y(itTZt-KQI3R{6apb`z2SSU_`xsinbBPe%KRw4L46;9eK?sov+&CE zhyRZIcZ!`q0V3WmIGQhAd%$WhH`uQ4J9F_4<32T_CIv%Q9X0w|QeyuXcS=2;|JTaO z#%A*V-}U9{{Lf>guVw$Q>^~J#@K>zxZiiifax9u$kyBKFn;}P-LOT_1-fNgq|1^i zx1FvaSB!L=gacipj=3(3EEj>{X;a#N4$v>}>Xx0+Z_y1HUfoS@aCmDM?i_LLd$1m! z;oZByH6u6e3|>Q~-h9%R^ue$H6bI%K)&?w&4d#@`&sqhM?lyUex9YXDtVTo^H;Ps^y?UUvqg@e_hou6f|27VagzkqGjwcvp06fJo-;_K{7A^wz-^7tTyTfUN|B!9S??c`63RM?z=3B zjZ7kfPE+itNe~mJk)+%5m1e8sUK8Rh8nS-F2Kkxre^(NYuurI6kRyhd!^uIgi`P=R z1KF5(UynP<3fOqrGp_21W}kg98o8Iwi1E^dF;P*b^D6*qINV48a8!@PGx4!V+eS6w z`uEc_oiA#zGoAf;UwjvQ3@`C}#be!klr1tQGEX=Q_zBCB zmUY&Z9yH{rByi|4)4w}mr{`@5{*aWFwAwEvyfhjtHiP0drMj@2@f>no*i(W%<;RsO zMNMKMHca73q!5{>GI0iwi-f07mT!9Mt`}R7^!-m@G!YC=g;B_@Sae&jP+?#kTG z0TgIbJuTQebms+R?K$#fRq-?LymEKI5sK}bUC{jYHBmg(Z~x1yy)*Q`kLdruvbnaN z(Eo0(tXJoMA0t)z-%9^m>3_@8_8tBIZ>j864o#MIOOvG|?74KiA?qFQ9vyf0xfgVrwiuBI4NSr=40338o8AY|=dPCFrKM-rIft?H z=_e8g0bHnN1(s&fz{I@)aP|(*_IHnWg+4*AWl5f)FbBopY7BLBjw;&9V{1X2J`#2R zk!t8ND(F(`=Umk@s-2Y*;m$plYG*4?=d9RNUFUr3CL=AhQVSI8opW{0h5BXy*clA? 
zP`WslOnk(#c43bYtqT@3>S%$|U(;Tf)m-00YkfzJwI*Jq>Z}X()wkAFC-l_1`Yu~R zovED8QcO$!|Bq2fn-QO~wWb)`h`-{Y?|y_aSz2FSH!_fbqVlB?bz8_F+=WmfF z-g6~X3*kfQ{43h6Xjd+~l6b`m+-`GfVv3!$Vy2#)Q>lj4r_kP0VChk@-I6?CNy|>Y zRp;*P-@Pq6_h-loEITxiyLj0+4kfN3?H&t^^PHtkJ-3EYxA-XIjx>d4ky3$8r|6`M zT95WP^?DPR4txdCu`^=i#E~yWu}2z97pW`F=teiG5spL=e&YFdb{DN^;P=lQq05Xk z;(9?5I_Yfcg7gjiX-j0+k2SAFMgpXH!xmx6KAQOin=fO+$g^IUULSP0xX%S%VY*-{ zsmFABQX|3$(fr8NjpGjj;vaQ9;S7r#60zB|bFqmL)rlPnX=U;c4hJQynY_M4+ZiW{ z?`Huia}bXLZ4%iI>{|eK$9q_E*9tYds`o0EZ4opN9pttMe|y;TX#0~RpFY@8g-J>? zF807=xPyql5x>aSucPk2PP)Bg_WIosj^s(NyHC%k6e%OOt9zSPFGlGkPT>X=^e(Hx8bMCs*ChYJ+#Rl)gFw&;o-QC|mI(Q4X zu0jmNufvO_suvPQK*J0ld0=X5H6OP?;zx#uUNWfhTVa-38e4}s)Wv9!B{NBs2CkS7jYg^ z<_bT99!(A4TRzN!P_hF#I7+|zE(cm%ZH~IVlQ-SCzYI_2W`n!=wJS{k`nID}b4o>v zI36-RY$dD-y=5ZLo!o4BDzV)^C;}F-->>y*R#)pq)q zNyh#U>(fWnTYLz-$pf}gwEwTICgT5Wtgf$C`~M>(T!w7a^P^St`|%(cIp=L63L94o z`~h0f32xG@zX9WLlg-v^t@4kdy>*Ik#>knx&d2))gFJ#wMV?KbKA3~laCB^ zv_tfVet$Mbu|XF2FJA4_&QorC`S5+Z`Ot)$e5O=&J);toXLvdeKr1D{nq~i;Atc zHsvSUch7>p=ex2XR4c#o-BFZ+{~`c=7sJMeoUDOx7zmH}O*qGS={(YT)s2uyK974$kQzRurTLP5fp$Q{^G=o3l? 
zvIAi@Q8l`$%3$1%Ja{S0)L7#`oNK2??1;q=b)r+5c67jGD^Y%Iw-o{c?AkD#TY>HY zBj_XYet{AKYpMiW+CDrw`2XGGZu6hrUz&%z$3N~6^===Yy!z>2Pk-xkt;5s_uL7y9 znflya35$qf0M;QubxLarV+$iccEX^Z1KH;w7;T~PsX^|-b`l`F!Tp8p1W@F82>s$7 zWuyb)?LfEQ9UbqWeg@x0{vYIQC7|JsXd-J}jw=k@imTXG!MYXl$O4bPJ%~u@xrQ%pu7-WC z-s9MD2Za71DgI#EoV$PyV17{JMs-Iaws*F+qZr$&C1AOy-jK(`mV=44!D?_GH6jox zM=t`n!Blq}td`jt>XxFhPEJm9Ws@#=4>m_d^=FwgLTZk__?#^d>6e(pCUoXph6-W4 zSmtbw4RPA6MhjZ%D2uY(?-xZPY)t}_ImhFJ`c=$zKKhxHJ#`{{fr)Rz0SkbMZ8`K> zZ*IR57`r)$P-C1OAQifG;E?p+#2+CS!DXg8c7a$9pq$}|j0;1KJ`k-0V*^q;a2efC z4w7Pk4ofidSKbZw59HZ6L$F0a!RWD{_bITD#$u@69gxjQCkOA?j{W(hepTE8rGGT$vrd;0@gV zFc7Y6GydvgIB`bk2?^}7TDYPahg3J4A{Y^rEw(!f(cw58Ub~}tgAK`tWa7y9EVPLz2(uZHez%lUpoBgmNR6cS8WrXt5ljt5ZCx5!+_BC~9Cq`&5bPkA3ewQf9vZn{%7NX;YdyY<@&$W z@s~EG2Wq2e{8!f2;^Y5fb)$;^{ut@$6EdR&SC%#Y2&*D%PGhT<6?!v}m|{yA-wEKi kMaE1K*<&P^OE+xUU%G2ms#2AzRHdBsA1Rg!i2xV@0LdmA$p8QV diff --git a/1-rabbitmq b/1-rabbitmq deleted file mode 100755 index 3a350e4..0000000 --- a/1-rabbitmq +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/bin/bash - - -#if [ "$EUID" -ne 0 ] -# then echo "Please run as root" -# exit -#cfi - -RABBIT_PASS="smartme" - -docker run -d \ - --name=rabbitmq\ - -p 5672:5672 \ - --network=iotronic_network \ - --restart unless-stopped \ -rabbitmq:3 - -sleep 30 -docker exec rabbitmq rabbitmqctl add_user openstack $RABBIT_PASS -docker exec rabbitmq rabbitmqctl set_permissions openstack ".*" ".*" ".*" - -echo -e "\e[32mCompleted \e[0m" diff --git a/2-mysql/2-mysql b/2-mysql/2-mysql deleted file mode 100755 index 711e1a2..0000000 --- a/2-mysql/2-mysql +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash - -# Abilita il fail immediato in caso di errori -set -e - -# Percorso del file di configurazione -CONFIG_FILE="mysql-config.env" - -# Funzione di log per output formattato -log() { - echo -e "\e[34m[INFO]\e[0m $1" -} - -# Funzione per gestire errori -error_exit() { - echo -e "\e[31m[ERROR]\e[0m $1" - exit 1 -} - -# Funzione per verificare se MySQL è pronto -wait_for_mysql() { - log "Attesa dell'avvio di MariaDB..." - local retries=30 - while [[ $retries -gt 0 ]]; do - if docker exec "$CONTAINER_NAME" mysqladmin ping -uroot -p"$MYSQL_ROOT_PASSWORD" --silent; then - log "MariaDB è pronto." - return 0 - fi - retries=$((retries - 1)) - sleep 2 - done - error_exit "MariaDB non è pronto dopo un lungo periodo di attesa." -} - -# Carica variabili dal file mysql-config.env -if [[ -f "$CONFIG_FILE" ]]; then - log "Caricamento delle variabili da $CONFIG_FILE" - while IFS='=' read -r key value; do - # Ignora righe vuote o commenti - if [[ ! "$key" =~ ^# ]] && [[ -n "$key" ]]; then - export "$key"="$(echo "$value" | xargs)" - fi - done < "$CONFIG_FILE" -else - error_exit "File $CONFIG_FILE non trovato! Crealo prima di eseguire questo script." 
-fi - -# Verifica che le password e altre variabili siano definite -if [[ -z "$KEYSTONE_DBPASS" || -z "$IOTRONIC_DBPASS" || -z "$DESIGNATE_DBPASS" || -z "$MYSQL_ROOT_PASSWORD" || -z "$CONTAINER_NAME" || -z "$IMAGE_NAME" ]]; then - error_exit "Una o più variabili obbligatorie non sono definite nel file $CONFIG_FILE." -fi - -# Creazione del container Docker per MariaDB -log "Creazione del container Docker per MariaDB..." -docker create \ - --name="$CONTAINER_NAME" \ - --network=host \ - --restart unless-stopped \ - -e MYSQL_ROOT_PASSWORD="$MYSQL_ROOT_PASSWORD" \ - -v ${CONTAINER_NAME}_data:/var/lib/mysql \ - -v ${CONTAINER_NAME}_config:/etc/mysql \ - "$IMAGE_NAME" || log "Il container esiste già. Procedo con l'avvio." - -# Verifica della presenza dei file di configurazione prima della copia -if [[ ! -f 99-openstack.conf ]]; then - error_exit "File 99-openstack.conf non trovato!" -fi - -# Avvio del container MariaDB -log "Avvio del container MariaDB..." -docker start "$CONTAINER_NAME" - -# Attendi che MySQL sia pronto -wait_for_mysql - -# Copia dei file di configurazione nel container -log "Copia dei file di configurazione nel container..." -docker cp 99-openstack.conf "$CONTAINER_NAME:/etc/mysql/mariadb.conf.d/99-openstack.cnf" - -# Riavvio del container per applicare la configurazione -log "Riavvio del container per applicare la configurazione..." -docker restart "$CONTAINER_NAME" - -# Attendi che MySQL sia pronto dopo il riavvio -wait_for_mysql - -# Configurazione dei database tramite comandi SQL -log "Configurazione dei database..." 
-docker exec "$CONTAINER_NAME" mysql -uroot -p"$MYSQL_ROOT_PASSWORD" < /etc/timezone && \ - ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && \ - dpkg-reconfigure -f noninteractive tzdata && \ - add-apt-repository -y cloud-archive:$OPENSTACK_VERSION && \ - apt-get update && apt-get -y dist-upgrade && \ - apt-get clean && rm -rf /var/lib/apt/lists/* - -# Configura i log di Keystone -RUN mkdir -p /var/log/keystone && \ - touch /var/log/keystone/keystone-manage.log \ - /var/log/keystone/keystone-wsgi-public.log \ - /var/log/keystone/keystone.log && \ - chown -R keystone:keystone /var/log/keystone - -# Espone directory configurabili e porta di Keystone -VOLUME ["/etc/keystone", "/var/log/keystone"] -EXPOSE 5000 - -# Comando di default -CMD ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] diff --git a/3-keystone/build/build b/3-keystone/build/build deleted file mode 100755 index 4b51563..0000000 --- a/3-keystone/build/build +++ /dev/null @@ -1,4 +0,0 @@ -#! /bin/bash - -VERSION=1.0 -docker build -t smartmeio/keystone-stain:$VERSION . 
\ No newline at end of file diff --git a/3-keystone/conf/adminrc b/3-keystone/conf/adminrc deleted file mode 100644 index 1f1a23e..0000000 --- a/3-keystone/conf/adminrc +++ /dev/null @@ -1,8 +0,0 @@ -OS_PROJECT_DOMAIN_NAME=Default -OS_USER_DOMAIN_NAME=Default -OS_PROJECT_NAME=admin -OS_USERNAME=admin -OS_PASSWORD=s4t -OS_AUTH_URL=http://localhost:5000/v3 -OS_IDENTITY_API_VERSION=3 -OS_IMAGE_API_VERSION=2 \ No newline at end of file diff --git a/3-keystone/keystone-config.env b/3-keystone/keystone-config.env deleted file mode 100644 index 438843f..0000000 --- a/3-keystone/keystone-config.env +++ /dev/null @@ -1,26 +0,0 @@ -# Configurazione generale di Keystone -VERSION=1.0 -HOST=localhost -URL=http://localhost:5000/v3 -ADMIN_PASS=s4t - -# File e directory -ADMINRC_FILE=conf/adminrc -KEYSTONE_CONF=conf/keystone.conf -BUILD_DIR=build - -# Docker -IMAGE_NAME=keystone-custom -KEYSTONE_CONTAINER_NAME=keystone - -# Regione e Progetto -REGION_NAME=RegionOne -SERVICE_PROJECT_NAME=service -SERVICE_PROJECT_DESCRIPTION="Service Project" - -# Database Keystone -KEYSTONE_DB_NAME=keystone -KEYSTONE_DB_USER=keystone -KEYSTONE_DBPASS=s4t - -DB_HOST=127.0.0.1 diff --git a/4-conductor/4-conductor b/4-conductor/4-conductor deleted file mode 100755 index f0a9a76..0000000 --- a/4-conductor/4-conductor +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -# Caricamento della configurazione dal file -CONFIG_FILE="./conductor-config.env" -if [ ! -f "$CONFIG_FILE" ]; then - echo "Errore: Il file di configurazione $CONFIG_FILE non esiste." - exit 1 -fi -source "$CONFIG_FILE" - -# Funzione per stampare messaggi di errore -function error_exit { - echo "Errore: $1" - exit 1 -} - -# Funzione per verificare la connessione al database -function verify_db_connection { - echo "Verifica della connessione al database..." 
- docker run --rm \ - --net=host \ - -v "$CONFIG_VOLUME:/etc/iotronic/" \ - -v "$LOG_VOLUME:/var/log/iotronic" \ - "$IMAGE_NAME:$VERSION" \ - /bin/sh -c "python3 -c ' -import os -import sqlalchemy -from sqlalchemy import create_engine - -try: - db_url = \"${DB_CONNECTION_STRING}\" - engine = create_engine(db_url) - with engine.connect() as connection: - print(\"Connessione al database riuscita!\") -except Exception as e: - print(f\"Errore nella connessione al database: {e}\") - exit(1) -'" - if [ $? -ne 0 ]; then - error_exit "Impossibile connettersi al database. Verifica la stringa di connessione: $DB_CONNECTION_STRING" - fi -} - -# Verifica dell'esistenza del repository iotronic -if [ ! -d "$REPO_DIR" ]; then - error_exit "La directory del repository $REPO_DIR non esiste." -fi - -# Verifica se il file di configurazione esiste -if [ ! -f "$CONF_FILE" ]; then - error_exit "Il file di configurazione $CONF_FILE non esiste." -fi - -# Passaggio 1: Copia del repository iotronic nella directory di build -echo "Preparazione della directory di build..." -cp -r "$REPO_DIR" "$BUILD_DIR/" || error_exit "Errore durante la copia del repository iotronic nella directory di build." - -# Passaggio 2: Build dell'immagine Docker -echo "Costruzione dell'immagine Docker..." -cd "$BUILD_DIR" || error_exit "Impossibile accedere alla directory $BUILD_DIR" -docker build -t "$IMAGE_NAME:$VERSION" . || error_exit "Errore durante la build dell'immagine Docker." -cd - >/dev/null -echo "Build completata con successo." - -# Passaggio 3: Creazione del container -echo "Creazione del container Docker..." -docker create \ - --name="$CONTAINER_NAME" \ - --restart unless-stopped \ - --net=host \ - --hostname=conductor \ - -p "$PORT_MAPPING" \ - -v "$CONFIG_VOLUME:/etc/iotronic/" \ - -v "$LOG_VOLUME:/var/log/iotronic" \ - "$IMAGE_NAME:$VERSION" || error_exit "Errore durante la creazione del container Docker." -echo "Container creato con successo." 
- -# Passaggio 4: Copia del file di configurazione nel container -echo "Copia del file di configurazione nel container..." -docker cp "$CONF_FILE" "$CONTAINER_NAME:/etc/iotronic/" || error_exit "Errore durante la copia del file di configurazione." -echo "File di configurazione copiato con successo." - -# Passaggio 5: Configurazione dei permessi sui log -echo "Configurazione dei permessi sui log..." -docker run --rm \ - --net=host \ - -v "$CONFIG_VOLUME:/etc/iotronic/" \ - -v "$LOG_VOLUME:/var/log/iotronic" \ - "$IMAGE_NAME:$VERSION" \ - /bin/sh -c "chown -R iotronic:iotronic /var/log/iotronic/" || error_exit "Errore durante la configurazione dei permessi sui log." -echo "Permessi sui log configurati con successo." - -# Passaggio 6: Verifica della connessione al database -verify_db_connection - -# Passaggio 7: Sincronizzazione del database -echo "Sincronizzazione del database di iotronic..." -docker run --rm \ - --net=host \ - -v "$CONFIG_VOLUME:/etc/iotronic/" \ - -v "$LOG_VOLUME:/var/log/iotronic" \ - "$IMAGE_NAME:$VERSION" \ - /bin/sh -c "iotronic-dbsync" || error_exit "Errore durante la sincronizzazione del database." -echo "Database sincronizzato con successo." - -# Passaggio 8: Avvio del container -echo "Avvio del container Docker..." -docker start "$CONTAINER_NAME" || error_exit "Errore durante l'avvio del container Docker." -echo "Container $CONTAINER_NAME avviato con successo." 
diff --git a/4-conductor/build/Dockerfile b/4-conductor/build/Dockerfile deleted file mode 100644 index 08e4d4d..0000000 --- a/4-conductor/build/Dockerfile +++ /dev/null @@ -1,95 +0,0 @@ -# Base image -FROM ubuntu:bionic - -# Impostazioni delle variabili d'ambiente -ENV DEBIAN_FRONTEND=noninteractive \ - LC_CTYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - TZ=Europe/Rome - -# Installazione delle dipendenze di sistema e configurazione iniziale -RUN apt-get update && apt-get install -y --no-install-recommends \ - software-properties-common \ - locales \ - tzdata \ - build-essential \ - python3.8 \ - python3.8-distutils \ - python3.8-dev \ - python3-pip \ - python3-setuptools \ - git \ - apache2 \ - libapache2-mod-wsgi-py3 \ - rustc \ - libffi-dev \ - libssl-dev \ - pkg-config \ - zlib1g-dev \ - gcc \ - && locale-gen en_US.UTF-8 \ - && echo $TZ > /etc/timezone \ - && ln -fs /usr/share/zoneinfo/$TZ /etc/localtime \ - && dpkg-reconfigure -f noninteractive tzdata \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -# Configurazione per utilizzare Python 3.8 come predefinito -RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1 - -# Copia del repository iotronic nel container -COPY ./../iotronic /opt/build/iotronic - -# Imposta la directory di lavoro -WORKDIR /opt/build/iotronic - -# Installazione delle dipendenze Python -RUN pip3 install --upgrade pip \ - && pip3 install setuptools==59.6.0 wheel cffi cryptography pyopenssl \ - && pip3 install --ignore-installed PyYAML \ - && pip3 install \ - "pbr!=2.1.0,>=2.0.0" \ - "eventlet!=0.18.3,!=0.20.1,>=0.18.2" \ - "oslo.config>=5.2.0" \ - "oslo.log>=3.36.0" \ - "oslo.concurrency>=3.26.0" \ - "oslo.policy>=1.35.0" \ - "oslo.messaging>=6.3.0" \ - "oslo.db>=4.27.0" \ - "paramiko>=2.0.0" \ - "SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10" \ - "keystonemiddleware>=4.17.0" \ - "autobahn>=18.10.1" \ - "python-neutronclient>=6.7.0" \ - "python-designateclient>=2.11.0" \ - 
"pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0" \ - "PyMySQL>=0.7.6" \ - "osprofiler>=1.5.0" \ - "WSME>=0.8" \ - && python3 setup.py install - -# Creazione dell'utente iotronic e configurazione dei log -RUN useradd -m -d /var/lib/iotronic iotronic \ - && mkdir -p /var/log/iotronic \ - && touch /var/log/iotronic/iotronic-conductor.log \ - && touch /var/log/iotronic/iotronic-api_error.log \ - && touch /var/log/iotronic/iotronic-api_access.log \ - && chown -R iotronic:iotronic /var/log/iotronic/ - -# Configurazione di Apache -RUN cp etc/apache2/iotronic.conf /etc/apache2/sites-available/iotronic.conf \ - && a2ensite iotronic \ - && a2enmod wsgi - -# Monta volumi per configurazione e log -VOLUME ["/etc/iotronic", "/var/log/iotronic"] - -# Esposizione della porta per iotronic -EXPOSE 8812 - -# Copia dello script di avvio -COPY bin/startConductor /usr/local/bin/startConductor -RUN chmod +x /usr/local/bin/startConductor - -# Comando di default per avvio -CMD ["/usr/local/bin/startConductor"] diff --git a/4-conductor/conductor-config.env b/4-conductor/conductor-config.env deleted file mode 100644 index b0b815f..0000000 --- a/4-conductor/conductor-config.env +++ /dev/null @@ -1,22 +0,0 @@ -# Configurazione generale del Conductor -VERSION=latest - -# Docker -IMAGE_NAME=s4t/iotronic-conductor -CONTAINER_NAME=iotronic-conductor - -# Volumi e directory -CONFIG_VOLUME=iotronic-conductor_config -LOG_VOLUME=/var/log/iotronic-conductor -REPO_DIR=./../iotronic -BUILD_DIR=./build -CONF_FILE=./conf/iotronic.conf - -# Mapping delle porte -PORT_MAPPING=8812:8812 - -# Database Conductor -DB_CONNECTION_STRING=mysql+pymysql://iotronic:s4t@localhost/iotronic -IOTRONIC_DB_NAME=iotronic -IOTRONIC_DB_USER=iotronic -IOTRONIC_DBPASS=s4t diff --git a/4-conductor/conf/iotronic.conf b/4-conductor/conf/iotronic.conf deleted file mode 100644 index a253890..0000000 --- a/4-conductor/conf/iotronic.conf +++ /dev/null @@ -1,102 +0,0 @@ -[DEFAULT] -transport_url = rabbit://openstack:RABBIT_PASS@controller - 
-debug=True -log_file = /var/log/iotronic/iotronic-conductor.log -proxy=nginx - - -# Authentication strategy used by iotronic-api: one of -# "keystone" or "noauth". "noauth" should not be used in a -# production environment because all authentication will be -# disabled. (string value) -auth_strategy=keystone - -# Enable pecan debug mode. WARNING: this is insecure and -# should not be used in a production environment. (boolean -# value) -#pecan_debug=false - - -[conductor] -service_port_min=50000 -service_port_max=50100 - -[wamp] -wamp_transport_url = ws://controller:8181/ -wamp_realm = s4t -#skip_cert_verify= False -#register_agent = True - - - -[database] -connection = mysql+pymysql://iotronic:IOTRONIC_DBPASS@controller/iotronic - -[keystone_authtoken] -www_authenticate_uri = http://controller:5000 -auth_url = http://controller:5000 -auth_plugin = password -auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service -username = iotronic -password = smartme - - -[neutron] -auth_url = http://controller:5000 -url = http://controller:9696 -auth_strategy = password -auth_type = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = neutron -password = NEUTRON_PASS -retries = 3 -project_domain_id= default - - -[designate] -auth_url = http://controller:5000 -url = http://controller:9001 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = designate -password = password -retries = 3 -project_domain_id= default - - -[cors] -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. 
Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user -# credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. -# Defaults to HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer -# value) -#max_age = 3600 - -# Indicate which methods can be used during the actual -# request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the -# actual request. (list value) -#allow_headers = diff --git a/4-conductor/create_IotronicDB_if_needed b/4-conductor/create_IotronicDB_if_needed deleted file mode 100755 index 2711193..0000000 --- a/4-conductor/create_IotronicDB_if_needed +++ /dev/null @@ -1,18 +0,0 @@ -#! /bin/bash - -IP_IOTRONIC="localhost" -IOTRONIC_PASS="s4t" - -openstack service create iot --name Iotronic -openstack user create --password $IOTRONIC_PASS iotronic -openstack role add --project service --user iotronic admin -openstack role create admin_iot_project -openstack role create manager_iot_project -openstack role create user_iot -openstack role add --project service --user iotronic admin_iot_project - -openstack endpoint create --region RegionOne iot public http://$IP_IOTRONIC:8812 -openstack endpoint create --region RegionOne iot internal http://$IP_IOTRONIC:8812 -openstack endpoint create --region RegionOne iot admin http://$IP_IOTRONIC:8812 - -openstack role add --project admin --user admin admin_iot_project diff --git a/5-wagent/5-wagent b/5-wagent/5-wagent deleted file mode 100755 index 2c49786..0000000 --- a/5-wagent/5-wagent +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - - -# Caricamento della configurazione dal file -CONFIG_FILE="./wagent-config.env" -if [ ! -f "$CONFIG_FILE" ]; then - echo "Errore: Il file di configurazione $CONFIG_FILE non esiste." 
- exit 1 -fi -source "$CONFIG_FILE" - -# Funzione per stampare messaggi di errore -function error_exit { - echo "Errore: $1" - exit 1 -} - -# Verifica dell'esistenza del repository iotronic -if [ ! -d "$REPO_DIR" ]; then - error_exit "La directory del repository $REPO_DIR non esiste." -fi - -# Verifica se il file di configurazione esiste -if [ ! -f "$CONF_FILE" ]; then - error_exit "Il file di configurazione $CONF_FILE non esiste." -fi - -# Passaggio 1: Copia del repository iotronic nella directory di build -echo "Preparazione della directory di build..." -cp -r "$REPO_DIR" "$BUILD_DIR/" || error_exit "Errore durante la copia del repository iotronic nella directory di build." - -# Passaggio 2: Build dell'immagine Docker -echo "Costruzione dell'immagine Docker..." -cd "$BUILD_DIR" || error_exit "Impossibile accedere alla directory $BUILD_DIR" -docker build -t "$IMAGE_NAME:$VERSION" . || error_exit "Errore durante la build dell'immagine Docker." -cd - >/dev/null -echo "Build completata con successo." - -# Passaggio 3: Creazione del container -echo "Creazione del container Docker..." -docker create \ - --name="$CONTAINER_NAME" \ - --restart unless-stopped \ - --net=host \ - --hostname=wagent1 \ - -v "$CONFIG_VOLUME:/etc/iotronic/" \ - -v "$LOG_VOLUME:/var/log/iotronic" \ - "$IMAGE_NAME:$VERSION" || error_exit "Errore durante la creazione del container Docker." -echo "Container creato con successo." - -# Passaggio 4: Configurazione dei permessi sui log -echo "Configurazione dei permessi sui log..." -docker run --rm \ - --net=host \ - -v "$CONFIG_VOLUME:/etc/iotronic/" \ - -v "$LOG_VOLUME:/var/log/iotronic" \ - "$IMAGE_NAME:$VERSION" \ - /bin/sh -c "chown -R iotronic:iotronic /var/log/iotronic/" || error_exit "Errore durante la configurazione dei permessi sui log." -echo "Permessi sui log configurati con successo." - -# Passaggio 5: Copia del file di configurazione nel container -echo "Copia del file di configurazione nel container..." 
-docker cp "$CONF_FILE" "$CONTAINER_NAME:/etc/iotronic/" || error_exit "Errore durante la copia del file di configurazione." -echo "File di configurazione copiato con successo." - -# Passaggio 6: Stop e avvio del container -echo "Riavvio del container Docker..." -docker stop "$CONTAINER_NAME" >/dev/null || error_exit "Errore durante lo stop del container Docker." -docker start "$CONTAINER_NAME" || error_exit "Errore durante l'avvio del container Docker." -echo "Container $CONTAINER_NAME avviato con successo." diff --git a/5-wagent/Dockerfile b/5-wagent/Dockerfile deleted file mode 100644 index ec5c249..0000000 --- a/5-wagent/Dockerfile +++ /dev/null @@ -1,72 +0,0 @@ -# Base image -FROM ubuntu:bionic - -# Impostazioni delle variabili d'ambiente -ENV DEBIAN_FRONTEND=noninteractive \ - LC_CTYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - TZ=Europe/Rome - -# Installazione delle dipendenze di sistema e configurazione iniziale -RUN apt-get update && apt-get install -y --no-install-recommends \ - software-properties-common \ - locales \ - tzdata \ - build-essential \ - python3 \ - python3-pip \ - python3-setuptools \ - python3-openstackclient \ - git \ - apache2 \ - libapache2-mod-wsgi-py3 \ - rustc \ - && locale-gen en_US.UTF-8 \ - && echo $TZ > /etc/timezone \ - && ln -fs /usr/share/zoneinfo/$TZ /etc/localtime \ - && dpkg-reconfigure -f noninteractive tzdata \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -# Clonazione del repository iotronic -WORKDIR /opt/build/ -RUN git clone https://opendev.org/x/iotronic.git - -# Impostazione della directory di lavoro -WORKDIR /opt/build/iotronic - -# Installazione di Python e pacchetti richiesti -RUN pip3 install --upgrade pip setuptools-rust \ - && pip3 install --ignore-installed PyYAML \ - && pip3 install \ - "pbr!=2.1.0,>=2.0.0" \ - "eventlet!=0.18.3,!=0.20.1,>=0.18.2" \ - "oslo.config>=5.2.0" \ - "oslo.log>=3.36.0" \ - "oslo.concurrency>=3.26.0" \ - "oslo.policy>=1.35.0" \ - "oslo.messaging>=6.3.0" \ - 
"oslo.db>=4.27.0" \ - "paramiko>=2.0.0" \ - "SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10" \ - "keystonemiddleware>=4.17.0" \ - "autobahn>=18.10.1" \ - "python-neutronclient>=6.7.0" \ - "python-designateclient>=2.11.0" \ - "pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0" \ - "PyMySQL>=0.7.6" \ - "osprofiler>=1.5.0" \ - "WSME>=0.8" \ - && python3 setup.py install - -# Creazione dell'utente iotronic e configurazione dei log -RUN useradd -m -d /var/lib/iotronic iotronic \ - && mkdir -p /var/log/iotronic \ - && touch /var/log/iotronic/iotronic-wagent.log \ - && chown -R iotronic:iotronic /var/log/iotronic/ - -# Monta volumi per configurazione e log -VOLUME ["/etc/iotronic", "/var/log/iotronic"] - -# Comando di default per avvio del WAMP agent -CMD ["/usr/local/bin/iotronic-wamp-agent"] diff --git a/5-wagent/build/Dockerfile b/5-wagent/build/Dockerfile deleted file mode 100644 index ec5c249..0000000 --- a/5-wagent/build/Dockerfile +++ /dev/null @@ -1,72 +0,0 @@ -# Base image -FROM ubuntu:bionic - -# Impostazioni delle variabili d'ambiente -ENV DEBIAN_FRONTEND=noninteractive \ - LC_CTYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - TZ=Europe/Rome - -# Installazione delle dipendenze di sistema e configurazione iniziale -RUN apt-get update && apt-get install -y --no-install-recommends \ - software-properties-common \ - locales \ - tzdata \ - build-essential \ - python3 \ - python3-pip \ - python3-setuptools \ - python3-openstackclient \ - git \ - apache2 \ - libapache2-mod-wsgi-py3 \ - rustc \ - && locale-gen en_US.UTF-8 \ - && echo $TZ > /etc/timezone \ - && ln -fs /usr/share/zoneinfo/$TZ /etc/localtime \ - && dpkg-reconfigure -f noninteractive tzdata \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -# Clonazione del repository iotronic -WORKDIR /opt/build/ -RUN git clone https://opendev.org/x/iotronic.git - -# Impostazione della directory di lavoro -WORKDIR /opt/build/iotronic - -# Installazione di Python e pacchetti richiesti -RUN pip3 install 
--upgrade pip setuptools-rust \ - && pip3 install --ignore-installed PyYAML \ - && pip3 install \ - "pbr!=2.1.0,>=2.0.0" \ - "eventlet!=0.18.3,!=0.20.1,>=0.18.2" \ - "oslo.config>=5.2.0" \ - "oslo.log>=3.36.0" \ - "oslo.concurrency>=3.26.0" \ - "oslo.policy>=1.35.0" \ - "oslo.messaging>=6.3.0" \ - "oslo.db>=4.27.0" \ - "paramiko>=2.0.0" \ - "SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10" \ - "keystonemiddleware>=4.17.0" \ - "autobahn>=18.10.1" \ - "python-neutronclient>=6.7.0" \ - "python-designateclient>=2.11.0" \ - "pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0" \ - "PyMySQL>=0.7.6" \ - "osprofiler>=1.5.0" \ - "WSME>=0.8" \ - && python3 setup.py install - -# Creazione dell'utente iotronic e configurazione dei log -RUN useradd -m -d /var/lib/iotronic iotronic \ - && mkdir -p /var/log/iotronic \ - && touch /var/log/iotronic/iotronic-wagent.log \ - && chown -R iotronic:iotronic /var/log/iotronic/ - -# Monta volumi per configurazione e log -VOLUME ["/etc/iotronic", "/var/log/iotronic"] - -# Comando di default per avvio del WAMP agent -CMD ["/usr/local/bin/iotronic-wamp-agent"] diff --git a/5-wagent/conf/iotronic.conf b/5-wagent/conf/iotronic.conf deleted file mode 100644 index 609c33e..0000000 --- a/5-wagent/conf/iotronic.conf +++ /dev/null @@ -1,95 +0,0 @@ -[DEFAULT] -transport_url = rabbit://openstack:RABBIT_PASS@controller - -debug=True -proxy=nginx -log_file = /var/log/iotronic/iotronic-wagent.log - -# Authentication strategy used by iotronic-api: one of -# "keystone" or "noauth". "noauth" should not be used in a -# production environment because all authentication will be -# disabled. (string value) -auth_strategy=keystone - -# Enable pecan debug mode. WARNING: this is insecure and -# should not be used in a production environment. 
(boolean -# value) -#pecan_debug=false - - -[wamp] -wamp_transport_url = wss://controller:8181/ -wamp_realm = s4t -skip_cert_verify= True -register_agent = True - - - -[database] -connection = mysql+pymysql://iotronic:IOTRONIC_DBPASS@controller/iotronic - -[keystone_authtoken] -www_authenticate_uri = http://controller:5000 -auth_url = http://controller:5000 -auth_plugin = password -project_domain_id = default -user_domain_id = default -project_name = service -username = iotronic -password = smartme - - -[neutron] -auth_url = http://controller:5000 -url = http://controller:9696 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = neutron -password = netrn_pwd -retries = 3 -project_domain_id= default - - -[designate] -auth_url = http://controller:35357 -url = http://controller:9001 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = designate -password = password -retries = 3 -project_domain_id= default - - -[cors] -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user -# credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. -# Defaults to HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer -# value) -#max_age = 3600 - -# Indicate which methods can be used during the actual -# request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the -# actual request. 
(list value) -#allow_headers = diff --git a/5-wagent/wagent-config.env b/5-wagent/wagent-config.env deleted file mode 100644 index 056b7b2..0000000 --- a/5-wagent/wagent-config.env +++ /dev/null @@ -1,22 +0,0 @@ -# Configurazione generale del Wagent -VERSION=latest - -# Docker -IMAGE_NAME=s4t/iotronic-wagent -CONTAINER_NAME=iotronic-wagent - -# Volumi e directory -CONFIG_VOLUME=iotronic-wagent_config -LOG_VOLUME=/var/log/iotronic-wagent -REPO_DIR=./../iotronic -BUILD_DIR=./build -CONF_FILE=./conf/iotronic.conf - -# Mapping delle porte -PORT_MAPPING=8812:8812 - -# Database Conductor -DB_CONNECTION_STRING=mysql+pymysql://iotronic:s4t@localhost/iotronic -IOTRONIC_DB_NAME=iotronic -IOTRONIC_DB_USER=iotronic -IOTRONIC_DBPASS=s4t diff --git a/6-ui/6-ui b/6-ui/6-ui deleted file mode 100755 index 109e7bc..0000000 --- a/6-ui/6-ui +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# Configurazione -VERSION="latest" -IMAGE_NAME="s4t/iotronic-ui" -CONTAINER_NAME="iotronic-ui" -CONFIG_VOLUME="iotronic-ui_config" -LOG_VOLUME="/var/log/iotronic-ui" -BUILD_DIR="./build" # Directory contenente il Dockerfile -CONF_FILE="./conf/local_settings.py" # Percorso del file di configurazione - -# Funzione per gestire errori -function error_exit { - echo "Errore: $1" - exit 1 -} - -# Verifica se la directory di build esiste -if [ ! -d "$BUILD_DIR" ]; then - error_exit "La directory di build $BUILD_DIR non esiste." -fi - -# Verifica se il file di configurazione esiste -if [ ! -f "$CONF_FILE" ]; then - error_exit "Il file di configurazione $CONF_FILE non esiste." -fi - -# Passaggio 1: Build dell'immagine Docker -echo "Costruzione dell'immagine Docker..." -cd "$BUILD_DIR" || error_exit "Impossibile accedere alla directory $BUILD_DIR" -docker build -t "$IMAGE_NAME:$VERSION" . || error_exit "Errore durante la build dell'immagine Docker." -cd - >/dev/null -echo "Build completata con successo." - -# Passaggio 2: Creazione del container Docker -echo "Creazione del container Docker..." 
-docker create \ - --name="$CONTAINER_NAME" \ - --restart unless-stopped \ - --net=host \ - -p 8585:80 \ - -v "$CONFIG_VOLUME:/etc/openstack-dashboard/" \ - -v "$LOG_VOLUME:/var/log/apache2/" \ - "$IMAGE_NAME:$VERSION" || error_exit "Errore durante la creazione del container Docker." -echo "Container creato con successo." - -# Passaggio 3: Copia del file di configurazione nel container -echo "Copia del file di configurazione nel container..." -docker cp "$CONF_FILE" "$CONTAINER_NAME:/etc/openstack-dashboard/" || error_exit "Errore durante la copia del file di configurazione." -echo "File di configurazione copiato con successo." - -# Passaggio 4: Avvio del container -echo "Avvio del container Docker..." -docker start "$CONTAINER_NAME" || error_exit "Errore durante l'avvio del container Docker." -echo "Container $CONTAINER_NAME avviato con successo." - diff --git a/6-ui/build/Dockerfile b/6-ui/build/Dockerfile deleted file mode 100644 index e680976..0000000 --- a/6-ui/build/Dockerfile +++ /dev/null @@ -1,66 +0,0 @@ -FROM ubuntu:bionic - -ENV DEBIAN_FRONTEND=noninteractive \ - LC_CTYPE=en_US.UTF-8 \ - LANG=en_US.UTF-8 \ - TZ=Europe/Rome - -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - software-properties-common \ - memcached \ - python-memcache \ - openstack-dashboard \ - git \ - python3-pip \ - python3-setuptools \ - nano \ - nocache \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -# Clona i repository necessari -RUN git clone https://opendev.org/x/python-iotronicclient.git /opt/build/python-iotronicclient -RUN git clone https://opendev.org/x/iotronic-ui.git /opt/build/iotronic-ui - -# Installa python-iotronicclient -WORKDIR /opt/build/python-iotronicclient -RUN pip3 install --upgrade pip \ - && pip3 install \ - "pbr>=2.0.0" \ - "appdirs>=1.3.0" \ - "dogpile.cache>=0.6.2" \ - "jsonschema>=3.2.0" \ - "keystoneauth1>=2.18.0" \ - "osc-lib>=1.2.0" \ - "oslo.i18n>=3.15.3" \ - "oslo.serialization>=1.10.0" \ - 
"oslo.utils>=3.33.0" \ - "PrettyTable>=2.0.0" \ - "python-openstackclient>=3.3.0" \ - "PyYAML>=3.12" \ - "requests>=2.18.0" \ - "six>=1.11.0" \ - "pbr>=1.8" \ - && pip3 install . - -# Installa iotronic-ui -WORKDIR /opt/build/iotronic-ui -RUN pip3 install \ - "Django>=2.2.24" \ - "django-compressor" \ - "oslo.config>=6.8.1" \ - && python3 setup.py install \ - && cp iotronic_ui/api/iotronic.py /usr/share/openstack-dashboard/openstack_dashboard/api/ \ - && cp iotronic_ui/enabled/_6000_iot.py /usr/share/openstack-dashboard/openstack_dashboard/enabled/ \ - && cp iotronic_ui/enabled/_6* /usr/share/openstack-dashboard/openstack_dashboard/enabled/ - -# Aggiungi lo script startUI -COPY bin/startUI /usr/local/bin/startUI -RUN chmod +x /usr/local/bin/startUI - -VOLUME ["/etc/openstack-dashboard/"] - -EXPOSE 80 - -CMD ["/usr/local/bin/startUI"] diff --git a/6-ui/build/bin/startUI b/6-ui/build/bin/startUI deleted file mode 100755 index 039e7a5..0000000 --- a/6-ui/build/bin/startUI +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# clean all pid -rm /run/apache2/apache2.pid -# Start the first process -/usr/sbin/apache2ctl -D FOREGROUND & -status=$? -if [ $status -ne 0 ]; then - echo "Failed to start APACHE2: $status" - exit $status -fi - -# Start the second process -/usr/bin/memcached -u memcache & -status=$? -if [ $status -ne 0 ]; then - echo "Failed to start memcached: $status" - exit $status -fi - -while sleep 60; do - ps aux |grep apache2 |grep -q -v grep - PROCESS_1_STATUS=$? - ps aux |grep memcached |grep -q -v grep - PROCESS_2_STATUS=$? - # If the greps above find anything, they exit with 0 status - # If they are not both 0, then something is wrong - if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then - echo "One of the processes has already exited." 
- exit 1 - fi -done \ No newline at end of file diff --git a/6-ui/conf/local_settings.py b/6-ui/conf/local_settings.py deleted file mode 100644 index d46f9dc..0000000 --- a/6-ui/conf/local_settings.py +++ /dev/null @@ -1,916 +0,0 @@ -# -*- coding: utf-8 -*- - -import os - -from django.utils.translation import ugettext_lazy as _ - -from horizon.utils import secret_key - -from openstack_dashboard.settings import HORIZON_CONFIG - -DEBUG = False - -# This setting controls whether or not compression is enabled. Disabling -# compression makes Horizon considerably slower, but makes it much easier -# to debug JS and CSS changes -#COMPRESS_ENABLED = not DEBUG - -# This setting controls whether compression happens on the fly, or offline -# with `python manage.py compress` -# See https://django-compressor.readthedocs.io/en/latest/usage/#offline-compression -# for more information -#COMPRESS_OFFLINE = not DEBUG - -# WEBROOT is the location relative to Webserver root -# should end with a slash. -WEBROOT = '/' -#LOGIN_URL = WEBROOT + 'auth/login/' -#LOGOUT_URL = WEBROOT + 'auth/logout/' -# -# LOGIN_REDIRECT_URL can be used as an alternative for -# HORIZON_CONFIG.user_home, if user_home is not set. -# Do not set it to '/home/', as this will cause circular redirect loop -#LOGIN_REDIRECT_URL = WEBROOT - -# If horizon is running in production (DEBUG is False), set this -# with the list of host/domain names that the application can serve. -# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts -#ALLOWED_HOSTS = ['horizon.example.com', ] - -# Set SSL proxy settings: -# Pass this header from the proxy after terminating the SSL, -# and don't forget to strip it from the client's request. 
-# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header -#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') - -# If Horizon is being served through SSL, then uncomment the following two -# settings to better secure the cookies from security exploits -#CSRF_COOKIE_SECURE = True -#SESSION_COOKIE_SECURE = True - -# The absolute path to the directory where message files are collected. -# The message file must have a .json file extension. When the user logins to -# horizon, the message files collected are processed and displayed to the user. -#MESSAGES_PATH=None - -# Overrides for OpenStack API versions. Use this setting to force the -# OpenStack dashboard to use a specific API version for a given service API. -# Versions specified here should be integers or floats, not strings. -# NOTE: The version should be formatted as it appears in the URL for the -# service API. For example, The identity service APIs have inconsistent -# use of the decimal point, so valid options would be 2.0 or 3. -# Minimum compute version to get the instance locked status is 2.9. -#OPENSTACK_API_VERSIONS = { -# "data-processing": 1.1, -# "identity": 3, -# "image": 2, -# "volume": 2, -# "compute": 2, -#} - -# Set this to True if running on a multi-domain model. When this is enabled, it -# will require the user to enter the Domain name in addition to the username -# for login. -#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False - -# Set this to True if you want available domains displayed as a dropdown menu -# on the login screen. It is strongly advised NOT to enable this for public -# clouds, as advertising enabled domains to unauthenticated customers -# irresponsibly exposes private information. This should only be used for -# private clouds where the dashboard sits behind a corporate firewall. 
-#OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN = False - -# If OPENSTACK_KEYSTONE_DOMAIN_DROPDOWN is enabled, this option can be used to -# set the available domains to choose from. This is a list of pairs whose first -# value is the domain name and the second is the display name. -#OPENSTACK_KEYSTONE_DOMAIN_CHOICES = ( -# ('Default', 'Default'), -#) - -# Overrides the default domain used when running on single-domain model -# with Keystone V3. All entities will be created in the default domain. -# NOTE: This value must be the name of the default domain, NOT the ID. -# Also, you will most likely have a value in the keystone policy file like this -# "cloud_admin": "rule:admin_required and domain_id:" -# This value must be the name of the domain whose ID is specified there. -#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' - -# Set this to True to enable panels that provide the ability for users to -# manage Identity Providers (IdPs) and establish a set of rules to map -# federation protocol attributes to Identity API attributes. -# This extension requires v3.0+ of the Identity API. -#OPENSTACK_KEYSTONE_FEDERATION_MANAGEMENT = False - -# Set Console type: -# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL", "MKS" -# or None. Set to None explicitly if you want to deactivate the console. -#CONSOLE_TYPE = "AUTO" - -# Toggle showing the openrc file for Keystone V2. -# If set to false the link will be removed from the user dropdown menu -# and the API Access page -#SHOW_KEYSTONE_V2_RC = True - -# If provided, a "Report Bug" link will be displayed in the site header -# which links to the value of this setting (ideally a URL containing -# information on how to report issues). -#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com" - -# Show backdrop element outside the modal, do not close the modal -# after clicking on backdrop. -#HORIZON_CONFIG["modal_backdrop"] = "static" - -# Specify a regular expression to validate user passwords. 
-#HORIZON_CONFIG["password_validator"] = { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements."), -#} - -# Turn off browser autocompletion for forms including the login form and -# the database creation workflow if so desired. -#HORIZON_CONFIG["password_autocomplete"] = "off" - -# Setting this to True will disable the reveal button for password fields, -# including on the login form. -#HORIZON_CONFIG["disable_password_reveal"] = False - -LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -# Set custom secret key: -# You can either set it to a specific value or you can let horizon generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, -# there may be situations where you would want to set this explicitly, e.g. -# when multiple dashboard instances are distributed on different machines -# (usually behind a load-balancer). Either you have to make sure that a session -# gets all requests routed to the same dashboard instance or you set the same -# SECRET_KEY for all of them. -SECRET_KEY = secret_key.generate_or_read_from_file('/var/lib/openstack-dashboard/secret_key') - -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. 
To use -# memcached set CACHES to something like - -CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION': 'controller:11211', - }, -} - -#CACHES = { -# 'default': { -# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', -# } -#} - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# Configure these for your outgoing email host -#EMAIL_HOST = 'smtp.my-company.com' -#EMAIL_PORT = 25 -#EMAIL_HOST_USER = 'djangomail' -#EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). -#AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v3', 'cluster1'), -# ('http://cluster2.example.com:5000/v3', 'cluster2'), -#] - -OPENSTACK_HOST = "controller" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "member" - -# For setting the default service region on a per-endpoint basis. Note that the -# default value for this setting is {}, and below is just an example of how it -# should be specified. -# A key of '*' is an optional global default if no other key matches. -#DEFAULT_SERVICE_REGIONS = { -# '*': 'RegionOne' -# OPENSTACK_KEYSTONE_URL: 'RegionTwo' -#} - -# Enables keystone web single-sign-on if set to True. -#WEBSSO_ENABLED = False - -# Authentication mechanism to be selected as default. -# The value must be a key from WEBSSO_CHOICES. -#WEBSSO_INITIAL_CHOICE = "credentials" - -# The list of authentication mechanisms which include keystone -# federation protocols and identity provider/federation protocol -# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol -# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID -# Connect respectively. -# Do not remove the mandatory credentials mechanism. 
-# Note: The last two tuples are sample mapping keys to a identity provider -# and federation protocol combination (WEBSSO_IDP_MAPPING). -#WEBSSO_CHOICES = ( -# ("credentials", _("Keystone Credentials")), -# ("oidc", _("OpenID Connect")), -# ("saml2", _("Security Assertion Markup Language")), -# ("acme_oidc", "ACME - OpenID Connect"), -# ("acme_saml2", "ACME - SAML2"), -#) - -# A dictionary of specific identity provider and federation protocol -# combinations. From the selected authentication mechanism, the value -# will be looked up as keys in the dictionary. If a match is found, -# it will redirect the user to a identity provider and federation protocol -# specific WebSSO endpoint in keystone, otherwise it will use the value -# as the protocol_id when redirecting to the WebSSO by protocol endpoint. -# NOTE: The value is expected to be a tuple formatted as: (, ). -#WEBSSO_IDP_MAPPING = { -# "acme_oidc": ("acme", "oidc"), -# "acme_saml2": ("acme", "saml2"), -#} - -# If set this URL will be used for web single-sign-on authentication -# instead of OPENSTACK_KEYSTONE_URL. This is needed in the deployment -# scenarios where network segmentation is used per security requirement. -# In this case, the controllers are not reachable from public network. -# Therefore, user's browser will not be able to access OPENSTACK_KEYSTONE_URL -# if it is set to the internal endpoint. -#WEBSSO_KEYSTONE_URL = "http://keystone-public.example.com/v3" - -# The Keystone Provider drop down uses Keystone to Keystone federation -# to switch between Keystone service providers. -# Set display name for Identity Provider (dropdown display name) -#KEYSTONE_PROVIDER_IDP_NAME = "Local Keystone" -# This id is used for only for comparison with the service provider IDs. This ID -# should not match any service provider IDs. 
-#KEYSTONE_PROVIDER_IDP_ID = "localkeystone" - -# Disable SSL certificate checks (useful for self-signed certificates): -#OPENSTACK_SSL_NO_VERIFY = True - -# The CA certificate to use to verify SSL connections -#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. -# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. -OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True, - 'can_edit_group': True, - 'can_edit_project': True, - 'can_edit_domain': True, - 'can_edit_role': True, -} - -# Setting this to True, will add a new "Retrieve Password" action on instance, -# allowing Admin session password retrieval/decryption. -#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False - -# The Launch Instance user experience has been significantly enhanced. -# You can choose whether to enable the new launch instance experience, -# the legacy experience, or both. The legacy experience will be removed -# in a future release, but is available as a temporary backup setting to ensure -# compatibility with existing deployments. Further development will not be -# done on the legacy experience. Please report any problems with the new -# experience via the Launchpad tracking system. -# -# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to -# determine the experience to enable. Set them both to true to enable -# both. -#LAUNCH_INSTANCE_LEGACY_ENABLED = True -#LAUNCH_INSTANCE_NG_ENABLED = False - -# A dictionary of settings which can be used to provide the default values for -# properties found in the Launch Instance modal. 
-#LAUNCH_INSTANCE_DEFAULTS = { -# 'config_drive': False, -# 'enable_scheduler_hints': True, -# 'disable_image': False, -# 'disable_instance_snapshot': False, -# 'disable_volume': False, -# 'disable_volume_snapshot': False, -# 'create_volume': True, -#} - -# The Xen Hypervisor has the ability to set the mount point for volumes -# attached to instances (other Hypervisors currently do not). Setting -# can_set_mount_point to True will add the option to set the mount point -# from the UI. -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': False, - 'can_set_password': False, - 'requires_keypair': False, - 'enable_quotas': True -} - -# This settings controls whether IP addresses of servers are retrieved from -# neutron in the project instance table. Setting this to ``False`` may mitigate -# a performance issue in the project instance table in large deployments. -#OPENSTACK_INSTANCE_RETRIEVE_IP_ADDRESSES = True - -# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional -# services provided by cinder that is not exposed by its extension API. -OPENSTACK_CINDER_FEATURES = { - 'enable_backup': False, -} - -# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional -# services provided by neutron. Options currently available are load -# balancer service, security groups, quotas, VPN service. -OPENSTACK_NEUTRON_NETWORK = { - 'enable_router': True, - 'enable_quotas': True, - 'enable_ipv6': True, - 'enable_distributed_router': False, - 'enable_ha_router': False, - 'enable_fip_topology_check': True, - - # Default dns servers you would like to use when a subnet is - # created. This is only a default, users can still choose a different - # list of dns servers when creating a new subnet. - # The entries below are examples only, and are not appropriate for - # real deployments - # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"], - - # Set which provider network types are supported. 
Only the network types - # in this list will be available to choose from when creating a network. - # Network types include local, flat, vlan, gre, vxlan and geneve. - # 'supported_provider_types': ['*'], - - # You can configure available segmentation ID range per network type - # in your deployment. - # 'segmentation_id_range': { - # 'vlan': [1024, 2048], - # 'vxlan': [4094, 65536], - # }, - - # You can define additional provider network types here. - # 'extra_provider_types': { - # 'awesome_type': { - # 'display_name': 'Awesome New Type', - # 'require_physical_network': False, - # 'require_segmentation_id': True, - # } - # }, - - # Set which VNIC types are supported for port binding. Only the VNIC - # types in this list will be available to choose from when creating a - # port. - # VNIC types include 'normal', 'direct', 'direct-physical', 'macvtap', - # 'baremetal' and 'virtio-forwarder' - # Set to empty list or None to disable VNIC type selection. - 'supported_vnic_types': ['*'], - - # Set list of available physical networks to be selected in the physical - # network field on the admin create network modal. If it's set to an empty - # list, the field will be a regular input field. - # e.g. ['default', 'test'] - 'physical_networks': [], - -} - -# The OPENSTACK_HEAT_STACK settings can be used to disable password -# field required while launching the stack. -OPENSTACK_HEAT_STACK = { - 'enable_user_pass': True, -} - -# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features -# in the OpenStack Dashboard related to the Image service, such as the list -# of supported image formats. 
-#OPENSTACK_IMAGE_BACKEND = { -# 'image_formats': [ -# ('', _('Select format')), -# ('aki', _('AKI - Amazon Kernel Image')), -# ('ami', _('AMI - Amazon Machine Image')), -# ('ari', _('ARI - Amazon Ramdisk Image')), -# ('docker', _('Docker')), -# ('iso', _('ISO - Optical Disk Image')), -# ('ova', _('OVA - Open Virtual Appliance')), -# ('qcow2', _('QCOW2 - QEMU Emulator')), -# ('raw', _('Raw')), -# ('vdi', _('VDI - Virtual Disk Image')), -# ('vhd', _('VHD - Virtual Hard Disk')), -# ('vhdx', _('VHDX - Large Virtual Hard Disk')), -# ('vmdk', _('VMDK - Virtual Machine Disk')), -# ], -#} - -# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for -# image custom property attributes that appear on image detail pages. -IMAGE_CUSTOM_PROPERTY_TITLES = { - "architecture": _("Architecture"), - "kernel_id": _("Kernel ID"), - "ramdisk_id": _("Ramdisk ID"), - "image_state": _("Euca2ools state"), - "project_id": _("Project ID"), - "image_type": _("Image Type"), -} - -# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image -# custom properties should not be displayed in the Image Custom Properties -# table. -IMAGE_RESERVED_CUSTOM_PROPERTIES = [] - -# Set to 'legacy' or 'direct' to allow users to upload images to glance via -# Horizon server. When enabled, a file form field will appear on the create -# image form. If set to 'off', there will be no file form field on the create -# image form. See documentation for deployment considerations. -#HORIZON_IMAGES_UPLOAD_MODE = 'legacy' - -# Allow a location to be set when creating or updating Glance images. -# If using Glance V2, this value should be False unless the Glance -# configuration and policies allow setting locations. -#IMAGES_ALLOW_LOCATION = False - -# A dictionary of default settings for create image modal. 
-#CREATE_IMAGE_DEFAULTS = { -# 'image_visibility': "public", -#} - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'publicURL'. -#OPENSTACK_ENDPOINT_TYPE = "publicURL" - -# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the -# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is None. This -# value should differ from OPENSTACK_ENDPOINT_TYPE if used. -#SECONDARY_ENDPOINT_TYPE = None - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -# The size of chunk in bytes for downloading objects from Swift -SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024 - -# The default number of lines displayed for instance console log. -INSTANCE_LOG_LENGTH = 35 - -# Specify a maximum number of items to display in a dropdown. -DROPDOWN_MAX_ITEMS = 30 - -# The timezone of the server. This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -# When launching an instance, the menu of available flavors is -# sorted by RAM usage, ascending. If you would like a different sort order, -# you can provide another flavor attribute as sorting key. Alternatively, you -# can provide a custom callback method to use for sorting. You can also provide -# a flag for reverse sort. 
For more info, see -# http://docs.python.org/2/library/functions.html#sorted -#CREATE_INSTANCE_FLAVOR_SORT = { -# 'key': 'name', -# # or -# 'key': my_awesome_callback_method, -# 'reverse': False, -#} - -# Set this to True to display an 'Admin Password' field on the Change Password -# form to verify that it is indeed the admin logged-in who wants to change -# the password. -#ENFORCE_PASSWORD_CHECK = False - -# Modules that provide /auth routes that can be used to handle different types -# of user authentication. Add auth plugins that require extra route handling to -# this list. -#AUTHENTICATION_URLS = [ -# 'openstack_auth.urls', -#] - -# The Horizon Policy Enforcement engine uses these values to load per service -# policy rule files. The content of these files should match the files the -# OpenStack services are using to determine role based access control in the -# target installation. - -# Path to directory containing policy.json files -#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") - -# Map of local copy of service policy files. -# Please insure that your identity policy file matches the one being used on -# your keystone servers. There is an alternate policy file that may be used -# in the Keystone v3 multi-domain case, policy.v3cloudsample.json. -# This file is not included in the Horizon repository by default but can be -# found at -# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \ -# policy.v3cloudsample.json -# Having matching policy files on the Horizon and Keystone servers is essential -# for normal operation. This holds true for all services and their policy files. 
-#POLICY_FILES = { -# 'identity': 'keystone_policy.json', -# 'compute': 'nova_policy.json', -# 'volume': 'cinder_policy.json', -# 'image': 'glance_policy.json', -# 'network': 'neutron_policy.json', -#} - -# Change this patch to the appropriate list of tuples containing -# a key, label and static directory containing two files: -# _variables.scss and _styles.scss -AVAILABLE_THEMES = [ - ('default', 'Default', 'themes/default'), -# ('material', 'Material', 'themes/material'), -] - -LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. - 'disable_existing_loggers': False, - # If apache2 mod_wsgi is used to deploy OpenStack dashboard - # timestamp is output by mod_wsgi. If WSGI framework you use does not - # output timestamp for logging, add %(asctime)s in the following - # format definitions. - 'formatters': { - 'console': { - 'format': '%(levelname)s %(name)s %(message)s' - }, - 'operation': { - # The format of "%(message)s" is defined by - # OPERATION_LOG_OPTIONS['format'] - 'format': '%(message)s' - }, - }, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'logging.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. 
- 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'console', - }, - 'operation': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'operation', - }, - }, - 'loggers': { - 'horizon': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'horizon.operation_log': { - 'handlers': ['operation'], - 'level': 'INFO', - 'propagate': False, - }, - 'openstack_dashboard': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'cinderclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneauth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'glanceclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'neutronclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'swiftclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'oslo_policy': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_auth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'django': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - # Logging from django.db.backends is VERY verbose, send to null - # by default. 
- 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'requests': { - 'handlers': ['null'], - 'propagate': False, - }, - 'urllib3': { - 'handlers': ['null'], - 'propagate': False, - }, - 'chardet.charsetprober': { - 'handlers': ['null'], - 'propagate': False, - }, - 'iso8601': { - 'handlers': ['null'], - 'propagate': False, - }, - 'scss': { - 'handlers': ['null'], - 'propagate': False, - }, - }, -} - -# 'direction' should not be specified for all_tcp/udp/icmp. -# It is specified in the form. -SECURITY_GROUP_RULES = { - 'all_tcp': { - 'name': _('All TCP'), - 'ip_protocol': 'tcp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_udp': { - 'name': _('All UDP'), - 'ip_protocol': 'udp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_icmp': { - 'name': _('All ICMP'), - 'ip_protocol': 'icmp', - 'from_port': '-1', - 'to_port': '-1', - }, - 'ssh': { - 'name': 'SSH', - 'ip_protocol': 'tcp', - 'from_port': '22', - 'to_port': '22', - }, - 'smtp': { - 'name': 'SMTP', - 'ip_protocol': 'tcp', - 'from_port': '25', - 'to_port': '25', - }, - 'dns': { - 'name': 'DNS', - 'ip_protocol': 'tcp', - 'from_port': '53', - 'to_port': '53', - }, - 'http': { - 'name': 'HTTP', - 'ip_protocol': 'tcp', - 'from_port': '80', - 'to_port': '80', - }, - 'pop3': { - 'name': 'POP3', - 'ip_protocol': 'tcp', - 'from_port': '110', - 'to_port': '110', - }, - 'imap': { - 'name': 'IMAP', - 'ip_protocol': 'tcp', - 'from_port': '143', - 'to_port': '143', - }, - 'ldap': { - 'name': 'LDAP', - 'ip_protocol': 'tcp', - 'from_port': '389', - 'to_port': '389', - }, - 'https': { - 'name': 'HTTPS', - 'ip_protocol': 'tcp', - 'from_port': '443', - 'to_port': '443', - }, - 'smtps': { - 'name': 'SMTPS', - 'ip_protocol': 'tcp', - 'from_port': '465', - 'to_port': '465', - }, - 'imaps': { - 'name': 'IMAPS', - 'ip_protocol': 'tcp', - 'from_port': '993', - 'to_port': '993', - }, - 'pop3s': { - 'name': 'POP3S', - 'ip_protocol': 'tcp', - 'from_port': '995', - 'to_port': '995', - }, - 'ms_sql': { 
- 'name': 'MS SQL', - 'ip_protocol': 'tcp', - 'from_port': '1433', - 'to_port': '1433', - }, - 'mysql': { - 'name': 'MYSQL', - 'ip_protocol': 'tcp', - 'from_port': '3306', - 'to_port': '3306', - }, - 'rdp': { - 'name': 'RDP', - 'ip_protocol': 'tcp', - 'from_port': '3389', - 'to_port': '3389', - }, -} - -# Deprecation Notice: -# -# The setting FLAVOR_EXTRA_KEYS has been deprecated. -# Please load extra spec metadata into the Glance Metadata Definition Catalog. -# -# The sample quota definitions can be found in: -# /etc/metadefs/compute-quota.json -# -# The metadata definition catalog supports CLI and API: -# $glance --os-image-api-version 2 help md-namespace-import -# $glance-manage db_load_metadefs -# -# See Metadata Definitions on: -# https://docs.openstack.org/glance/latest/user/glancemetadefcatalogapi.html - -# The hash algorithm to use for authentication tokens. This must -# match the hash algorithm that the identity server and the -# auth_token middleware are using. Allowed values are the -# algorithms supported by Python's hashlib library. -#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5' - -# AngularJS requires some settings to be made available to -# the client side. Some settings are required by in-tree / built-in horizon -# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the -# form of ['SETTING_1','SETTING_2'], etc. -# -# You may remove settings from this list for security purposes, but do so at -# the risk of breaking a built-in horizon feature. These settings are required -# for horizon to function properly. Only remove them if you know what you -# are doing. These settings may in the future be moved to be defined within -# the enabled panel configuration. -# You should not add settings to this list for out of tree extensions. 
-# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI -REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES', - 'LAUNCH_INSTANCE_DEFAULTS', - 'OPENSTACK_IMAGE_FORMATS', - 'OPENSTACK_KEYSTONE_BACKEND', - 'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN', - 'CREATE_IMAGE_DEFAULTS', - 'ENFORCE_PASSWORD_CHECK'] - -# Additional settings can be made available to the client side for -# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS -# !! Please use extreme caution as the settings are transferred via HTTP/S -# and are not encrypted on the browser. This is an experimental API and -# may be deprecated in the future without notice. -#REST_API_ADDITIONAL_SETTINGS = [] - -############################################################################### -# Ubuntu Settings -############################################################################### - - # The default theme if no cookie is present -DEFAULT_THEME = 'default' - -# Default Ubuntu apache configuration uses /horizon as the application root. -WEBROOT='/horizon/' - -# By default, validation of the HTTP Host header is disabled. Production -# installations should have this set accordingly. For more information -# see https://docs.djangoproject.com/en/dev/ref/settings/. -ALLOWED_HOSTS = '*' - -# Compress all assets offline as part of packaging installation -COMPRESS_OFFLINE = True - -# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded -# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame -# Scripting (XFS) vulnerability, so this option allows extra security hardening -# where iframes are not used in deployment. Default setting is True. -# For more information see: -# http://tinyurl.com/anticlickjack -#DISALLOW_IFRAME_EMBED = True - -# Help URL can be made available for the client. To provide a help URL, edit the -# following attribute to the URL of your choice. 
-#HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" - -# Settings for OperationLogMiddleware -# OPERATION_LOG_ENABLED is flag to use the function to log an operation on -# Horizon. -# mask_targets is arrangement for appointing a target to mask. -# method_targets is arrangement of HTTP method to output log. -# format is the log contents. -#OPERATION_LOG_ENABLED = False -#OPERATION_LOG_OPTIONS = { -# 'mask_fields': ['password'], -# 'target_methods': ['POST'], -# 'ignored_urls': ['/js/', '/static/', '^/api/'], -# 'format': ("[%(client_ip)s] [%(domain_name)s]" -# " [%(domain_id)s] [%(project_name)s]" -# " [%(project_id)s] [%(user_name)s] [%(user_id)s] [%(request_scheme)s]" -# " [%(referer_url)s] [%(request_url)s] [%(message)s] [%(method)s]" -# " [%(http_status)s] [%(param)s]"), -#} - -# The default date range in the Overview panel meters - either minus N -# days (if the value is integer N), or from the beginning of the current month -# until today (if set to None). This setting should be used to limit the amount -# of data fetched by default when rendering the Overview panel. -#OVERVIEW_DAYS_RANGE = 1 - -# To allow operators to require users provide a search criteria first -# before loading any data into the views, set the following dict -# attributes to True in each one of the panels you want to enable this feature. -# Follow the convention . -#FILTER_DATA_FIRST = { -# 'admin.instances': False, -# 'admin.images': False, -# 'admin.networks': False, -# 'admin.routers': False, -# 'admin.volumes': False, -# 'identity.users': False, -# 'identity.projects': False, -# 'identity.groups': False, -# 'identity.roles': False -#} - -# Dict used to restrict user private subnet cidr range. -# An empty list means that user input will not be restricted -# for a corresponding IP version. By default, there is -# no restriction for IPv4 or IPv6. 
To restrict -# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR -# to something like -#ALLOWED_PRIVATE_SUBNET_CIDR = { -# 'ipv4': ['10.0.0.0/8', '192.168.0.0/16'], -# 'ipv6': ['fc00::/7'] -#} -ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []} - -# Projects and users can have extra attributes as defined by keystone v3. -# Horizon has the ability to display these extra attributes via this setting. -# If you'd like to display extra data in the project or user tables, set the -# corresponding dict key to the attribute name, followed by the display name. -# For more information, see horizon's customization -# (https://docs.openstack.org/horizon/latest/configuration/customizing.html#horizon-customization-module-overrides) -#PROJECT_TABLE_EXTRA_INFO = { -# 'phone_num': _('Phone Number'), -#} -#USER_TABLE_EXTRA_INFO = { -# 'phone_num': _('Phone Number'), -#} - -# Password will have an expiration date when using keystone v3 and enabling the -# feature. -# This setting allows you to set the number of days that the user will be alerted -# prior to the password expiration. -# Once the password expires keystone will deny the access and users must -# contact an admin to change their password. -#PASSWORD_EXPIRES_WARNING_THRESHOLD_DAYS = 0 diff --git a/README.md b/README.md deleted file mode 100644 index 3b53a5a..0000000 --- a/README.md +++ /dev/null @@ -1,157 +0,0 @@ -# **Guida Completa per il Deployment di Iotronic OpenStack** - -Questa guida dettagliata spiega come effettuare il deployment di Iotronic OpenStack utilizzando Docker, con la possibilità di sfruttare pacchetti esistenti di MySQL, Memcached e Keystone provenienti da un'installazione attiva di OpenStack. - ---- - -## **Requisiti Prerequisiti** -1. Sistema operativo: **Ubuntu Bionic (18.04)** o successivi. -2. **Docker** e **Docker Compose** installati. -3. Certificati SSL per i componenti (gestiti dalla CA locale inclusa). -4. 
Un'installazione attiva di OpenStack con i servizi di: - - **MySQL** (MariaDB). - - **Memcached**. - - **Keystone** configurato e operativo. - ---- - -## **Passaggi per il Deployment** - -### **1. Creazione del Certificato con la Root CA** -La Root CA genera i certificati per i client. - -```bash -cd ~/iotronic-openstack/0-CA/ -sudo ./0-CA_create iotronic -``` - -- Questo comando crea il certificato client per Iotronic e lo copia nella directory `/etc/ssl/iotronic/client_iotronic/`. - ---- - -### **2. Deploy di Crossbar** -Crossbar è il bus di messaggi utilizzato da Iotronic. - -```bash -cd ~/iotronic-openstack/1-iotronic-crossbar/ -sudo ./1-crossbar -``` - -- Verifica che il container **iotronic-crossbar** sia avviato correttamente. - ---- - -### **3. Deploy di WSTUN** -WSTUN gestisce i tunnel WebSocket per Iotronic. - -```bash -cd ~/iotronic-openstack/1-iotronic-wstun/ -sudo ./1-wstun -``` - -- Verifica che il container **iotronic-wstun** sia avviato correttamente. - ---- - -### **4. Configurazione del Database** -Se vuoi utilizzare un database MySQL esistente da un'installazione OpenStack attiva, salta questo passaggio. Altrimenti, crea un container MariaDB dedicato: - -```bash -cd ~/iotronic-openstack/2-mysql/ -sudo ./2-mysql -``` - -- Per verificare lo stato del database: - ```bash - docker logs -f iotronic_db - ``` - ---- - -### **5. Configurazione di Keystone** -Se Keystone è già configurato nell'installazione attiva di OpenStack, salta questo passaggio. Altrimenti: - -1. **Deploy del Container Keystone**: - ```bash - cd ~/iotronic-openstack/3-keystone/ - sudo ./3-keystone - ``` - -2. **Sincronizzazione e Configurazione di Keystone**: - ```bash - sudo ./3.5-keystone - ``` - ---- - -### **6. Deploy del Conductor** -Il Conductor è il cuore di Iotronic. - -1. **Deploy del Conductor**: - ```bash - cd ~/iotronic-openstack/4-conductor/ - sudo ./4-conductor - ``` - -2. 
**Configurazione del Database per il Conductor**: - ```bash - sudo ./4.5-conductor - ``` - ---- - -### **7. Deploy di WAgent** -WAgent è il componente agent-side di Iotronic. - -```bash -cd ~/iotronic-openstack/5-wagent/ -sudo ./5-wagent -``` - ---- - -### **8. Deploy della UI** -La UI consente di gestire Iotronic tramite un'interfaccia grafica. - -```bash -cd ~/iotronic-openstack/6-ui/ -sudo ./6-ui -``` - -- Accedi alla UI tramite il browser puntando a: `http://` - ---- - -## **Configurazioni Aggiuntive** -1. **MySQL**: - - Configura i dettagli del database nel file di configurazione del Conductor (`conductor-config.env`). - - Utilizza il servizio MySQL esistente dell'installazione OpenStack se disponibile. - -2. **Memcached**: - - Assicurati che Memcached sia in esecuzione nell'host OpenStack. - -3. **Keystone**: - - Aggiorna il file `keystone.conf` con le configurazioni corrette per i tuoi servizi. - ---- - -## **Note Importanti** -- **Controllo dei Log**: - Per ogni container, puoi verificare i log utilizzando: - ```bash - docker logs -f - ``` - -- **Persistenza**: - Se necessario, mappa volumi persistenti per i database e i file di configurazione. - -- **Risoluzione dei Problemi**: - - Controlla che i certificati SSL siano validi. - - Verifica la connettività tra i container. 
- -- **Pulizia**: - Rimuovi i container inutilizzati o interrotti per risparmiare risorse: - ```bash - docker system prune -f - ``` - diff --git a/compose_deployment/docker-compose.yml b/compose_deployment/docker-compose.yml index c09ab81..d7efa8b 100644 --- a/compose_deployment/docker-compose.yml +++ b/compose_deployment/docker-compose.yml @@ -258,6 +258,7 @@ services: command: > /bin/bash -c " echo '[INFO] Attesa del database Keystone...'; + chmod 777 /var/log/keystone/keystone-manage.log; until mysql -h iotronic-db -uroot -punime -e 'SELECT 1' >/dev/null 2>&1; do echo '[INFO] Database non ancora pronto, riprovo...'; sleep 5; diff --git a/iotronic/CONTRIBUTING.rst b/iotronic/CONTRIBUTING.rst deleted file mode 100644 index cebec9c..0000000 --- a/iotronic/CONTRIBUTING.rst +++ /dev/null @@ -1,17 +0,0 @@ -If you would like to contribute to the development of OpenStack, you must -follow the steps in this page: - - http://docs.openstack.org/infra/manual/developers.html - -If you already have a good understanding of how the system works and your -OpenStack accounts are set up, you can skip to the development workflow -section of this documentation to learn how changes to OpenStack should be -submitted for review via the Gerrit tool: - - http://docs.openstack.org/infra/manual/developers.html#development-workflow - -Pull requests submitted through GitHub will be ignored. 
- -Bugs should be filed on Launchpad, not GitHub: - - https://bugs.launchpad.net/iotronic diff --git a/iotronic/HACKING.rst b/iotronic/HACKING.rst deleted file mode 100644 index f38f20a..0000000 --- a/iotronic/HACKING.rst +++ /dev/null @@ -1,4 +0,0 @@ -iotronic Style Commandments -=============================================== - -Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ diff --git a/iotronic/LICENSE b/iotronic/LICENSE deleted file mode 100644 index 68c771a..0000000 --- a/iotronic/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - diff --git a/iotronic/MANIFEST.in b/iotronic/MANIFEST.in deleted file mode 100644 index c978a52..0000000 --- a/iotronic/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc diff --git a/iotronic/README.rst b/iotronic/README.rst deleted file mode 100644 index b0e4b07..0000000 --- a/iotronic/README.rst +++ /dev/null @@ -1,223 +0,0 @@ -=============================== -IoTronic -=============================== - -IoTronic is an Internet of Things resource management service for OpenStack clouds. 
- -IoTronic allows to manage Internet of Things resources as part of an OpenStack data center. - -* Free software: Apache license -* Source: http://git.openstack.org/git/openstack/iotronic -* Bugs: http://bugs.launchpad.net/iotronic - -.. contents:: Contents: - :local: - -Basic scenario ----------------------- -For this installation of the Iotronic Service we are considering a scenario with the following hosts and softwares: - -- Controller ( **Ubuntu linux**): Mysql, Keystone, Rabbitmq -- Iotronic ( **Ubuntu linux** ): Iotronic-conductor, iotronic-wamp-agent, crossbar -- Board: iotronic-lightining-rod - -Controller host setup ----------------------- -According to the `Openstack Documentation `_ install the following softwares on the controller host: - -- SQL database -- Message queue -- Memcached -- Keystone - -Creation of the database -------------------------- -On the dbms create the iotronic db and configure the access for the user iotronic:: - - MariaDB [(none)]> CREATE DATABASE iotronic; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON iotronic.* TO iotronic@'localhost' IDENTIFIED BY ‘IOTRONIC_DBPASS’; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON iotronic.* TO iotronic@'%' IDENTIFIED BY ‘IOTRONIC_DBPASS’; - -Add the user and the enpoints on Keystone:: - - openstack service create iot --name Iotronic - openstack user create --password-prompt iotronic - openstack role add --project service --user iotronic admin - openstack role create admin_iot_project - openstack role create manager_iot_project - openstack role create user_iot - openstack role add --project service --user iotronic admin_iot_project - - openstack endpoint create --region RegionOne iot public http://IP_IOTRONIC:8812 - openstack endpoint create --region RegionOne iot internal http://IP_IOTRONIC:8812 - openstack endpoint create --region RegionOne iot admin http://1IP_IOTRONIC:8812 - - -Configuring Iotronic Host --------------------------- - -Crossbar -^^^^^^^^^^^^^^^^^^^^^ -Install crossbar on the 
Iotronic host:: - - apt install python-pip python3-pip libsnappy-dev libssl-dev libffi-dev python-dev - pip3 install python-snappy crossbar - -Configuration:: - - mkdir /etc/crossbar - nano /etc/crossbar/config.json - -**config.json**:: - - { - "version": 2, - "controller": { - }, - "workers": [ - { - "type": "router", - "realms": [ - { - "name": "s4t", - "roles": [ - { - "name": "anonymous", - "permissions": [ - { - "uri": "*", - "allow": { - "publish": true, - "subscribe": true, - "call": true, - "register": true - } - } - ] - } - ] - } - ], - "transports": [ - { - "type": "websocket", - "endpoint": { - "type": "tcp", - "port": 8181 - }, - "debug":true, - "options":{ - "enable_webstatus":true, - "fail_by_drop": true, - "open_handshake_timeout": 2500, - "close_handshake_timeout": 1000, - "auto_ping_interval": 10000, - "auto_ping_timeout": 5000, - "auto_ping_size": 4 - } - } - ] - } - ] - } - -Create a systemd service file /etc/systemd/system/crossbar.service:: - - nano /etc/systemd/system/crossbar.service - -**crossbar.service**:: - - [Unit] - Description=Crossbar.io - After=network.target - - [Service] - Type=simple - User=root - Group=root - StandardInput=null - StandardOutput=journal - StandardError=journal - ExecStart=/usr/local/bin/crossbar start --cbdir=/etc/crossbar/ - ExecStop=/usr/local/bin/crossbar stop --cbdir=/etc/crossbar/ - Restart=on-abort - [Install] - WantedBy=multi-user.target - -Iotronic Installation -^^^^^^^^^^^^^^^^^^^^^ -Get the source:: - - git clone https://github.com/openstack/iotronic.git - -add the user iotronic:: - - useradd -m -d /var/lib/iotronic iotronic - -and Iotronic:: - - cd iotronic - pip3 install -r requirements.txt - python3 setup.py install - -create a log dir:: - - mkdir -p /var/log/iotronic - chown -R iotronic:iotronic /var/log/iotronic/ - -edit ``/etc/iotronic/iotronic.conf`` with the correct configuration:: - - nano /etc/iotronic/iotronic.conf - -There is just one wamp-agent and it must be set as the registration agent:: 
- - register_agent = True - -populate the database:: - - iotronic-dbsync - - -API Service Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Install apache and the other components:: - - sudo apt-get install apache2 python-setuptools libapache2-mod-wsgi-py3 - -create log directory:: - - touch /var/log/iotronic/iotronic-api_error.log - touch /var/log/iotronic/iotronic-api_access.log - chown -R iotronic:iotronic /var/log/iotronic/ - -copy the config apache2 file:: - - cp etc/apache2/iotronic.conf /etc/apache2/sites-available/iotronic.conf - -enable the configuration:: - - a2ensite /etc/apache2/sites-available/iotronic.conf - -restart apache:: - - systemctl restart apache2 - - -Starting -^^^^^^^^^^^^^^^^^^^^^ -On the wamp agent:: - - systemctl enable iotronic-wamp-agent - systemctl start iotronic-wamp-agent - -On the conductor:: - - systemctl enable iotronic-conductor - systemctl start iotronic-conductor - - -Board Side ----------------------- - -Follow the `iotronic-lightning-rod README `_ - diff --git a/iotronic/babel.cfg b/iotronic/babel.cfg deleted file mode 100644 index 15cd6cb..0000000 --- a/iotronic/babel.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[python: **.py] - diff --git a/iotronic/doc/source/conf.py b/iotronic/doc/source/conf.py deleted file mode 100755 index 3966aca..0000000 --- a/iotronic/doc/source/conf.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import sys - -sys.path.insert(0, os.path.abspath('../..')) -# -- General configuration ---------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'sphinx.ext.autodoc', - #'sphinx.ext.intersphinx', - 'oslosphinx' -] - -# autodoc generation is a bit aggressive and a nuisance when doing heavy -# text edit cycles. -# execute "export SPHINX_DEBUG=1" in your terminal to disable - -# The suffix of source filenames. -source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'iotronic' -copyright = u'2013, OpenStack Foundation' - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# -- Options for HTML output -------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -# html_theme_path = ["."] -# html_theme = '_theme' -# html_static_path = ['static'] - -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass -# [howto/manual]). -latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), -] - -# Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/iotronic/doc/source/contributing.rst b/iotronic/doc/source/contributing.rst deleted file mode 100644 index 1728a61..0000000 --- a/iotronic/doc/source/contributing.rst +++ /dev/null @@ -1,4 +0,0 @@ -============ -Contributing -============ -.. include:: ../../CONTRIBUTING.rst diff --git a/iotronic/doc/source/index.rst b/iotronic/doc/source/index.rst deleted file mode 100644 index 64e45d0..0000000 --- a/iotronic/doc/source/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. iotronic documentation master file, created by - sphinx-quickstart on Tue Jul 9 22:26:36 2013. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to iotronic's documentation! -======================================================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - readme - installation - usage - contributing - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/iotronic/doc/source/installation.rst b/iotronic/doc/source/installation.rst deleted file mode 100644 index 8b3cb64..0000000 --- a/iotronic/doc/source/installation.rst +++ /dev/null @@ -1,12 +0,0 @@ -============ -Installation -============ - -At the command line:: - - $ pip install iotronic - -Or, if you have virtualenvwrapper installed:: - - $ mkvirtualenv iotronic - $ pip install iotronic diff --git a/iotronic/doc/source/readme.rst b/iotronic/doc/source/readme.rst deleted file mode 100644 index a6210d3..0000000 --- a/iotronic/doc/source/readme.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
include:: ../../README.rst diff --git a/iotronic/doc/source/usage.rst b/iotronic/doc/source/usage.rst deleted file mode 100644 index 855bce0..0000000 --- a/iotronic/doc/source/usage.rst +++ /dev/null @@ -1,7 +0,0 @@ -======== -Usage -======== - -To use iotronic in a project:: - - import iotronic diff --git a/iotronic/etc/apache2/iotronic-ssl.conf b/iotronic/etc/apache2/iotronic-ssl.conf deleted file mode 100644 index c9d8d99..0000000 --- a/iotronic/etc/apache2/iotronic-ssl.conf +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using the -# Ironic API through mod_wsgi. This version assumes you are -# running devstack to configure the software. 
- -Listen 8812 - - - WSGIDaemonProcess iotronic_ssl user=iotronic group=iotronic threads=5 display-name=%{GROUP} - WSGIScriptAlias / /var/www/cgi-bin/iotronic/app.wsgi - - WSGIProcessGroup iotronic - - ErrorLog /var/log/iotronic/iotronic-api_error.log - LogLevel debug - CustomLog /var/log/iotronic/iotronic-api_access.log combined - - SSLEngine on - SSLCertificateFile "/root/ssl/mysitename.crt" - SSLCertificateKeyFile "/root/ssl/mysitename.key" - - - WSGIProcessGroup iotronic - WSGIApplicationGroup %{GLOBAL} - AllowOverride All - Require all granted - - diff --git a/iotronic/etc/apache2/iotronic.conf b/iotronic/etc/apache2/iotronic.conf deleted file mode 100644 index 222e9b5..0000000 --- a/iotronic/etc/apache2/iotronic.conf +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using the -# Ironic API through mod_wsgi. This version assumes you are -# running devstack to configure the software. 
- -Listen 8812 - - - WSGIDaemonProcess iotronic user=iotronic group=iotronic threads=5 display-name=%{GROUP} - WSGIScriptAlias / /var/www/cgi-bin/iotronic/app.wsgi - - #SetEnv APACHE_RUN_USER stack - #SetEnv APACHE_RUN_GROUP stack - WSGIProcessGroup iotronic - - ErrorLog /var/log/iotronic/iotronic-api_error.log - LogLevel debug - CustomLog /var/log/iotronic/iotronic-api_access.log combined - - - WSGIProcessGroup iotronic - WSGIApplicationGroup %{GLOBAL} - AllowOverride All - Require all granted - - diff --git a/iotronic/etc/iotronic/iotronic.conf b/iotronic/etc/iotronic/iotronic.conf deleted file mode 100644 index ff320ce..0000000 --- a/iotronic/etc/iotronic/iotronic.conf +++ /dev/null @@ -1,98 +0,0 @@ -[DEFAULT] -transport_url=rabbit://:@:5672/ - -debug=True -proxy=nginx - -# Authentication strategy used by iotronic-api: one of -# "keystone" or "noauth". "noauth" should not be used in a -# production environment because all authentication will be -# disabled. (string value) -auth_strategy=keystone - -# Enable pecan debug mode. WARNING: this is insecure and -# should not be used in a production environment. 
(boolean -# value) -#pecan_debug=false - -[conductor] -service_port_min=50000 -service_port_max=60000 - - -[wamp] -wamp_transport_url = ws://:/ -wamp_realm = s4t -# skip_cert_verify= False -# register_agent = True - - - -[database] -connection = mysql+pymysql://:@/iotronic - -[keystone_authtoken] -www_authenticate_uri = http://:5000 -auth_url = http://:5000 -auth_plugin = password -project_domain_id = default -user_domain_id = default -project_name = service -username = iotronic -password = - - -[neutron] -auth_url = http://:35357 -url = http://:9696 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = neutron -password = -retries = 3 -project_domain_id= default - - -[designate] -auth_url = http://:35357 -url = http://:9001 -auth_strategy = password -project_domain_name = default -user_domain_name = default -region_name = RegionOne -project_name = service -username = designate -password = -retries = 3 -project_domain_id= default - - -[cors] -# Indicate whether this resource may be shared with the domain -# received in the requests "origin" header. Format: -# "://[:]", no trailing slash. Example: -# https://horizon.example.com (list value) -#allowed_origin = - -# Indicate that the actual request can include user -# credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. -# Defaults to HTTP Simple Headers. (list value) -#expose_headers = - -# Maximum cache age of CORS preflight requests. (integer -# value) -#max_age = 3600 - -# Indicate which methods can be used during the actual -# request. (list value) -#allow_methods = OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,PATCH - -# Indicate which header field names may be used during the -# actual request. 
(list value) -#allow_headers = \ No newline at end of file diff --git a/iotronic/etc/iotronic/policy.json b/iotronic/etc/iotronic/policy.json deleted file mode 100644 index 2c63c08..0000000 --- a/iotronic/etc/iotronic/policy.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/iotronic/etc/systemd/system/iotronic-conductor.service b/iotronic/etc/systemd/system/iotronic-conductor.service deleted file mode 100644 index 98d51cc..0000000 --- a/iotronic/etc/systemd/system/iotronic-conductor.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=iotronic-conductor -After=network.target - -[Service] -ExecStart=/usr/local/bin/iotronic-conductor - -[Install] -WantedBy=multi-user.target diff --git a/iotronic/etc/systemd/system/iotronic-wamp-agent.service b/iotronic/etc/systemd/system/iotronic-wamp-agent.service deleted file mode 100644 index 6c0f5cd..0000000 --- a/iotronic/etc/systemd/system/iotronic-wamp-agent.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=iotronic-wamp-agent -After=network.target - -[Service] -ExecStart=/usr/local/bin/iotronic-wamp-agent - -[Install] -WantedBy=multi-user.target diff --git a/iotronic/iotronic/__init__.py b/iotronic/iotronic/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/iotronic/iotronic/api/__init__.py b/iotronic/iotronic/api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/iotronic/iotronic/api/app.py b/iotronic/iotronic/api/app.py deleted file mode 100644 index 4c7dc54..0000000 --- a/iotronic/iotronic/api/app.py +++ /dev/null @@ -1,156 +0,0 @@ -# -*- encoding: utf-8 -*- - -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from iotronic.api import config -from iotronic.api.controllers import base -from iotronic.api import hooks -from iotronic.api import middleware - -from iotronic.api.middleware import auth_token -from oslo_config import cfg -import oslo_middleware.cors as cors_middleware -import pecan -from pecan import make_app - -opts = [ - cfg.StrOpt( - 'auth_strategy', - default='keystone', - help=('Authentication strategy used by iotronic-api: "keystone" ' - 'or "noauth". "noauth" should not be used in a production ' - 'environment because all authentication will be disabled.')), - cfg.BoolOpt('debug_tracebacks_in_api', - default=False, - help=('Return server tracebacks in the API response for any ' - 'error responses. WARNING: this is insecure ' - 'and should not be used in a production environment.')), - cfg.BoolOpt( - 'pecan_debug', - default=False, - help=( - 'Enable pecan debug mode. WARNING: this is insecure ' - 'and should not be used in a production environment.')), -] - -api_opts = [ - cfg.StrOpt('host_ip', - default='0.0.0.0', - help=('The IP address on which iotronic-api listens.')), - cfg.PortOpt('port', - default=1288, - help=('The TCP port on which iotronic-api listens.')), - cfg.IntOpt('max_limit', - default=1000, - help=('The maximum number of items returned in a single ' - 'response from a collection resource.')), - cfg.StrOpt('public_endpoint', - help=("Public URL to use when building the links to the API " - "resources." - " If None the links will be built using the request's " - "host URL. 
If the API is operating behind a proxy, you " - "will want to change this to represent the proxy's URL. " - "Defaults to None.")), - cfg.IntOpt('api_workers', - help=('Number of workers for OpenStack Iotronic API service. ' - 'The default is equal to the number of CPUs available ' - 'if that can be determined, else a default worker ' - 'count of 1 is returned.')), - cfg.BoolOpt('enable_ssl_api', - default=False, - help=("Enable the integrated stand-alone API to service " - "requests via HTTPS instead of HTTP. If there is a " - "front-end service performing HTTPS offloading from " - "the service, this option should be False; note, you " - "will want to change public API endpoint to represent " - "SSL termination URL with 'public_endpoint' option.")), -] - -opt_group = cfg.OptGroup(name='api', - title='Options for the iotronic-api service') - - -CONF = cfg.CONF -CONF.register_opts(opts,) -CONF.register_opts(api_opts, 'api') - - -def get_pecan_config(): - # Set up the pecan configuration - filename = config.__file__.replace('.pyc', '.py') - return pecan.configuration.conf_from_file(filename) - - -class IotronicCORS(cors_middleware.CORS): - """Iotronic-specific CORS class - We're adding the Iotronic-specific version headers to the list of simple - headers in order that a request bearing those headers might be accepted by - the Iotronic REST API. 
- """ - simple_headers = cors_middleware.CORS.simple_headers + [ - 'X-Auth-Token', - base.Version.max_string, - base.Version.min_string, - base.Version.string - ] - - -def setup_app(config=None): - - app_hooks = [hooks.ConfigHook(), - hooks.DBHook(), - hooks.ContextHook(config.app.acl_public_routes), - hooks.RPCHook(), - hooks.NoExceptionTracebackHook(), - hooks.PublicUrlHook()] - - app_conf = dict(config.app) - - app = make_app( - app_conf.pop('root'), - hooks=app_hooks, - force_canonical=getattr(config.app, 'force_canonical', True), - wrap_app=middleware.ParsableErrorMiddleware, - **app_conf - ) - - if CONF.auth_strategy == "keystone": - app = auth_token.AuthTokenMiddleware( - app, dict(cfg.CONF), - public_api_routes=config.app.acl_public_routes) - - # Create a CORS wrapper, and attach iotronic-specific defaults that must be - # included in all CORS responses. - app = IotronicCORS(app, CONF) - cors_middleware.set_defaults( - allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'], - expose_headers=[base.Version.max_string, base.Version.min_string, - base.Version.string] - ) - - return app - - -class VersionSelectorApplication(object): - - def __init__(self): - pc = get_pecan_config() - pc.app.enable_acl = (CONF.auth_strategy == 'keystone') - self.v1 = setup_app(config=pc) - - def __call__(self, environ, start_response): - return self.v1(environ, start_response) diff --git a/iotronic/iotronic/api/config.py b/iotronic/iotronic/api/config.py deleted file mode 100644 index 82f2222..0000000 --- a/iotronic/iotronic/api/config.py +++ /dev/null @@ -1,33 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Server Specific Configurations -# See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa -server = { - 'port': '1288', - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa -app = { - 'root': 'iotronic.api.controllers.root.RootController', - 'modules': ['iotronic.api'], - 'static_root': '%(confdir)s/public', - 'debug': False, - 'acl_public_routes': [ - '/', - '/v1', - ], -} diff --git a/iotronic/iotronic/api/controllers/__init__.py b/iotronic/iotronic/api/controllers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/iotronic/iotronic/api/controllers/base.py b/iotronic/iotronic/api/controllers/base.py deleted file mode 100644 index 69c0625..0000000 --- a/iotronic/iotronic/api/controllers/base.py +++ /dev/null @@ -1,115 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import functools - -from webob import exc -import wsme -from wsme import types as wtypes - -from iotronic.common.i18n import _ - - -class APIBase(wtypes.Base): - - created_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is created""" - - updated_at = wsme.wsattr(datetime.datetime, readonly=True) - """The time in UTC at which the object is updated""" - - def as_dict(self): - """Render this object as a dict of its fields.""" - return dict((k, getattr(self, k)) - for k in self.fields - if hasattr(self, k) and - getattr(self, k) != wsme.Unset) - - def unset_fields_except(self, except_list=None): - """Unset fields so they don't appear in the message body. - - :param except_list: A list of fields that won't be touched. - - """ - if except_list is None: - except_list = [] - - for k in self.as_dict(): - if k not in except_list: - setattr(self, k, wsme.Unset) - - -@functools.total_ordering -class Version(object): - """API Version object.""" - - string = 'X-OpenStack-Iotronic-API-Version' - """HTTP Header string carrying the requested version""" - - min_string = 'X-OpenStack-Iotronic-API-Minimum-Version' - """HTTP response header""" - - max_string = 'X-OpenStack-Iotronic-API-Maximum-Version' - """HTTP response header""" - - def __init__(self, headers, default_version, latest_version): - """Create an API Version object from the supplied headers. - - :param headers: webob headers - :param default_version: version to use if not specified in headers - :param latest_version: version to use if latest is requested - :raises: webob.HTTPNotAcceptable - """ - (self.major, self.minor) = Version.parse_headers( - headers, default_version, latest_version) - - def __repr__(self): - return '%s.%s' % (self.major, self.minor) - - @staticmethod - def parse_headers(headers, default_version, latest_version): - """Determine the API version requested based on the headers supplied. 
- - :param headers: webob headers - :param default_version: version to use if not specified in headers - :param latest_version: version to use if latest is requested - :returns: a tuple of (major, minor) version numbers - :raises: webob.HTTPNotAcceptable - """ - version_str = headers.get(Version.string, default_version) - - if version_str.lower() == 'latest': - parse_str = latest_version - else: - parse_str = version_str - - try: - version = tuple(int(i) for i in parse_str.split('.')) - except ValueError: - version = () - - if len(version) != 2: - raise exc.HTTPNotAcceptable(_( - "Invalid value for %s header") % Version.string) - return version - - def __gt__(self, other): - return (self.major, self.minor) > (other.major, other.minor) - - def __eq__(self, other): - return (self.major, self.minor) == (other.major, other.minor) - - def __ne__(self, other): - return not self.__eq__(other) diff --git a/iotronic/iotronic/api/controllers/link.py b/iotronic/iotronic/api/controllers/link.py deleted file mode 100644 index ec07a2d..0000000 --- a/iotronic/iotronic/api/controllers/link.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pecan -from wsme import types as wtypes - -from iotronic.api.controllers import base - - -def build_url(resource, resource_args, bookmark=False, base_url=None): - if base_url is None: - base_url = pecan.request.public_url - - template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' - # FIXME(lucasagomes): I'm getting a 404 when doing a GET on - # a nested resource that the URL ends with a '/'. - # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs - template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' - return template % {'url': base_url, 'res': resource, 'args': resource_args} - - -class Link(base.APIBase): - """A link representation.""" - - href = wtypes.text - """The url of a link.""" - - rel = wtypes.text - """The name of a link.""" - - type = wtypes.text - """Indicates the type of document/link.""" - - @staticmethod - def make_link(rel_name, url, resource, resource_args, - bookmark=False, type=wtypes.Unset): - href = build_url(resource, resource_args, - bookmark=bookmark, base_url=url) - return Link(href=href, rel=rel_name, type=type) diff --git a/iotronic/iotronic/api/controllers/root.py b/iotronic/iotronic/api/controllers/root.py deleted file mode 100644 index 8b8ef8c..0000000 --- a/iotronic/iotronic/api/controllers/root.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers import v1 -from iotronic.api.controllers.v1 import versions -from iotronic.api import expose -import pecan -from pecan import rest -from wsme import types as wtypes - -ID_VERSION1 = 'v1' - - -class Version(base.APIBase): - """An API version representation. - - This class represents an API version, including the minimum and - maximum minor versions that are supported within the major version. - """ - - id = wtypes.text - """The ID of the (major) version, also acts as the release number""" - - links = [link.Link] - """A Link that point to a specific version of the API""" - - status = wtypes.text - """Status of the version. - One of: - * CURRENT - the latest version of API, - * SUPPORTED - supported, but not latest, version of API, - * DEPRECATED - supported, but deprecated, version of API. - """ - - version = wtypes.text - """The current, maximum supported (major.minor) version of API.""" - - min_version = wtypes.text - """Minimum supported (major.minor) version of API.""" - - def __init__(self, id, min_version, version, status='CURRENT'): - self.id = id - self.links = [link.Link.make_link('self', pecan.request.public_url, - self.id, '', bookmark=True)] - self.status = status - self.version = version - self.min_version = min_version - - -class Root(base.APIBase): - name = wtypes.text - """The name of the API""" - - description = wtypes.text - """Some information about this API""" - - versions = [Version] - """Links to all the versions available in this API""" - - default_version = Version - """A link to the default version of the API""" - - @staticmethod - def convert(): - root = Root() - root.name = "OpenStack Iotronic API" - root.description = ("Iotronic is an OpenStack project which aims to " - "provision baremetal machines.") - root.default_version = Version(ID_VERSION1, - versions.MIN_VERSION_STRING, - versions.MAX_VERSION_STRING) - 
root.versions = [root.default_version] - return root - - -class RootController(rest.RestController): - _versions = [ID_VERSION1] - """All supported API versions""" - - _default_version = ID_VERSION1 - """The default API version""" - - v1 = v1.Controller() - - @expose.expose(Root) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return Root.convert() - - @pecan.expose() - def _route(self, args): - # Overrides the default routing behavior. - # It redirects the request to the default version of the ironic API - # if the version number is not specified in the url. - - if args[0] and args[0] not in self._versions: - args = [self._default_version] + args - return super(RootController, self)._route(args) diff --git a/iotronic/iotronic/api/controllers/v1/__init__.py b/iotronic/iotronic/api/controllers/v1/__init__.py deleted file mode 100644 index 35a632d..0000000 --- a/iotronic/iotronic/api/controllers/v1/__init__.py +++ /dev/null @@ -1,222 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Version 1 of the Iotronic API - -Specification can be found at doc/source/webapi/v1.rst -""" - -import pecan -from pecan import rest -from webob import exc -from wsme import types as wtypes - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import board -from iotronic.api.controllers.v1 import enabledwebservice -from iotronic.api.controllers.v1 import fleet -from iotronic.api.controllers.v1 import plugin -from iotronic.api.controllers.v1 import port -from iotronic.api.controllers.v1 import request -from iotronic.api.controllers.v1 import service -from iotronic.api.controllers.v1 import webservice - -from iotronic.api.controllers.v1 import versions -from iotronic.api import expose -from iotronic.common.i18n import _ - -BASE_VERSION = versions.BASE_VERSION - -MIN_VER = base.Version( - {base.Version.string: versions.MIN_VERSION_STRING}, - versions.MIN_VERSION_STRING, versions.MAX_VERSION_STRING) -MAX_VER = base.Version( - {base.Version.string: versions.MAX_VERSION_STRING}, - versions.MIN_VERSION_STRING, versions.MAX_VERSION_STRING) - - -class V1(base.APIBase): - """The representation of the version 1 of the API.""" - - id = wtypes.text - """The ID of the version, also acts as the release number""" - - # links = [link.Link] - """Links that point to a specific URL for this version and documentation""" - - boards = [link.Link] - """Links to the boards resource""" - - plugins = [link.Link] - """Links to the plugins resource""" - - services = [link.Link] - """Links to the services resource""" - - enabledservices = [link.Link] - """Links to the services resource""" - - ports = [link.Link] - """Links to the ports resource""" - - fleet = [link.Link] - """Links to the fleets resource""" - - webservices = [link.Link] - """Links to the webservices resource""" - - @staticmethod - def convert(): - v1 = V1() - v1.id = "v1" - v1.links = [link.Link.make_link('self', pecan.request.public_url, - 'v1', '', 
bookmark=True), - link.Link.make_link('describedby', - 'http://docs.openstack.org', - 'developer/iotronic/dev', - 'api-spec-v1.html', - bookmark=True, type='text/html') - ] - - v1.plugins = [link.Link.make_link('self', pecan.request.public_url, - 'plugins', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'plugins', '', - bookmark=True) - ] - - v1.boards = [link.Link.make_link('self', pecan.request.public_url, - 'boards', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'boards', '', - bookmark=True) - ] - - v1.services = [link.Link.make_link('self', pecan.request.public_url, - 'services', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'services', '', - bookmark=True) - ] - - v1.enabledwebservices = [ - link.Link.make_link('self', pecan.request.public_url, - 'enabledwebservices', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'enabledwebservices', '', - bookmark=True) - ] - - v1.ports = [link.Link.make_link('self', pecan.request.public_url, - 'ports', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, 'ports', '', - bookmark=True)] - - v1.fleets = [link.Link.make_link('self', pecan.request.public_url, - 'fleets', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'fleets', '', - bookmark=True) - ] - - v1.webservices = [link.Link.make_link('self', pecan.request.public_url, - 'webservices', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'webservices', '', - bookmark=True) - ] - - v1.requests = [link.Link.make_link('self', pecan.request.public_url, - 'requests', ''), - link.Link.make_link('bookmark', - pecan.request.public_url, - 'requests', '', - bookmark=True) - ] - - return v1 - - -class Controller(rest.RestController): - """Version 1 API controller root.""" - - boards = board.BoardsController() - plugins = plugin.PluginsController() - services = service.ServicesController() - enabledwebservices = 
enabledwebservice.EnabledWebservicesController() - ports = port.PortsController() - fleets = fleet.FleetsController() - webservices = webservice.WebservicesController() - requests = request.RequestsController() - - @expose.expose(V1) - def get(self): - # NOTE: The reason why convert() it's being called for every - # request is because we need to get the host url from - # the request object to make the links. - return V1.convert() - - def _check_version(self, version, headers=None): - if headers is None: - headers = {} - # ensure that major version in the URL matches the header - if version.major != BASE_VERSION: - raise exc.HTTPNotAcceptable(_( - "Mutually exclusive versions requested. Version %(ver)s " - "requested but not supported by this service. The supported " - "version range is: [%(min)s, %(max)s].") % { - 'ver': version, - 'min': versions.MIN_VERSION_STRING, - 'max': versions.MAX_VERSION_STRING - }, headers=headers) - # ensure the minor version is within the supported range - if version < MIN_VER or version > MAX_VER: - raise exc.HTTPNotAcceptable(_( - "Version %(ver)s was requested but the minor version is not " - "supported by this service. 
The supported version range is: " - "[%(min)s, %(max)s].") % { - 'ver': version, - 'min': versions.MIN_VERSION_STRING, - 'max': versions.MAX_VERSION_STRING - }, headers=headers) - - @pecan.expose() - def _route(self, args): - v = base.Version(pecan.request.headers, versions.MIN_VERSION_STRING, - versions.MAX_VERSION_STRING) - - # Always set the min and max headers - pecan.response.headers[base.Version.min_string] = ( - versions.MIN_VERSION_STRING) - pecan.response.headers[base.Version.max_string] = ( - versions.MAX_VERSION_STRING) - - # assert that requested version is supported - self._check_version(v, pecan.response.headers) - pecan.response.headers[base.Version.string] = str(v) - pecan.request.version = v - - return super(Controller, self)._route(args) - - -__all__ = ('Controller',) diff --git a/iotronic/iotronic/api/controllers/v1/board.py b/iotronic/iotronic/api/controllers/v1/board.py deleted file mode 100644 index 730ec2e..0000000 --- a/iotronic/iotronic/api/controllers/v1/board.py +++ /dev/null @@ -1,1032 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1.enabledwebservice import EnabledWebservice -from iotronic.api.controllers.v1 import location as loc -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api.controllers.v1.webservice import Webservice -from iotronic.api.controllers.v1.webservice import WebserviceCollection -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects -from oslo_log import log as logging -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -LOG = logging.getLogger(__name__) - -_DEFAULT_RETURN_FIELDS = ('name', 'code', 'status', 'uuid', 'session', 'type', - 'fleet', 'lr_version', 'connectivity', 'agent', - 'wstun_ip') -_DEFAULT_WEBSERVICE_RETURN_FIELDS = ('name', 'uuid', 'port', 'board_uuid', - 'extra') - -cache_agent_ip = {} - - -class Board(base.APIBase): - """API representation of a board. - - """ - - uuid = types.uuid - code = wsme.wsattr(wtypes.text) - status = wsme.wsattr(wtypes.text) - name = wsme.wsattr(wtypes.text) - type = wsme.wsattr(wtypes.text) - agent = wsme.wsattr(wtypes.text) - wstun_ip = wsme.wsattr(wtypes.text) - owner = types.uuid - session = wsme.wsattr(wtypes.text) - project = types.uuid - fleet = types.uuid - mobile = types.boolean - lr_version = wsme.wsattr(wtypes.text) - connectivity = types.jsontype - links = wsme.wsattr([link.Link], readonly=True) - location = wsme.wsattr([loc.Location]) - extra = types.jsontype - config = types.jsontype - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Board.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(board, url, fields=None): - board_uuid = board.uuid - if fields is not None: - board.unset_fields_except(fields) - - if (board.agent != None): - if board.agent not in cache_agent_ip: - wagent = objects.WampAgent.get_by_hostname( - pecan.request.context, - board.agent) - wurl = wagent.wsurl - cache_agent_ip[board.agent] = wurl.split("//")[1].split(":")[0] - - board.wstun_ip = cache_agent_ip[board.agent] - - board.links = [link.Link.make_link('self', url, 'boards', - board_uuid), - link.Link.make_link('bookmark', url, 'boards', - board_uuid, bookmark=True) - ] - return board - - @classmethod - def convert_with_links(cls, rpc_board, fields=None): - board = Board(**rpc_board.as_dict()) - - try: - session = objects.SessionWP.get_session_by_board_uuid( - pecan.request.context, board.uuid) - board.session = session.session_id - except Exception: - board.session = None - - try: - list_loc = objects.Location.list_by_board_uuid( - pecan.request.context, board.uuid) - board.location = loc.Location.convert_with_list(list_loc) - except Exception: - board.location = [] - - # to enable as soon as a better session and location management - # is implemented - # if fields is not None: - # api_utils.check_for_invalid_fields(fields, board_dict) - - if board.config == {}: - ragent = objects.WampAgent.get_registration_agent( - pecan.request.context) - board.config = { - "iotronic": { - "board": { - "code": board.code - }, - "wamp": { - "registration-agent": { - "url": ragent.wsurl, - "realm": "s4t" - } - } - } - } - - return cls._convert_with_links(board, - pecan.request.public_url, - fields=fields) - - -class BoardCollection(collection.Collection): - """API representation of a collection of boards.""" - - boards = [Board] - """A list containing boards objects""" - - def __init__(self, **kwargs): - self._type = 'boards' - - 
@staticmethod - def convert_with_links(boards, limit, url=None, fields=None, **kwargs): - collection = BoardCollection() - collection.boards = [Board.convert_with_links(n, fields=fields) - for n in boards] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class Port(base.APIBase): - board_uuid = types.uuid - uuid = types.uuid - VIF_name = wtypes.text - MAC_add = wtypes.text - ip = wtypes.text - network = wtypes.text - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Port.fields) - fields.remove('board_uuid') - for k in fields: - # Skip fields we do not expose. - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) - - -class PortCollection(collection.Collection): - """API representation of a collection of ports.""" - - ports = [Port] - - def __init__(self, **kwargs): - self._type = 'ports' - - @staticmethod - def get_list(ports, fields=None): - collection = PortCollection() - collection.ports = [Port(**n.as_dict()) - for n in ports] - return collection - - -class InjectionPlugin(base.APIBase): - plugin = types.uuid_or_name - board_uuid = types.uuid_or_name - status = wtypes.text - onboot = types.boolean - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.InjectionPlugin.fields) - fields.remove('board_uuid') - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - setattr(self, 'plugin', kwargs.get('plugin_uuid', wtypes.Unset)) - - -class InjectionCollection(collection.Collection): - """API representation of a collection of injection.""" - - injections = [InjectionPlugin] - - def __init__(self, **kwargs): - self._type = 'injections' - - @staticmethod - def get_list(injections, fields=None): - collection = InjectionCollection() - collection.injections = [InjectionPlugin(**n.as_dict()) - for n in injections] - return collection - - -class ExposedService(base.APIBase): - service = types.uuid_or_name - board_uuid = types.uuid_or_name - public_port = wsme.types.IntegerType() - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.ExposedService.fields) - fields.remove('board_uuid') - for k in fields: - # Skip fields we do not expose. - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - setattr(self, 'service', kwargs.get('service_uuid', wtypes.Unset)) - - -class ExposedCollection(collection.Collection): - """API representation of a collection of injection.""" - - exposed = [ExposedService] - - def __init__(self, **kwargs): - self._type = 'exposed' - - @staticmethod - def get_list(exposed, fields=None): - collection = ExposedCollection() - collection.exposed = [ExposedService(**n.as_dict()) - for n in exposed] - return collection - - -class PluginAction(base.APIBase): - action = wsme.wsattr(wtypes.text) - parameters = types.jsontype - - -class ServiceAction(base.APIBase): - action = wsme.wsattr(wtypes.text) - parameters = types.jsontype - - -class Network(base.APIBase): - network = types.jsontype - subnet = types.jsontype - security_groups = types.jsontype - - -class BoardAction(base.APIBase): - action = wsme.wsattr(wtypes.text) - parameters = types.jsontype - - -class BoardPluginsController(rest.RestController): - def __init__(self, 
board_ident): - self.board_ident = board_ident - - def _get_plugins_on_board_collection(self, board_uuid, fields=None): - injections = objects.InjectionPlugin.list(pecan.request.context, - board_uuid) - - return InjectionCollection.get_list(injections, - fields=fields) - - @expose.expose(InjectionCollection, - status_code=200) - def get_all(self): - """Retrieve a list of plugins of a board. - - """ - rpc_board = api_utils.get_rpc_board(self.board_ident) - - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:plugin_on_board:get', cdict, cdict) - - return self._get_plugins_on_board_collection(rpc_board.uuid) - - @expose.expose(wtypes.text, types.uuid_or_name, body=PluginAction, - status_code=200) - def post(self, plugin_ident, PluginAction): - - if not PluginAction.action: - raise exception.MissingParameterValue( - ("Action is not specified.")) - - if not PluginAction.parameters: - PluginAction.parameters = {} - - rpc_board = api_utils.get_rpc_board(self.board_ident) - rpc_plugin = api_utils.get_rpc_plugin(plugin_ident) - - try: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:plugin_action:post', cdict, cdict) - - if not rpc_plugin.public: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_plugin.owner - policy.authorize('iot:plugin_action:post', cdict, cdict) - except exception: - return exception - - rpc_board.check_if_online() - - if objects.plugin.want_customs_params(PluginAction.action): - valid_keys = list(rpc_plugin.parameters.keys()) - if not all(k in PluginAction.parameters for k in valid_keys): - raise exception.InvalidParameterValue( - "Parameters are different from the valid ones") - - result = pecan.request.rpcapi.action_plugin(pecan.request.context, - rpc_plugin.uuid, - rpc_board.uuid, - PluginAction.action, - PluginAction.parameters) - return result - - @expose.expose(wtypes.text, body=InjectionPlugin, - 
status_code=200) - def put(self, Injection): - """inject a plugin into a board. - - :param plugin_ident: UUID or logical name of a plugin. - :param board_ident: UUID or logical name of a board. - """ - - if not Injection.plugin: - raise exception.MissingParameterValue( - ("Plugin is not specified.")) - - if not Injection.onboot: - Injection.onboot = False - - rpc_board = api_utils.get_rpc_board(self.board_ident) - rpc_plugin = api_utils.get_rpc_plugin(Injection.plugin) - - try: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:plugin_inject:put', cdict, cdict) - - if not rpc_plugin.public: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_plugin.owner - policy.authorize('iot:plugin_inject:put', cdict, cdict) - except exception: - return exception - - rpc_board.check_if_online() - result = pecan.request.rpcapi.inject_plugin(pecan.request.context, - rpc_plugin.uuid, - rpc_board.uuid, - Injection.onboot) - return result - - @expose.expose(wtypes.text, types.uuid_or_name, - status_code=204) - def delete(self, plugin_uuid): - """Remove a plugin from a board. - - :param plugin_ident: UUID or logical name of a plugin. - :param board_ident: UUID or logical name of a board. 
- """ - rpc_board = api_utils.get_rpc_board(self.board_ident) - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - - policy.authorize('iot:plugin_remove:delete', cdict, cdict) - - rpc_board.check_if_online() - rpc_plugin = api_utils.get_rpc_plugin(plugin_uuid) - return pecan.request.rpcapi.remove_plugin(pecan.request.context, - rpc_plugin.uuid, - rpc_board.uuid) - - -class BoardServicesController(rest.RestController): - _custom_actions = { - 'action': ['POST'], - 'restore': ['GET'], - 'status': ['GET'] - } - - def __init__(self, board_ident): - self.board_ident = board_ident - - def _get_services_on_board_collection(self, board_uuid, fields=None): - services = objects.ExposedService.list(pecan.request.context, - board_uuid) - - return ExposedCollection.get_list(services, - fields=fields) - - @expose.expose(ExposedCollection, - status_code=200) - def get_all(self): - """Retrieve a list of services of a board. - - """ - rpc_board = api_utils.get_rpc_board(self.board_ident) - - cdict = pecan.request.context.to_policy_values() - cdict['project_id'] = rpc_board.project - policy.authorize('iot:service_on_board:get', cdict, cdict) - - return self._get_services_on_board_collection(rpc_board.uuid) - - @expose.expose(wtypes.text, types.uuid_or_name, body=ServiceAction, - status_code=200) - def action(self, service_ident, ServiceAction): - - if not ServiceAction.action: - raise exception.MissingParameterValue( - ("Action is not specified.")) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - rpc_service = api_utils.get_rpc_service(service_ident) - - try: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:service_action:post', cdict, cdict) - - except exception: - return exception - - # add here the check on force parameter - rpc_board.check_if_online() - - result = pecan.request.rpcapi.action_service(pecan.request.context, - rpc_service.uuid, - rpc_board.uuid, - 
ServiceAction.action) - return result - - @expose.expose(ExposedCollection, - status_code=200) - def restore(self): - rpc_board = api_utils.get_rpc_board(self.board_ident) - - try: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:service_action:post', cdict, cdict) - - except exception: - return exception - - rpc_board.check_if_online() - - pecan.request.rpcapi.restore_services_on_board( - pecan.request.context, - rpc_board.uuid) - - return self._get_services_on_board_collection(rpc_board.uuid) - - @expose.expose(wtypes.text, status_code=200) - def status(self): - """Get status of all services in a board. - - :param board_ident: UUID or logical name of a board. - """ - # context = pecan.request.context - # cdict = context.to_policy_values() - # policy.authorize('iot:board:delete', cdict, cdict) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - rpc_board.check_if_online() - - result = pecan.request.rpcapi.status_services_on_board( - pecan.request.context, - rpc_board.uuid - ) - - return result - - -class BoardWebservicesController(rest.RestController): - _custom_actions = { - 'enable': ['POST'], - 'disable': ['DELETE'], - 'renew': ['GET'] - } - - invalid_sort_key_list = ['extra', ] - - def __init__(self, board_ident): - self.board_ident = board_ident - - def _get_webservices_collection(self, board, marker, limit, - sort_key, sort_dir, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Webservice.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - filters['board_uuid'] = board - webservices = objects.Webservice.list(pecan.request.context, limit, - marker_obj, - sort_key=sort_key, - 
sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return WebserviceCollection.convert_with_links(webservices, limit, - fields=fields, - **parameters) - - @expose.expose(WebserviceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of webservices. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_webservices: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:webservice:get', cdict, cdict) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - filters = {} - filters['board_uuid'] = rpc_board.uuid - - if fields is None: - fields = _DEFAULT_WEBSERVICE_RETURN_FIELDS - return self._get_webservices_collection(rpc_board.uuid, marker, - limit, sort_key, sort_dir, - fields=fields) - - @expose.expose(Webservice, body=Webservice, status_code=201) - def put(self, Webservice): - """Create a new Webservice. - - :param Webservice: a Webservice within the request body. 
- """ - - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:webservice:create', cdict, cdict) - - if not Webservice.name: - raise exception.MissingParameterValue( - ("Name is not specified.")) - - if Webservice.name: - if not api_utils.is_valid_name(Webservice.name): - msg = ("Cannot create webservice with invalid name %(name)s") - raise wsme.exc.ClientSideError(msg % {'name': Webservice.name}, - status_code=400) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - new_Webservice = objects.Webservice(pecan.request.context, - **Webservice.as_dict()) - - new_Webservice.board_uuid = rpc_board.uuid - new_Webservice = pecan.request.rpcapi.create_webservice( - pecan.request.context, - new_Webservice) - - return Webservice.convert_with_links(new_Webservice) - - class EnabledWebserverData(base.APIBase): - dns = wtypes.text - zone = wtypes.text - email = wtypes.text - - @expose.expose(EnabledWebservice, body=EnabledWebserverData, - status_code=201) - def enable(self, EnabledWebserverData): - """Create a new Webservice. - - :param Webservice: a Webservice within the request body. 
- """ - # context = pecan.request.context - # cdict = context.to_policy_values() - # policy.authorize('iot:webservice:create', cdict, cdict) - - if not EnabledWebserverData.dns: - raise exception.MissingParameterValue( - ("dns is not specified.")) - if not EnabledWebserverData.zone: - raise exception.MissingParameterValue( - ("zone is not specified.")) - if not EnabledWebserverData.email: - raise exception.MissingParameterValue( - ("email is not specified.")) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - new_EnWebservice = pecan.request.rpcapi.enable_webservice( - pecan.request.context, - EnabledWebserverData.dns, - EnabledWebserverData.zone, - EnabledWebserverData.email, - rpc_board.uuid) - - return EnabledWebservice.convert_with_links(new_EnWebservice) - - @expose.expose(None, status_code=204) - def disable(self): - """Disable webservices in a board. - - :param board_ident: UUID or logical name of a board. - """ - # context = pecan.request.context - # cdict = context.to_policy_values() - # policy.authorize('iot:board:delete', cdict, cdict) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - pecan.request.rpcapi.disable_webservice(pecan.request.context, - rpc_board.uuid) - - @expose.expose(None) - def renew(self): - """Renew webservices certificates in a board. - - :param board_ident: UUID or logical name of a board. 
- """ - # context = pecan.request.context - # cdict = context.to_policy_values() - # policy.authorize('iot:board:delete', cdict, cdict) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - rpc_board.check_if_online() - - pecan.request.rpcapi.renew_webservice(pecan.request.context, - rpc_board.uuid) - - -class BoardPortsController(rest.RestController): - - def __init__(self, board_ident): - self.board_ident = board_ident - - def _get_ports_on_board_collection(self, board_uuid, fields=None): - filters = {} - filters['board_uuid'] = board_uuid - ports = objects.Port.list(pecan.request.context, - filters=filters) - - return PortCollection.get_list(ports, fields=fields) - - def get_port_detail(self, board_uuid, port_uuid): - filters = {} - filters['board_uuid'] = board_uuid - ports = objects.Port.list(pecan.request.context, - filters=filters) - for port in ports: - if port.uuid == port_uuid: - return port - - @expose.expose(wtypes.text, types.uuid_or_name, body=Network, - status_code=200) - def put(self, Network): - - if not Network.network: - raise exception.MissingParameterValue( - ("Network is not specified.")) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - rpc_board.check_if_online() - - result = pecan.request.rpcapi. 
\ - create_port_on_board(pecan.request.context, rpc_board.uuid, - Network.network, Network.subnet, - Network.security_groups) - return result - - @expose.expose(wtypes.text, types.uuid_or_name, - status_code=204) - def delete(self, port): - - if not port: - raise exception.MissingParameterValue( - ("Port is not specified.")) - - rpc_board = api_utils.get_rpc_board(self.board_ident) - rpc_port = api_utils.get_rpc_port(port) - - rpc_board.check_if_online() - - result = pecan.request.rpcapi.remove_port_from_board( - pecan.request.context, rpc_board.uuid, rpc_port.uuid) - - return result - - @expose.expose(PortCollection, status_code=200) - def get_all(self): - - rpc_board = api_utils.get_rpc_board(self.board_ident) - - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:port_on_board:get', cdict, cdict) - - return self._get_ports_on_board_collection(rpc_board.uuid) - - @expose.expose(Port, types.uuid_or_name, status_code=200) - def get_one(self, port_ident): - rpc_board = api_utils.get_rpc_board(self.board_ident) - - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_board.owner - policy.authorize('iot:port_on_board:get', cdict, cdict) - - return self.get_port_detail(rpc_board, port_ident) - - -class BoardsController(rest.RestController): - """REST controller for Boards.""" - - _subcontroller_map = { - 'plugins': BoardPluginsController, - 'services': BoardServicesController, - 'ports': BoardPortsController, - 'webservices': BoardWebservicesController, - } - - invalid_sort_key_list = ['extra', 'location'] - - _custom_actions = { - 'detail': ['GET'], - 'action': ['POST'], - } - - @pecan.expose() - def _lookup(self, ident, *remainder): - try: - ident = types.uuid_or_name.validate(ident) - except exception.InvalidUuidOrName as e: - pecan.abort('400', e.args[0]) - if not remainder: - return - - subcontroller = self._subcontroller_map.get(remainder[0]) - if subcontroller: - return 
subcontroller(board_ident=ident), remainder[1:] - - def _get_boards_collection(self, status, marker, limit, - sort_key, sort_dir, - project=None, - resource_url=None, fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Board.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - - # bounding the request to a project - if project: - if pecan.request.context.is_admin: - filters['project_id'] = project - else: - msg = ("Project parameter can be used only " - "by the administrator.") - raise wsme.exc.ClientSideError(msg, - status_code=400) - else: - filters['project_id'] = pecan.request.context.project_id - - if status: - filters['status'] = status - - boards = objects.Board.list(pecan.request.context, limit, marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return BoardCollection.convert_with_links(boards, limit, - url=resource_url, - fields=fields, - **parameters) - - @expose.expose(Board, types.uuid_or_name, types.listtype) - def get_one(self, board_ident, fields=None): - """Retrieve information about the given board. - - :param board_ident: UUID or logical name of a board. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:board:get', cdict, cdict) - - rpc_board = api_utils.get_rpc_board(board_ident) - - return Board.convert_with_links(rpc_board, fields=fields) - - @expose.expose(BoardCollection, wtypes.text, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, wtypes.text) - def get_all(self, status=None, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, project=None): - """Retrieve a list of boards. - - :param status: Optional string value to get only board in - that status. - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:board:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_boards_collection(status, marker, - limit, sort_key, sort_dir, - fields=fields, project=project) - - @expose.expose(Board, body=Board, status_code=201) - def post(self, Board): - """Create a new Board. - - :param Board: a Board within the request body. 
- """ - context = pecan.request.context - - cdict = context.to_policy_values() - policy.authorize('iot:board:create', cdict, cdict) - - if not Board.name: - raise exception.MissingParameterValue( - ("Name is not specified.")) - if not Board.code: - raise exception.MissingParameterValue( - ("Code is not specified.")) - if not Board.location: - raise exception.MissingParameterValue( - ("Location is not specified.")) - - if Board.name: - if not api_utils.is_valid_board_name(Board.name): - msg = ("Cannot create board with invalid name %(name)s") - raise wsme.exc.ClientSideError(msg % {'name': Board.name}, - status_code=400) - - new_Board = objects.Board(pecan.request.context, - **Board.as_dict()) - - new_Board.owner = pecan.request.context.user_id - new_Board.project = pecan.request.context.project_id - - new_Location = objects.Location(pecan.request.context, - **Board.location[0].as_dict()) - - new_Board = pecan.request.rpcapi.create_board(pecan.request.context, - new_Board, new_Location) - - return Board.convert_with_links(new_Board) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, board_ident): - """Delete a board. - - :param board_ident: UUID or logical name of a board. - """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:board:delete', cdict, cdict) - - rpc_board = api_utils.get_rpc_board(board_ident) - pecan.request.rpcapi.destroy_board(pecan.request.context, - rpc_board.uuid) - - @expose.expose(Board, types.uuid_or_name, body=Board, status_code=200) - def patch(self, board_ident, val_Board): - """Update a board. - - :param board_ident: UUID or logical name of a board. 
- :param Board: values to be changed - :return updated_board: updated_board - """ - - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:board:update', cdict, cdict) - - board = api_utils.get_rpc_board(board_ident) - val_Board = val_Board.as_dict() - for key in val_Board: - try: - board[key] = val_Board[key] - except Exception: - pass - - updated_board = pecan.request.rpcapi.update_board( - pecan.request.context, - board) - return Board.convert_with_links(updated_board) - - @expose.expose(BoardCollection, wtypes.text, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, wtypes.text) - def detail(self, status=None, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, project=None): - """Retrieve a list of boards. - - :param status: Optional string value to get only board in - that status. - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param project: Optional string value to get only boards - of the project. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:board:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "boards": - raise exception.HTTPNotFound() - - return self._get_boards_collection(status, marker, - limit, sort_key, sort_dir, - project=project, fields=fields) - - @expose.expose(wtypes.text, types.uuid_or_name, body=BoardAction, - status_code=200) - def action(self, board_ident, BoardAction): - """Action on a board. - - :param board_ident: UUID or logical name of a board. - """ - - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:board_action:post', cdict, cdict) - - if not BoardAction.parameters: - BoardAction.parameters = {} - - rpc_board = api_utils.get_rpc_board(board_ident) - return pecan.request.rpcapi.action_board(pecan.request.context, - rpc_board.uuid, - BoardAction.action, - BoardAction.parameters) diff --git a/iotronic/iotronic/api/controllers/v1/collection.py b/iotronic/iotronic/api/controllers/v1/collection.py deleted file mode 100644 index bec95bd..0000000 --- a/iotronic/iotronic/api/controllers/v1/collection.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import pecan -from wsme import types as wtypes - -from iotronic.api.controllers import base -from iotronic.api.controllers import link - - -class Collection(base.APIBase): - - next = wtypes.text - """A link to retrieve the next subset of the collection""" - - @property - def collection(self): - return getattr(self, self._type) - - def has_next(self, limit): - """Return whether collection has more items.""" - return len(self.collection) and len(self.collection) == limit - - def get_next(self, limit, url=None, **kwargs): - """Return a link to the next subset of the collection.""" - if not self.has_next(limit): - return wtypes.Unset - - resource_url = url or self._type - q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) - next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { - 'args': q_args, 'limit': limit, - 'marker': self.collection[-1].uuid} - - return link.Link.make_link('next', pecan.request.public_url, - resource_url, next_args).href diff --git a/iotronic/iotronic/api/controllers/v1/enabledwebservice.py b/iotronic/iotronic/api/controllers/v1/enabledwebservice.py deleted file mode 100644 index cee4d46..0000000 --- a/iotronic/iotronic/api/controllers/v1/enabledwebservice.py +++ /dev/null @@ -1,162 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -_DEFAULT_RETURN_FIELDS = ('board_uuid', 'http_port', 'https_port', - 'dns', 'zone', 'extra') - - -class EnabledWebservice(base.APIBase): - """API representation of a enabled_webservice. - - """ - http_port = wsme.types.IntegerType() - https_port = wsme.types.IntegerType() - board_uuid = types.uuid - dns = wsme.wsattr(wtypes.text) - zone = wsme.wsattr(wtypes.text) - extra = types.jsontype - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.EnabledWebservice.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert(enabled_webservice, fields=None): - if fields is not None: - enabled_webservice.unset_fields_except(fields) - - return enabled_webservice - - @classmethod - def convert_with_links(cls, rpc_enabled_webservice, fields=None): - enabled_webservice = EnabledWebservice( - **rpc_enabled_webservice.as_dict()) - if fields is not None: - api_utils.check_for_invalid_fields(fields, - enabled_webservice.as_dict()) - - return cls._convert(enabled_webservice, - fields=fields) - - -class EnabledWebserviceCollection(collection.Collection): - """API representation of a collection of EnabledWebservices.""" - - EnabledWebservices = [EnabledWebservice] - """A list containing EnabledWebservices objects""" - - def __init__(self, **kwargs): - self._type = 'EnabledWebservices' - - @staticmethod - def convert_with_links(EnabledWebservices, limit, url=None, fields=None, - **kwargs): - collection = EnabledWebserviceCollection() - collection.EnabledWebservices = [ - EnabledWebservice.convert_with_links(n, fields=fields) - for n in EnabledWebservices] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class EnabledWebservicesController(rest.RestController): - """REST controller for EnabledWebservices.""" - - invalid_sort_key_list = ['extra', ] - - def _get_EnabledWebservices_collection(self, marker, limit, - sort_key, sort_dir, project_id, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.EnabledWebservice.get_by_id( - pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - filters['project_id'] = project_id - - 
EnabledWebservices = objects.EnabledWebservice.list( - pecan.request.context, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return EnabledWebserviceCollection.convert_with_links( - EnabledWebservices, limit, - fields=fields, - **parameters) - - @expose.expose(EnabledWebserviceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of EnabledWebservices. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:enabledwebservice:get', cdict, cdict) - project_id = pecan.request.context.project_id - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_EnabledWebservices_collection(marker, - limit, - sort_key, sort_dir, - project_id=project_id, - fields=fields) diff --git a/iotronic/iotronic/api/controllers/v1/fleet.py b/iotronic/iotronic/api/controllers/v1/fleet.py deleted file mode 100644 index b68660d..0000000 --- a/iotronic/iotronic/api/controllers/v1/fleet.py +++ /dev/null @@ -1,351 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1.board import BoardCollection -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -_DEFAULT_RETURN_FIELDS = ( - 'name', 'uuid', 'project', 'description', 'extra') - -_DEFAULT_BOARDS_RETURN_FIELDS = ('name', 'code', 'status', 'uuid', - 'session', 'type', 'fleet', 'lr_version', - 'connectivity', 'agent', 'wstun_ip') - - -class Fleet(base.APIBase): - """API representation of a fleet. - - """ - uuid = types.uuid - name = wsme.wsattr(wtypes.text) - project = types.uuid - description = wsme.wsattr(wtypes.text) - extra = types.jsontype - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Fleet.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(fleet, url, fields=None): - fleet_uuid = fleet.uuid - if fields is not None: - fleet.unset_fields_except(fields) - - fleet.links = [link.Link.make_link('self', url, 'fleets', - fleet_uuid), - link.Link.make_link('bookmark', url, 'fleets', - fleet_uuid, bookmark=True) - ] - return fleet - - @classmethod - def convert_with_links(cls, rpc_fleet, fields=None): - fleet = Fleet(**rpc_fleet.as_dict()) - - if fields is not None: - api_utils.check_for_invalid_fields(fields, fleet.as_dict()) - - return cls._convert_with_links(fleet, pecan.request.public_url, - fields=fields) - - -class FleetCollection(collection.Collection): - """API representation of a collection of fleets.""" - - fleets = [Fleet] - """A list containing fleets objects""" - - def __init__(self, **kwargs): - self._type = 'fleets' - - @staticmethod - def convert_with_links(fleets, limit, url=None, fields=None, **kwargs): - collection = FleetCollection() - collection.fleets = [Fleet.convert_with_links(n, fields=fields) - for n in fleets] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class FleetBoardsController(rest.RestController): - def __init__(self, fleet_ident): - self.fleet_ident = fleet_ident - - @expose.expose(BoardCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of boards. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". 
Default: asc. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:board:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_BOARDS_RETURN_FIELDS - - filters = {} - filters['fleet'] = self.fleet_ident - - boards = objects.Board.list(pecan.request.context, limit, marker, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return BoardCollection.convert_with_links(boards, limit, - fields=fields, - **parameters) - - -class FleetsController(rest.RestController): - """REST controller for Fleets.""" - - _subcontroller_map = { - 'boards': FleetBoardsController, - } - - invalid_sort_key_list = ['extra', ] - - _custom_actions = { - 'detail': ['GET'], - } - - @pecan.expose() - def _lookup(self, ident, *remainder): - try: - ident = types.uuid_or_name.validate(ident) - except exception.InvalidUuidOrName as e: - pecan.abort('400', e.args[0]) - if not remainder: - return - - subcontroller = self._subcontroller_map.get(remainder[0]) - if subcontroller: - return subcontroller(fleet_ident=ident), remainder[1:] - - def _get_fleets_collection(self, marker, limit, - sort_key, sort_dir, - project=None, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Fleet.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - - if project: - if pecan.request.context.is_admin: - filters['project_id'] = project - fleets = objects.Fleet.list(pecan.request.context, limit, - marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': 
sort_dir} - - return FleetCollection.convert_with_links(fleets, limit, - fields=fields, - **parameters) - - @expose.expose(Fleet, types.uuid_or_name, types.listtype) - def get_one(self, fleet_ident, fields=None): - """Retrieve information about the given fleet. - - :param fleet_ident: UUID or logical name of a fleet. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - - rpc_fleet = api_utils.get_rpc_fleet(fleet_ident) - cdict = pecan.request.context.to_policy_values() - cdict['project_id'] = rpc_fleet.project - policy.authorize('iot:fleet:get_one', cdict, cdict) - - return Fleet.convert_with_links(rpc_fleet, fields=fields) - - @expose.expose(FleetCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of fleets. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_fleets: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:fleet:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_fleets_collection(marker, - limit, sort_key, sort_dir, - project=cdict['project_id'], - fields=fields) - - @expose.expose(Fleet, body=Fleet, status_code=201) - def post(self, Fleet): - """Create a new Fleet. 
- - :param Fleet: a Fleet within the request body. - """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:fleet:create', cdict, cdict) - - if not Fleet.name: - raise exception.MissingParameterValue( - ("Name is not specified.")) - - if Fleet.name: - if not api_utils.is_valid_name(Fleet.name): - msg = ("Cannot create fleet with invalid name %(name)s") - raise wsme.exc.ClientSideError(msg % {'name': Fleet.name}, - status_code=400) - - new_Fleet = objects.Fleet(pecan.request.context, - **Fleet.as_dict()) - - new_Fleet.project = cdict['project_id'] - new_Fleet = pecan.request.rpcapi.create_fleet( - pecan.request.context, - new_Fleet) - - return Fleet.convert_with_links(new_Fleet) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, fleet_ident): - """Delete a fleet. - - :param fleet_ident: UUID or logical name of a fleet. - """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:fleet:delete', cdict, cdict) - - rpc_fleet = api_utils.get_rpc_fleet(fleet_ident) - pecan.request.rpcapi.destroy_fleet(pecan.request.context, - rpc_fleet.uuid) - - @expose.expose(Fleet, types.uuid_or_name, body=Fleet, status_code=200) - def patch(self, fleet_ident, val_Fleet): - """Update a fleet. - - :param fleet_ident: UUID or logical name of a fleet. 
- :param Fleet: values to be changed - :return updated_fleet: updated_fleet - """ - - rpc_fleet = api_utils.get_rpc_fleet(fleet_ident) - cdict = pecan.request.context.to_policy_values() - cdict['project'] = rpc_fleet.project - policy.authorize('iot:fleet:update', cdict, cdict) - - val_Fleet = val_Fleet.as_dict() - for key in val_Fleet: - try: - rpc_fleet[key] = val_Fleet[key] - except Exception: - pass - - updated_fleet = pecan.request.rpcapi.update_fleet( - pecan.request.context, rpc_fleet) - return Fleet.convert_with_links(updated_fleet) - - @expose.expose(FleetCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def detail(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_fleets=False): - """Retrieve a list of fleets. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public fleet. - :param all_fleets: Optional boolean to get all the fleets. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:fleet:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "fleets": - raise exception.HTTPNotFound() - - return self._get_fleets_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_fleets=all_fleets, - fields=fields) diff --git a/iotronic/iotronic/api/controllers/v1/location.py b/iotronic/iotronic/api/controllers/v1/location.py deleted file mode 100644 index 8f4bc76..0000000 --- a/iotronic/iotronic/api/controllers/v1/location.py +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from iotronic.api.controllers import base -from iotronic import objects -import wsme -from wsme import types as wtypes - - -class Location(base.APIBase): - """API representation of a location. - - """ - - longitude = wsme.wsattr(wtypes.text) - latitude = wsme.wsattr(wtypes.text) - altitude = wsme.wsattr(wtypes.text) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Location.fields) - for k in fields: - # Skip fields we do not expose. 
- if k is not 'created_at': - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def convert_with_list(list): - list_locations = [] - for l in list: - list_locations.append(Location(**l.as_dict())) - return list_locations diff --git a/iotronic/iotronic/api/controllers/v1/plugin.py b/iotronic/iotronic/api/controllers/v1/plugin.py deleted file mode 100644 index 1e6471a..0000000 --- a/iotronic/iotronic/api/controllers/v1/plugin.py +++ /dev/null @@ -1,360 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -_DEFAULT_RETURN_FIELDS = ('name', 'uuid', 'owner', 'public', 'callable') - - -class Plugin(base.APIBase): - """API representation of a plugin. 
- - """ - uuid = types.uuid - name = wsme.wsattr(wtypes.text) - code = wsme.wsattr(wtypes.text) - public = types.boolean - owner = types.uuid - callable = types.boolean - parameters = types.jsontype - links = wsme.wsattr([link.Link], readonly=True) - extra = types.jsontype - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Plugin.fields) - for k in fields: - # Skip fields we do not expose. - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(plugin, url, fields=None): - plugin_uuid = plugin.uuid - if fields is not None: - plugin.unset_fields_except(fields) - - plugin.links = [link.Link.make_link('self', url, 'plugins', - plugin_uuid), - link.Link.make_link('bookmark', url, 'plugins', - plugin_uuid, bookmark=True) - ] - return plugin - - @classmethod - def convert_with_links(cls, rpc_plugin, fields=None): - plugin = Plugin(**rpc_plugin.as_dict()) - - if fields is not None: - api_utils.check_for_invalid_fields(fields, plugin.as_dict()) - - return cls._convert_with_links(plugin, pecan.request.public_url, - fields=fields) - - -class PluginCollection(collection.Collection): - """API representation of a collection of plugins.""" - - plugins = [Plugin] - """A list containing plugins objects""" - - def __init__(self, **kwargs): - self._type = 'plugins' - - @staticmethod - def convert_with_links(plugins, limit, url=None, fields=None, **kwargs): - collection = PluginCollection() - collection.plugins = [Plugin.convert_with_links(n, fields=fields) - for n in plugins] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class PublicPluginsController(rest.RestController): - """REST controller for Public Plugins.""" - - invalid_sort_key_list = ['extra', 'location'] - - def _get_plugins_collection(self, marker, limit, - sort_key, sort_dir, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = 
api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Plugin.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - filters['public'] = True - - plugins = objects.Plugin.list(pecan.request.context, limit, marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return PluginCollection.convert_with_links(plugins, limit, - fields=fields, - **parameters) - - @expose.expose(PluginCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of plugins. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:plugin:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_plugins_collection(marker, - limit, sort_key, sort_dir, - fields=fields) - - -class PluginsController(rest.RestController): - """REST controller for Plugins.""" - - public = PublicPluginsController() - - invalid_sort_key_list = ['extra', 'location'] - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_plugins_collection(self, marker, limit, - sort_key, sort_dir, - fields=None, with_public=False, - all_plugins=False): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Plugin.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - if all_plugins and not pecan.request.context.is_admin: - msg = ("all_plugins parameter can only be used " - "by the administrator.") - raise wsme.exc.ClientSideError(msg, - status_code=400) - else: - if not all_plugins: - filters['owner'] = pecan.request.context.user_id - if with_public: - filters['with_public'] = with_public - - plugins = objects.Plugin.list(pecan.request.context, limit, marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return PluginCollection.convert_with_links(plugins, limit, - fields=fields, - **parameters) - - @expose.expose(Plugin, types.uuid_or_name, types.listtype) - def get_one(self, plugin_ident, fields=None): - """Retrieve information about the given plugin. - - :param plugin_ident: UUID or logical name of a plugin. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - rpc_plugin = api_utils.get_rpc_plugin(plugin_ident) - if not rpc_plugin.public: - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_plugin.owner - policy.authorize('iot:plugin:get_one', cdict, cdict) - - return Plugin.convert_with_links(rpc_plugin, fields=fields) - - @expose.expose(PluginCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_plugins=False): - """Retrieve a list of plugins. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_plugins: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:plugin:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_plugins_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_plugins=all_plugins, - fields=fields) - - @expose.expose(Plugin, body=Plugin, status_code=201) - def post(self, Plugin): - """Create a new Plugin. - - :param Plugin: a Plugin within the request body. 
- """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:plugin:create', cdict, cdict) - - if not Plugin.name: - raise exception.MissingParameterValue( - ("Name is not specified.")) - - if Plugin.name: - if not api_utils.is_valid_name(Plugin.name): - msg = ("Cannot create plugin with invalid name %(name)s") - raise wsme.exc.ClientSideError(msg % {'name': Plugin.name}, - status_code=400) - - new_Plugin = objects.Plugin(pecan.request.context, - **Plugin.as_dict()) - - new_Plugin.owner = cdict['user'] - new_Plugin = pecan.request.rpcapi.create_plugin(pecan.request.context, - new_Plugin) - - return Plugin.convert_with_links(new_Plugin) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, plugin_ident): - """Delete a plugin. - - :param plugin_ident: UUID or logical name of a plugin. - """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:plugin:delete', cdict, cdict) - - rpc_plugin = api_utils.get_rpc_plugin(plugin_ident) - pecan.request.rpcapi.destroy_plugin(pecan.request.context, - rpc_plugin.uuid) - - @expose.expose(Plugin, types.uuid_or_name, body=Plugin, status_code=200) - def patch(self, plugin_ident, val_Plugin): - """Update a plugin. - - :param plugin_ident: UUID or logical name of a plugin. 
- :param Plugin: values to be changed - :return updated_plugin: updated_plugin - """ - - rpc_plugin = api_utils.get_rpc_plugin(plugin_ident) - cdict = pecan.request.context.to_policy_values() - cdict['owner'] = rpc_plugin.owner - policy.authorize('iot:plugin:update', cdict, cdict) - - val_Plugin = val_Plugin.as_dict() - for key in val_Plugin: - try: - rpc_plugin[key] = val_Plugin[key] - except Exception: - pass - - updated_plugin = pecan.request.rpcapi.update_plugin( - pecan.request.context, rpc_plugin) - return Plugin.convert_with_links(updated_plugin) - - @expose.expose(PluginCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def detail(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_plugins=False): - """Retrieve a list of plugins. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_plugins: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:plugin:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "plugins": - raise exception.HTTPNotFound() - - return self._get_plugins_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_plugins=all_plugins, - fields=fields) diff --git a/iotronic/iotronic/api/controllers/v1/port.py b/iotronic/iotronic/api/controllers/v1/port.py deleted file mode 100644 index bc499a8..0000000 --- a/iotronic/iotronic/api/controllers/v1/port.py +++ /dev/null @@ -1,202 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -from oslo_log import log as logging -LOG = logging.getLogger(__name__) - - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - - -_DEFAULT_RETURN_FIELDS = ('uuid', 'board_uuid', 'MAC_add', - 'VIF_name', 'network', 'ip') - - -class Port(base.APIBase): - """API representation of a port. 
- - """ - uuid = types.uuid - board_uuid = types.uuid - MAC_add = wsme.wsattr(wtypes.text) - VIF_name = wsme.wsattr(wtypes.text) - network = wsme.wsattr(wtypes.text) - ip = wsme.wsattr(wtypes.text) - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Port.fields) - for k in fields: - # Skip fields we do not expose. - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(port, url, fields=None): - port_uuid = port.uuid - if fields is not None: - port.unset_fields_except(fields) - - port.links = [link.Link.make_link('self', url, 'ports', port_uuid), - link.Link.make_link('bookmark', url, 'ports', - port_uuid, bookmark=True)] - return port - - @classmethod - def convert_with_links(cls, rpc_port, fields=None): - port = Port(**rpc_port.as_dict()) - - if fields is not None: - api_utils.check_for_invalid_fields(fields, port.as_dict()) - - return cls._convert_with_links(port, pecan.request.public_url, - fields=fields) - - -class PortCollection(collection.Collection): - """API representation of a collection of ports.""" - - ports = [Port] - """A list containing ports objects""" - - def __init__(self, **kwargs): - self._type = 'ports' - - @staticmethod - def convert_with_links(ports, limit, url=None, fields=None, **kwargs): - collection = PortCollection() - collection.ports = [Port.convert_with_links(n, fields=fields) - for n in ports] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class PortsController(rest.RestController): - - """REST controller for Ports.""" - -# public = PublicPortsController() - - invalid_sort_key_list = ['extra', 'location'] - - def _get_ports_collection(self, marker, limit, sort_key, sort_dir, - fields=None, with_public=False, - all_ports=False): - - limit = api_utils.validate_limit(limit) - sort_dir = 
api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Port.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} -# if all_ports and not pecan.request.context.is_admin: -# msg = ("all_ports parameter can only be used " -# "by the administrator.") -# raise wsme.exc.ClientSideError(msg, -# status_code=400) -# else: -# if not all_ports: -# filters['owner'] = pecan.request.context.user_id -# if with_public: -# filters['with_public'] = with_public - - ports = objects.Port.list(pecan.request.context, limit, marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return PortCollection.convert_with_links(ports, limit, - fields=fields, **parameters) - - @expose.expose(PortCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_ports=False): - """Retrieve a list of ports. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public ports. - :param all_ports: Optional boolean to get all the ports. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:port_on_board:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_ports_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_ports=all_ports, - fields=fields) - - @expose.expose(Port, types.uuid_or_name, types.listtype) - def get_one(self, port_ident, fields=None): - """Retrieve information about the given port. - - :param port_ident: UUID or logical name of a port. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - - rpc_port = api_utils.get_rpc_port(port_ident) -# if not rpc_port.public: -# cdict = pecan.request.context.to_policy_values() -# cdict['owner'] = rpc_port.owner -# policy.authorize('iot:port:get_one', cdict, cdict) - - return Port.convert_with_links(rpc_port, fields=fields) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, port_ident): - - rpc_port = api_utils.get_rpc_port(port_ident) - board = rpc_port.board_uuid - - pecan.request.rpcapi.remove_port_from_board(pecan.request.context, - board, rpc_port.uuid) - return diff --git a/iotronic/iotronic/api/controllers/v1/request.py b/iotronic/iotronic/api/controllers/v1/request.py deleted file mode 100644 index dcfbaaa..0000000 --- a/iotronic/iotronic/api/controllers/v1/request.py +++ /dev/null @@ -1,296 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1.result import ResultCollection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -_DEFAULT_RETURN_FIELDS = ( - 'uuid', - 'destination_uuid', - 'main_request_uuid', - 'pending_requests', - 'status', - 'type', - 'action' -) - -_DEFAULT_RESULT_RETURN_FIELDS = ( - 'board_uuid', - 'request_uuid', - 'result', - 'message' -) - - -class Request(base.APIBase): - """API representation of a request. - - """ - - uuid = types.uuid - destination_uuid = types.uuid - main_request_uuid = types.uuid - pending_requests = wsme.types.IntegerType() - status = wsme.wsattr(wtypes.text) - project = types.uuid - type = wsme.types.IntegerType() - action = wsme.wsattr(wtypes.text) - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Request.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(request, url, fields=None): - request_uuid = request.uuid - if fields is not None: - request.unset_fields_except(fields) - - request.links = [link.Link.make_link('self', url, 'requests', - request_uuid), - link.Link.make_link('bookmark', url, 'requests', - request_uuid, bookmark=True) - ] - return request - - @classmethod - def convert_with_links(cls, rpc_request, fields=None): - request = Request(**rpc_request.as_dict()) - - if fields is not None: - api_utils.check_for_invalid_fields(fields, request.as_dict()) - - return cls._convert_with_links(request, pecan.request.public_url, - fields=fields) - - -class RequestCollection(collection.Collection): - """API representation of a collection of requests.""" - - requests = [Request] - """A list containing requests objects""" - - def __init__(self, **kwargs): - self._type = 'requests' - - @staticmethod - def convert_with_links(requests, limit, url=None, fields=None, **kwargs): - collection = RequestCollection() - collection.requests = [Request.convert_with_links(n, fields=fields) - for n in requests] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class ResultsRequestController(rest.RestController): - def __init__(self, request_ident): - self.request_ident = request_ident - - @expose.expose(ResultCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of boards. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. 
Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:result:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RESULT_RETURN_FIELDS - - filters = {} - filters['request_uuid'] = self.request_ident - - results = objects.Result.list(pecan.request.context, limit, marker, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return ResultCollection.convert_with_links(results, limit, - fields=fields, - **parameters) - - -class RequestsController(rest.RestController): - """REST controller for Requests.""" - - _subcontroller_map = { - 'results': ResultsRequestController, - } - - invalid_sort_key_list = ['extra', ] - - _custom_actions = { - 'detail': ['GET'], - } - - @pecan.expose() - def _lookup(self, ident, *remainder): - try: - ident = types.uuid_or_name.validate(ident) - except exception.InvalidUuidOrName as e: - pecan.abort('400', e.args[0]) - if not remainder: - return - - subcontroller = self._subcontroller_map.get(remainder[0]) - if subcontroller: - return subcontroller(request_ident=ident), remainder[1:] - - def _get_requests_collection(self, marker, limit, - sort_key, sort_dir, - project=None, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Request.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - - if project: - if pecan.request.context.is_admin: - filters['project_id'] = project - requests = objects.Request.list(pecan.request.context, limit, - marker_obj, - 
sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return RequestCollection.convert_with_links(requests, limit, - fields=fields, - **parameters) - - @expose.expose(Request, types.uuid_or_name, types.listtype) - def get_one(self, request_ident, fields=None): - """Retrieve information about the given request. - - :param request_ident: UUID or logical name of a request. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:request:get_one', cdict, cdict) - - rpc_request = objects.Request.get_by_uuid(pecan.request.context, - request_ident) - return Request.convert_with_links(rpc_request, fields=fields) - - @expose.expose(RequestCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of requests. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_requests: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:request:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_requests_collection(marker, - limit, sort_key, sort_dir, - project=cdict['project_id'], - fields=fields) - - @expose.expose(RequestCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def detail(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_requests=False): - """Retrieve a list of requests. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public request. - :param all_requests: Optional boolean to get all the requests. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:request:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "requests": - raise exception.HTTPNotFound() - - return self._get_requests_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_requests=all_requests, - fields=fields) diff --git a/iotronic/iotronic/api/controllers/v1/result.py b/iotronic/iotronic/api/controllers/v1/result.py deleted file mode 100644 index 9282da0..0000000 --- a/iotronic/iotronic/api/controllers/v1/result.py +++ /dev/null @@ -1,214 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -_DEFAULT_RETURN_FIELDS = ( - 'board_uuid', - 'request_uuid', - 'result', -) - - -class Result(base.APIBase): - """API representation of a result. 
- - """ - board_uuid = types.uuid - request_uuid = types.uuid - result = wsme.wsattr(wtypes.text) - message = wsme.wsattr(wtypes.text) - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Result.fields) - for k in fields: - # Skip fields we do not expose. - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(result, fields=None): - if fields is not None: - result.unset_fields_except(fields) - return result - - @classmethod - def convert_with_links(cls, rpc_result, fields=None): - result = Result(**rpc_result.as_dict()) - - if fields is not None: - api_utils.check_for_invalid_fields(fields, result.as_dict()) - - return cls._convert_with_links(result, - fields=fields) - - -class ResultCollection(collection.Collection): - """API representation of a collection of results.""" - - results = [Result] - """A list containing results objects""" - - def __init__(self, **kwargs): - self._type = 'results' - - @staticmethod - def convert_with_links(results, limit, url=None, fields=None, **kwargs): - collection = ResultCollection() - collection.results = [Result.convert_with_links(n, fields=fields) - for n in results] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class ResultsController(rest.RestController): - """REST controller for Results.""" - - invalid_sort_key_list = ['extra', ] - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_results_collection(self, marker, limit, - sort_key, sort_dir, - project=None, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Result.get_by_uuid(pecan.result.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an 
invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - - if project: - if pecan.result.context.is_admin: - filters['project_id'] = project - results = objects.Result.list(pecan.result.context, limit, - marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return ResultCollection.convert_with_links(results, limit, - fields=fields, - **parameters) - - @expose.expose(Result, types.uuid_or_name, types.listtype) - def get_one(self, result_ident, fields=None): - """Retrieve information about the given result. - - :param result_ident: UUID or logical name of a result. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - - cdict = pecan.result.context.to_policy_values() - policy.authorize('iot:result:get_one', cdict, cdict) - - rpc_result = objects.Result.get_by_uuid(pecan.result.context, - result_ident) - return Result.convert_with_links(rpc_result, fields=fields) - - @expose.expose(ResultCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, request_uuid, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of results. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_results: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - cdict = pecan.result.context.to_policy_values() - policy.authorize('iot:result:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_results_collection(marker, - limit, sort_key, sort_dir, - request_uuid=request_uuid, - fields=fields) - - @expose.expose(ResultCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def detail(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_results=False): - """Retrieve a list of results. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public result. - :param all_results: Optional boolean to get all the results. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.result.context.to_policy_values() - policy.authorize('iot:result:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.result.path.split('/')[:-1][-1] - if parent != "results": - raise exception.HTTPNotFound() - - return self._get_results_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_results=all_results, - fields=fields) diff --git a/iotronic/iotronic/api/controllers/v1/service.py b/iotronic/iotronic/api/controllers/v1/service.py deleted file mode 100644 index a69dba2..0000000 --- a/iotronic/iotronic/api/controllers/v1/service.py +++ /dev/null @@ -1,362 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -_DEFAULT_RETURN_FIELDS = ( - 'name', 'uuid', 'project', 'port', 'protocol', 'extra') - - -class Service(base.APIBase): - """API representation of a service. 
- - """ - uuid = types.uuid - name = wsme.wsattr(wtypes.text) - project = types.uuid - port = wsme.types.IntegerType() - protocol = wsme.wsattr(wtypes.text) - extra = types.jsontype - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Service.fields) - for k in fields: - # Skip fields we do not expose. - if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(service, url, fields=None): - service_uuid = service.uuid - if fields is not None: - service.unset_fields_except(fields) - - service.links = [link.Link.make_link('self', url, 'services', - service_uuid), - link.Link.make_link('bookmark', url, 'services', - service_uuid, bookmark=True) - ] - return service - - @classmethod - def convert_with_links(cls, rpc_service, fields=None): - service = Service(**rpc_service.as_dict()) - - if fields is not None: - api_utils.check_for_invalid_fields(fields, service.as_dict()) - - return cls._convert_with_links(service, pecan.request.public_url, - fields=fields) - - -class ServiceCollection(collection.Collection): - """API representation of a collection of services.""" - - services = [Service] - """A list containing services objects""" - - def __init__(self, **kwargs): - self._type = 'services' - - @staticmethod - def convert_with_links(services, limit, url=None, fields=None, **kwargs): - collection = ServiceCollection() - collection.services = [Service.convert_with_links(n, fields=fields) - for n in services] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class PublicServicesController(rest.RestController): - """REST controller for Public Services.""" - - invalid_sort_key_list = ['extra', 'location'] - - def _get_services_collection(self, marker, limit, - sort_key, sort_dir, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = 
api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Service.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - filters['public'] = True - - services = objects.Service.list(pecan.request.context, limit, - marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return ServiceCollection.convert_with_links(services, limit, - fields=fields, - **parameters) - - @expose.expose(ServiceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of services. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:service:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_services_collection(marker, - limit, sort_key, sort_dir, - fields=fields) - - -class ServicesController(rest.RestController): - """REST controller for Services.""" - - public = PublicServicesController() - - invalid_sort_key_list = ['extra', ] - - _custom_actions = { - 'detail': ['GET'], - } - - def _get_services_collection(self, marker, limit, - sort_key, sort_dir, - fields=None, with_public=False, - all_services=False): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Service.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - if all_services and not pecan.request.context.is_admin: - msg = ("all_services parameter can only be used " - "by the administrator.") - raise wsme.exc.ClientSideError(msg, - status_code=400) - else: - if not all_services: - filters['project'] = pecan.request.context.user_id - if with_public: - filters['with_public'] = with_public - - services = objects.Service.list(pecan.request.context, limit, - marker_obj, - sort_key=sort_key, sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return ServiceCollection.convert_with_links(services, limit, - fields=fields, - **parameters) - - @expose.expose(Service, types.uuid_or_name, types.listtype) - def get_one(self, service_ident, fields=None): - """Retrieve information about the given service. - - :param service_ident: UUID or logical name of a service. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - rpc_service = api_utils.get_rpc_service(service_ident) - cdict = pecan.request.context.to_policy_values() - cdict['project'] = rpc_service.project - policy.authorize('iot:service:get_one', cdict, cdict) - - return Service.convert_with_links(rpc_service, fields=fields) - - @expose.expose(ServiceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_services=False): - """Retrieve a list of services. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_services: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:service:get', cdict, cdict) - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_services_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_services=all_services, - fields=fields) - - @expose.expose(Service, body=Service, status_code=201) - def post(self, Service): - """Create a new Service. - - :param Service: a Service within the request body. 
- """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:service:create', cdict, cdict) - - if not Service.name: - raise exception.MissingParameterValue( - ("Name is not specified.")) - - if Service.name: - if not api_utils.is_valid_name(Service.name): - msg = ("Cannot create service with invalid name %(name)s") - raise wsme.exc.ClientSideError(msg % {'name': Service.name}, - status_code=400) - - new_Service = objects.Service(pecan.request.context, - **Service.as_dict()) - - new_Service.project = cdict['project_id'] - new_Service = pecan.request.rpcapi.create_service( - pecan.request.context, - new_Service) - - return Service.convert_with_links(new_Service) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, service_ident): - """Delete a service. - - :param service_ident: UUID or logical name of a service. - """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:service:delete', cdict, cdict) - - rpc_service = api_utils.get_rpc_service(service_ident) - pecan.request.rpcapi.destroy_service(pecan.request.context, - rpc_service.uuid) - - @expose.expose(Service, types.uuid_or_name, body=Service, status_code=200) - def patch(self, service_ident, val_Service): - """Update a service. - - :param service_ident: UUID or logical name of a service. 
- :param Service: values to be changed - :return updated_service: updated_service - """ - - rpc_service = api_utils.get_rpc_service(service_ident) - cdict = pecan.request.context.to_policy_values() - cdict['project'] = rpc_service.project - policy.authorize('iot:service:update', cdict, cdict) - - val_Service = val_Service.as_dict() - for key in val_Service: - try: - rpc_service[key] = val_Service[key] - except Exception: - pass - - updated_service = pecan.request.rpcapi.update_service( - pecan.request.context, rpc_service) - return Service.convert_with_links(updated_service) - - @expose.expose(ServiceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def detail(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_services=False): - """Retrieve a list of services. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public service. - :param all_services: Optional boolean to get all the services. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:service:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "services": - raise exception.HTTPNotFound() - - return self._get_services_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_services=all_services, - fields=fields) diff --git a/iotronic/iotronic/api/controllers/v1/types.py b/iotronic/iotronic/api/controllers/v1/types.py deleted file mode 100644 index 30f7254..0000000 --- a/iotronic/iotronic/api/controllers/v1/types.py +++ /dev/null @@ -1,360 +0,0 @@ -# coding: utf-8 -# -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import inspect -import json - -from oslo_utils import strutils -from oslo_utils import uuidutils -import six -import wsme -from wsme import types as wtypes - -from iotronic.api.controllers.v1 import utils as v1_utils -from iotronic.common import exception -from iotronic.common.i18n import _ -from iotronic.common import utils - - -class MacAddressType(wtypes.UserType): - """A simple MAC address type.""" - - basetype = wtypes.text - name = 'macaddress' - - @staticmethod - def validate(value): - return utils.validate_and_normalize_mac(value) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return MacAddressType.validate(value) - - -class UuidOrNameType(wtypes.UserType): - """A simple UUID or logical name type.""" - - basetype = wtypes.text - name = 'uuid_or_name' - - @staticmethod - def validate(value): - if not (uuidutils.is_uuid_like(value) - or v1_utils.is_valid_logical_name(value)): - raise exception.InvalidUuidOrName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidOrNameType.validate(value) - - -class NameType(wtypes.UserType): - """A simple logical name type.""" - - basetype = wtypes.text - name = 'name' - - @staticmethod - def validate(value): - if not v1_utils.is_valid_logical_name(value): - raise exception.InvalidName(name=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return NameType.validate(value) - - -class UuidType(wtypes.UserType): - """A simple UUID type.""" - - basetype = wtypes.text - name = 'uuid' - - @staticmethod - def validate(value): - if not uuidutils.is_uuid_like(value): - raise exception.InvalidUUID(uuid=value) - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return UuidType.validate(value) - - -class BooleanType(wtypes.UserType): - """A simple boolean type.""" - - basetype = wtypes.text - name = 'boolean' - - @staticmethod - def 
validate(value): - try: - return strutils.bool_from_string(value, strict=True) - except ValueError as e: - # raise Invalid to return 400 (BadRequest) in the API - raise exception.Invalid(six.text_type(e)) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return BooleanType.validate(value) - - -class JsonType(wtypes.UserType): - """A simple JSON type.""" - - basetype = wtypes.text - name = 'json' - - def __str__(self): - # These are the json serializable native types - return ' | '.join(map(str, (wtypes.text, six.integer_types, float, - BooleanType, list, dict, None))) - - @staticmethod - def validate(value): - try: - json.dumps(value) - except TypeError: - raise exception.Invalid(_('%s is not JSON serializable') % value) - else: - return value - - @staticmethod - def frombasetype(value): - return JsonType.validate(value) - - -class ListType(wtypes.UserType): - """A simple list type.""" - - basetype = wtypes.text - name = 'list' - - @staticmethod - def validate(value): - """Validate and convert the input to a ListType. - - :param value: A comma separated string of values - :returns: A list of unique values, whose order is not guaranteed. 
- """ - items = [v.strip().lower() for v in six.text_type(value).split(',')] - # filter() to remove empty items - # set() to remove duplicated items - return list(set(filter(None, items))) - - @staticmethod - def frombasetype(value): - if value is None: - return None - return ListType.validate(value) - - -macaddress = MacAddressType() -uuid_or_name = UuidOrNameType() -name = NameType() -uuid = UuidType() -boolean = BooleanType() -listtype = ListType() -# Can't call it 'json' because that's the name of the stdlib module -jsontype = JsonType() - - -class JsonPatchType(wtypes.Base): - """A complex type that represents a single json-patch operation.""" - - path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\w-]+)+$'), - mandatory=True) - op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), - mandatory=True) - value = wsme.wsattr(jsontype, default=wtypes.Unset) - - # The class of the objects being patched. Override this in subclasses. - # Should probably be a subclass of iotronic.api.controllers.base.APIBase. - _api_base = None - - # Attributes that are not required for construction, but which may not be - # removed if set. Override in subclasses if needed. - _extra_non_removable_attrs = set() - - # Set of non-removable attributes, calculated lazily. - _non_removable_attrs = None - - @staticmethod - def internal_attrs(): - """Returns a list of internal attributes. - - Internal attributes can't be added, replaced or removed. This - method may be overwritten by derived class. - - """ - return ['/created_at', '/id', '/links', '/updated_at', '/uuid'] - - @classmethod - def non_removable_attrs(cls): - """Returns a set of names of attributes that may not be removed. - - Attributes whose 'mandatory' property is True are automatically added - to this set. To add additional attributes to the set, override the - field _extra_non_removable_attrs in subclasses, with a set of the form - {'/foo', '/bar'}. 
- """ - if cls._non_removable_attrs is None: - cls._non_removable_attrs = cls._extra_non_removable_attrs.copy() - if cls._api_base: - fields = inspect.getmembers(cls._api_base, - lambda a: not inspect.isroutine(a)) - for name, field in fields: - if getattr(field, 'mandatory', False): - cls._non_removable_attrs.add('/%s' % name) - return cls._non_removable_attrs - - @staticmethod - def validate(patch): - _path = '/' + patch.path.split('/')[1] - if _path in patch.internal_attrs(): - msg = _("'%s' is an internal attribute and can not be updated") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.path in patch.non_removable_attrs() and patch.op == 'remove': - msg = _("'%s' is a mandatory attribute and can not be removed") - raise wsme.exc.ClientSideError(msg % patch.path) - - if patch.op != 'remove': - if patch.value is wsme.Unset: - msg = _("'add' and 'replace' operations need a value") - raise wsme.exc.ClientSideError(msg) - - ret = {'path': patch.path, 'op': patch.op} - if patch.value is not wsme.Unset: - ret['value'] = patch.value - return ret - - -class LocalLinkConnectionType(wtypes.UserType): - """A type describing local link connection.""" - - basetype = wtypes.DictType - name = 'locallinkconnection' - - mandatory_fields = {'switch_id', - 'port_id'} - valid_fields = mandatory_fields.union({'switch_info'}) - - @staticmethod - def validate(value): - """Validate and convert the input to a LocalLinkConnectionType. - - :param value: A dictionary of values to validate, switch_id is a MAC - address or an OpenFlow based datapath_id, switch_info is an - optional field. - - For example:: - - { - 'switch_id': mac_or_datapath_id(), - 'port_id': 'Ethernet3/1', - 'switch_info': 'switch1' - } - - :returns: A dictionary. - :raises: Invalid if some of the keys in the dictionary being validated - are unknown, invalid, or some required ones are missing. 
- """ - wtypes.DictType(wtypes.text, wtypes.text).validate(value) - - keys = set(value) - - # This is to workaround an issue when an API object is initialized from - # RPC object, in which dictionary fields that are set to None become - # empty dictionaries - if not keys: - return value - - invalid = keys - LocalLinkConnectionType.valid_fields - if invalid: - raise exception.Invalid(_('%s are invalid keys') % (invalid)) - - # Check all mandatory fields are present - missing = LocalLinkConnectionType.mandatory_fields - keys - if missing: - msg = _('Missing mandatory keys: %s') % missing - raise exception.Invalid(msg) - - # Check switch_id is either a valid mac address or - # OpenFlow datapath_id and normalize it. - try: - value['switch_id'] = utils.validate_and_normalize_mac( - value['switch_id']) - except exception.InvalidMAC: - try: - value['switch_id'] = utils.validate_and_normalize_datapath_id( - value['switch_id']) - except exception.InvalidDatapathID: - raise exception.InvalidSwitchID(switch_id=value['switch_id']) - - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return LocalLinkConnectionType.validate(value) - -locallinkconnectiontype = LocalLinkConnectionType() - - -class VifType(JsonType): - - basetype = wtypes.text - name = 'viftype' - - mandatory_fields = {'id'} - - @staticmethod - def validate(value): - super(VifType, VifType).validate(value) - keys = set(value) - # Check all mandatory fields are present - missing = VifType.mandatory_fields - keys - if missing: - msg = _('Missing mandatory keys: %s') % ', '.join(list(missing)) - raise exception.Invalid(msg) - UuidOrNameType.validate(value['id']) - - return value - - @staticmethod - def frombasetype(value): - if value is None: - return None - return VifType.validate(value) - - -viftype = VifType() diff --git a/iotronic/iotronic/api/controllers/v1/utils.py b/iotronic/iotronic/api/controllers/v1/utils.py deleted file mode 100644 index c7ab25d..0000000 --- 
a/iotronic/iotronic/api/controllers/v1/utils.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonpatch -from oslo_config import cfg -from oslo_utils import uuidutils -import pecan -import wsme - -from iotronic.common import exception -from iotronic.common.i18n import _ -from iotronic.common import utils -from iotronic import objects - -CONF = cfg.CONF - -JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, - jsonpatch.JsonPointerException, - KeyError) - - -def validate_limit(limit): - if limit is None: - return CONF.api.max_limit - - if limit <= 0: - raise wsme.exc.ClientSideError(_("Limit must be positive")) - - return min(CONF.api.max_limit, limit) - - -def validate_sort_dir(sort_dir): - if sort_dir not in ['asc', 'desc']: - raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. 
" - "Acceptable values are " - "'asc' or 'desc'") % sort_dir) - return sort_dir - - -def apply_jsonpatch(doc, patch): - for p in patch: - if p['op'] == 'add' and p['path'].count('/') == 1: - if p['path'].lstrip('/') not in doc: - msg = _('Adding a new attribute (%s) to the root of ' - ' the resource is not allowed') - raise wsme.exc.ClientSideError(msg % p['path']) - return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch)) - - -def get_patch_value(patch, path): - for p in patch: - if p['path'] == path: - return p['value'] - - -def allow_board_logical_names(): - # v1.5 added logical name aliases - return pecan.request.version.minor >= 5 - - -def get_rpc_board(board_ident): - """Get the RPC board from the board uuid or logical name. - - :param board_ident: the UUID or logical name of a board. - - :returns: The RPC Board. - :raises: InvalidUuidOrName if the name or uuid provided is not valid. - :raises: BoardNotFound if the board is not found. - """ - # Check to see if the board_ident is a valid UUID. If it is, treat it - # as a UUID. - if uuidutils.is_uuid_like(board_ident): - return objects.Board.get_by_uuid(pecan.request.context, board_ident) - - # We can refer to boards by their name, if the client supports it - # if allow_board_logical_names(): - # if utils.is_hostname_safe(board_ident): - else: - return objects.Board.get_by_name(pecan.request.context, board_ident) - - raise exception.InvalidUuidOrName(name=board_ident) - - raise exception.BoardNotFound(board=board_ident) - - -def get_rpc_plugin(plugin_ident): - """Get the RPC plugin from the plugin uuid or logical name. - - :param plugin_ident: the UUID or logical name of a plugin. - - :returns: The RPC Plugin. - :raises: InvalidUuidOrName if the name or uuid provided is not valid. - :raises: PluginNotFound if the plugin is not found. - """ - # Check to see if the plugin_ident is a valid UUID. If it is, treat it - # as a UUID. 
- if uuidutils.is_uuid_like(plugin_ident): - return objects.Plugin.get_by_uuid(pecan.request.context, plugin_ident) - - # We can refer to plugins by their name, if the client supports it - # if allow_plugin_logical_names(): - # if utils.is_hostname_safe(plugin_ident): - else: - return objects.Plugin.get_by_name(pecan.request.context, plugin_ident) - - raise exception.InvalidUuidOrName(name=plugin_ident) - - raise exception.PluginNotFound(plugin=plugin_ident) - - -def get_rpc_service(service_ident): - """Get the RPC service from the service uuid or logical name. - - :param service_ident: the UUID or logical name of a service. - - :returns: The RPC Service. - :raises: InvalidUuidOrName if the name or uuid provided is not valid. - :raises: ServiceNotFound if the service is not found. - """ - # Check to see if the service_ident is a valid UUID. If it is, treat it - # as a UUID. - if uuidutils.is_uuid_like(service_ident): - return objects.Service.get_by_uuid(pecan.request.context, - service_ident) - - # We can refer to services by their name, if the client supports it - # if allow_service_logical_names(): - # if utils.is_hostname_safe(service_ident): - else: - return objects.Service.get_by_name(pecan.request.context, - service_ident) - - raise exception.InvalidUuidOrName(name=service_ident) - - raise exception.ServiceNotFound(service=service_ident) - - -def get_rpc_webservice(webservice_ident): - """Get the RPC webservice from the webservice uuid or logical name. - - :param webservice_ident: the UUID or logical name of a webservice. - - :returns: The RPC Webservice. - :raises: InvalidUuidOrName if the name or uuid provided is not valid. - :raises: WebserviceNotFound if the webservice is not found. - """ - # Check to see if the webservice_ident is a valid UUID. If it is, treat it - # as a UUID. 
- if uuidutils.is_uuid_like(webservice_ident): - return objects.Webservice.get_by_uuid(pecan.request.context, - webservice_ident) - - # # We can refer to webservices by their name, if the client supports it - # # if allow_webservice_logical_names(): - # # if utils.is_hostname_safe(webservice_ident): - # else: - # return objects.Webservice.get_by_name(pecan.request.context, - # webservice_ident) - - raise exception.InvalidUuidOrName(name=webservice_ident) - - raise exception.WebserviceNotFound(webservice=webservice_ident) - - -def get_rpc_port(port_ident): - """Get the RPC port from the port uuid. - - :param port_ident: the UUID or logical name of a port. - - :returns: The RPC Port. - :raises: InvalidUuidOrName if the name or uuid provided is not valid. - :raises: portNotFound if the port is not found. - """ - # Check to see if the port_ident is a valid UUID. If it is, treat it - # as a UUID. - if uuidutils.is_uuid_like(port_ident): - return objects.Port.get_by_uuid(pecan.request.context, - port_ident) - - # We can refer to ports by their name, if the client supports it - else: - return objects.Port.get_by_name(pecan.request.context, - port_ident) - - raise exception.InvalidUuidOrName(uuid=port_ident) - - raise exception.PortNottFound(uuid=port_ident) - - -def get_rpc_fleet(fleet_ident): - """Get the RPC fleet from the fleet uuid or logical name. - - :param fleet_ident: the UUID or logical name of a fleet. - - :returns: The RPC Fleet. - :raises: InvalidUuidOrName if the name or uuid provided is not valid. - :raises: FleetNotFound if the fleet is not found. - """ - # Check to see if the fleet_ident is a valid UUID. If it is, treat it - # as a UUID. 
- if uuidutils.is_uuid_like(fleet_ident): - return objects.Fleet.get_by_uuid(pecan.request.context, - fleet_ident) - - # We can refer to fleets by their name, if the client supports it - # if allow_fleet_logical_names(): - # if utils.is_hostname_safe(fleet_ident): - else: - return objects.Fleet.get_by_name(pecan.request.context, - fleet_ident) - - raise exception.InvalidUuidOrName(name=fleet_ident) - - raise exception.FleetNotFound(fleet=fleet_ident) - - -def is_valid_board_name(name): - """Determine if the provided name is a valid board name. - - Check to see that the provided board name is valid, and isn't a UUID. - - :param: name: the board name to check. - :returns: True if the name is valid, False otherwise. - """ - return not uuidutils.is_uuid_like(name) - - -def is_valid_name(name): - """Determine if the provided name is a valid name. - - Check to see that the provided board name isn't a UUID. - - :param: name: the board name to check. - :returns: True if the name is valid, False otherwise. - """ - return not uuidutils.is_uuid_like(name) - - -def is_valid_logical_name(name): - """Determine if the provided name is a valid hostname.""" - - return utils.is_valid_logical_name(name) - - -def check_for_invalid_fields(fields, object_fields): - """Check for requested non-existent fields. - - Check if the user requested non-existent fields. - - :param fields: A list of fields requested by the user - :object_fields: A list of fields supported by the object. - :raises: InvalidParameterValue if invalid fields were requested. 
- - """ - invalid_fields = set(fields) - set(object_fields) - if invalid_fields: - raise exception.InvalidParameterValue( - _('Field(s) "%s" are not valid') % ', '.join(invalid_fields)) diff --git a/iotronic/iotronic/api/controllers/v1/versions.py b/iotronic/iotronic/api/controllers/v1/versions.py deleted file mode 100644 index 4de266c..0000000 --- a/iotronic/iotronic/api/controllers/v1/versions.py +++ /dev/null @@ -1,25 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is the version 1 API -BASE_VERSION = 1 - -MINOR_1_INITIAL_VERSION = 0 -MINOR_MAX_VERSION = 0 - -# String representations of the minor and maximum versions -MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, - MINOR_1_INITIAL_VERSION) -MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, - MINOR_MAX_VERSION) diff --git a/iotronic/iotronic/api/controllers/v1/webservice.py b/iotronic/iotronic/api/controllers/v1/webservice.py deleted file mode 100644 index 161c6cb..0000000 --- a/iotronic/iotronic/api/controllers/v1/webservice.py +++ /dev/null @@ -1,277 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from iotronic.api.controllers import base -from iotronic.api.controllers import link -from iotronic.api.controllers.v1 import collection -from iotronic.api.controllers.v1 import types -from iotronic.api.controllers.v1 import utils as api_utils -from iotronic.api import expose -from iotronic.common import exception -from iotronic.common import policy -from iotronic import objects - -import pecan -from pecan import rest -import wsme -from wsme import types as wtypes - -_DEFAULT_RETURN_FIELDS = ('name', 'uuid', 'port', 'board_uuid', 'extra') - - -class Webservice(base.APIBase): - """API representation of a webservice. - - """ - uuid = types.uuid - name = wsme.wsattr(wtypes.text) - port = wsme.types.IntegerType() - board_uuid = types.uuid - secure = types.boolean - extra = types.jsontype - - links = wsme.wsattr([link.Link], readonly=True) - - def __init__(self, **kwargs): - self.fields = [] - fields = list(objects.Webservice.fields) - for k in fields: - # Skip fields we do not expose. 
- if not hasattr(self, k): - continue - self.fields.append(k) - setattr(self, k, kwargs.get(k, wtypes.Unset)) - - @staticmethod - def _convert_with_links(webservice, url, fields=None): - webservice_uuid = webservice.uuid - if fields is not None: - webservice.unset_fields_except(fields) - - webservice.links = [link.Link.make_link('self', url, 'webservices', - webservice_uuid), - link.Link.make_link('bookmark', url, 'webservices', - webservice_uuid, bookmark=True) - ] - return webservice - - @classmethod - def convert_with_links(cls, rpc_webservice, fields=None): - webservice = Webservice(**rpc_webservice.as_dict()) - if fields is not None: - api_utils.check_for_invalid_fields(fields, webservice.as_dict()) - - return cls._convert_with_links(webservice, pecan.request.public_url, - fields=fields) - - -class WebserviceCollection(collection.Collection): - """API representation of a collection of webservices.""" - - webservices = [Webservice] - """A list containing webservices objects""" - - def __init__(self, **kwargs): - self._type = 'webservices' - - @staticmethod - def convert_with_links(webservices, limit, url=None, fields=None, - **kwargs): - collection = WebserviceCollection() - collection.webservices = [Webservice.convert_with_links(n, - fields=fields) - for n in webservices] - collection.next = collection.get_next(limit, url=url, **kwargs) - return collection - - -class WebservicesController(rest.RestController): - """REST controller for Webservices.""" - - # _subcontroller_map = { - # 'boards': WebserviceBoardsController, - # } - - invalid_sort_key_list = ['extra', ] - - _custom_actions = { - 'detail': ['GET'], - } - - # @pecan.expose() - # def _lookup(self, ident, *remainder): - # try: - # ident = types.uuid_or_name.validate(ident) - # except exception.InvalidUuidOrName as e: - # pecan.abort('400', e.args[0]) - # if not remainder: - # return - # - # subcontroller = self._subcontroller_map.get(remainder[0]) - # if subcontroller: - # return 
subcontroller(webservice_ident=ident), remainder[1:] - - def _get_webservices_collection(self, marker, limit, - sort_key, sort_dir, - project_id, - fields=None): - - limit = api_utils.validate_limit(limit) - sort_dir = api_utils.validate_sort_dir(sort_dir) - - marker_obj = None - if marker: - marker_obj = objects.Webservice.get_by_uuid(pecan.request.context, - marker) - - if sort_key in self.invalid_sort_key_list: - raise exception.InvalidParameterValue( - ("The sort_key value %(key)s is an invalid field for " - "sorting") % {'key': sort_key}) - - filters = {} - filters['project_id'] = project_id - - webservices = objects.Webservice.list(pecan.request.context, limit, - marker_obj, - sort_key=sort_key, - sort_dir=sort_dir, - filters=filters) - - parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} - - return WebserviceCollection.convert_with_links(webservices, limit, - fields=fields, - **parameters) - - @expose.expose(Webservice, types.uuid_or_name, types.listtype) - def get_one(self, webservice_ident, fields=None): - """Retrieve information about the given webservice. - - :param webservice_ident: UUID or logical name of a webservice. - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - - rpc_webservice = api_utils.get_rpc_webservice(webservice_ident) - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:webservice:get_one', cdict, cdict) - - return Webservice.convert_with_links(rpc_webservice, fields=fields) - - @expose.expose(WebserviceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def get_all(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None): - """Retrieve a list of webservices. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. 
- This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public pluings. - :param all_webservices: Optional boolean to get all the pluings. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. - """ - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:webservice:get', cdict, cdict) - - project_id = pecan.request.context.project_id - - if fields is None: - fields = _DEFAULT_RETURN_FIELDS - return self._get_webservices_collection(marker, - limit, sort_key, sort_dir, - project_id=project_id, - fields=fields) - - @expose.expose(None, types.uuid_or_name, status_code=204) - def delete(self, webservice_ident): - """Delete a webservice. - - :param webservice_ident: UUID or logical name of a webservice. - """ - context = pecan.request.context - cdict = context.to_policy_values() - policy.authorize('iot:webservice:delete', cdict, cdict) - - rpc_webservice = api_utils.get_rpc_webservice(webservice_ident) - pecan.request.rpcapi.destroy_webservice(pecan.request.context, - rpc_webservice.uuid) - - # @expose.expose(Webservice, types.uuid_or_name, body=Webservice, - # status_code=200) - # def patch(self, webservice_ident, val_Webservice): - # """Update a webservice. - # - # :param webservice_ident: UUID or logical name of a webservice. 
- # :param Webservice: values to be changed - # :return updated_webservice: updated_webservice - # """ - # - # rpc_webservice = api_utils.get_rpc_webservice(webservice_ident) - # cdict = pecan.request.context.to_policy_values() - # cdict['project'] = rpc_webservice.project - # policy.authorize('iot:webservice:update', cdict, cdict) - # - # val_Webservice = val_Webservice.as_dict() - # for key in val_Webservice: - # try: - # rpc_webservice[key] = val_Webservice[key] - # except Exception: - # pass - # - # updated_webservice = pecan.request.rpcapi.update_webservice( - # pecan.request.context, rpc_webservice) - # return Webservice.convert_with_links(updated_webservice) - - @expose.expose(WebserviceCollection, types.uuid, int, wtypes.text, - wtypes.text, types.listtype, types.boolean, types.boolean) - def detail(self, marker=None, - limit=None, sort_key='id', sort_dir='asc', - fields=None, with_public=False, all_webservs=False): - """Retrieve a list of webservices. - - :param marker: pagination marker for large data sets. - :param limit: maximum number of resources to return in a single result. - This value cannot be larger than the value of max_limit - in the [api] section of the ironic configuration, or only - max_limit resources will be returned. - :param sort_key: column to sort results by. Default: id. - :param sort_dir: direction to sort. "asc" or "desc". Default: asc. - :param with_public: Optional boolean to get also public webservice. - :param all_webservs: Optional boolean to get all the webservices. - Only for the admin - :param fields: Optional, a list with a specified set of fields - of the resource to be returned. 
- """ - - cdict = pecan.request.context.to_policy_values() - policy.authorize('iot:webservice:get', cdict, cdict) - - # /detail should only work against collections - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "webservices": - raise exception.HTTPNotFound() - - return self._get_webservices_collection(marker, - limit, sort_key, sort_dir, - with_public=with_public, - all_webservices=all_webservs, - fields=fields) diff --git a/iotronic/iotronic/api/expose.py b/iotronic/iotronic/api/expose.py deleted file mode 100644 index 46d4649..0000000 --- a/iotronic/iotronic/api/expose.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright 2015 Rackspace, Inc -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import wsmeext.pecan as wsme_pecan - - -def expose(*args, **kwargs): - """Ensure that only JSON, and not XML, is supported.""" - if 'rest_content_types' not in kwargs: - kwargs['rest_content_types'] = ('json',) - return wsme_pecan.wsexpose(*args, **kwargs) diff --git a/iotronic/iotronic/api/hooks.py b/iotronic/iotronic/api/hooks.py deleted file mode 100644 index af8742c..0000000 --- a/iotronic/iotronic/api/hooks.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from pecan import hooks -from six.moves import http_client - -from iotronic.common import context -from iotronic.common import policy -from iotronic.conductor import rpcapi -from iotronic.db import api as dbapi - -LOG = log.getLogger(__name__) - -CHECKED_DEPRECATED_POLICY_ARGS = False - - -class ConfigHook(hooks.PecanHook): - """Attach the config object to the request so controllers can get to it.""" - - def before(self, state): - state.request.cfg = cfg.CONF - - -class DBHook(hooks.PecanHook): - """Attach the dbapi object to the request so controllers can get to it.""" - - def before(self, state): - state.request.dbapi = dbapi.get_instance() - - -class ContextHook(hooks.PecanHook): - """Configures a request context and attaches it to the request.""" - - def __init__(self, public_api_routes): - self.public_api_routes = public_api_routes - super(ContextHook, self).__init__() - - def before(self, state): - headers = state.request.headers - - is_public_api = state.request.environ.get( - 'is_public_api', False) - ctx = context.RequestContext.from_environ( - state.request.environ, - is_public_api=is_public_api, - project_id=headers.get('X-Project-Id'), - user_id=headers.get('X-User-Id'), - ) - - # Do not pass any token with context for noauth mode - if cfg.CONF.auth_strategy == 'noauth': - ctx.auth_token = None - - creds = ctx.to_policy_values() - - is_admin = policy.check('is_admin', creds, creds) - ctx.is_admin = is_admin - - state.request.context = ctx - - def after(self, state): - if 
state.request.context == {}: - # An incorrect url path will not create RequestContext - return - # NOTE(lintan): RequestContext will generate a request_id if no one - # passing outside, so it always contain a request_id. - request_id = state.request.context.request_id - state.response.headers['Openstack-Request-Id'] = request_id - - -class RPCHook(hooks.PecanHook): - """Attach the rpcapi object to the request so controllers can get to it.""" - - def before(self, state): - state.request.rpcapi = rpcapi.ConductorAPI() - - -class NoExceptionTracebackHook(hooks.PecanHook): - """Workaround rpc.common: deserialize_remote_exception. - - deserialize_remote_exception builds rpc exception traceback into error - message which is then sent to the client. Such behavior is a security - concern so this hook is aimed to cut-off traceback from the error message. - - """ - - # NOTE(max_lobur): 'after' hook used instead of 'on_error' because - # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator - # catches and handles all the errors, so 'on_error' dedicated for unhandled - # exceptions never fired. - def after(self, state): - # Omit empty body. Some errors may not have body at this level yet. - if not state.response.body: - return - - # Do nothing if there is no error. - # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not - # an error. - if (http_client.OK <= state.response.status_int < - http_client.BAD_REQUEST): - return - - json_body = state.response.json - # Do not remove traceback when traceback config is set - if cfg.CONF.debug_tracebacks_in_api: - return - - faultstring = json_body.get('faultstring') - traceback_marker = 'Traceback (most recent call last):' - if faultstring and traceback_marker in faultstring: - # Cut-off traceback. - faultstring = faultstring.split(traceback_marker, 1)[0] - # Remove trailing newlines and spaces if any. - json_body['faultstring'] = faultstring.rstrip() - # Replace the whole json. 
Cannot change original one because it's - # generated on the fly. - state.response.json = json_body - - -class PublicUrlHook(hooks.PecanHook): - """Attach the right public_url to the request. - - Attach the right public_url to the request so resources can create - links even when the API service is behind a proxy or SSL terminator. - - """ - - def before(self, state): - state.request.public_url = (cfg.CONF.api.public_endpoint or - state.request.host_url) diff --git a/iotronic/iotronic/api/middleware/__init__.py b/iotronic/iotronic/api/middleware/__init__.py deleted file mode 100644 index 746a609..0000000 --- a/iotronic/iotronic/api/middleware/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from iotronic.api.middleware import auth_token -from iotronic.api.middleware import parsable_error - - -ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware -AuthTokenMiddleware = auth_token.AuthTokenMiddleware - -__all__ = ('ParsableErrorMiddleware', - 'AuthTokenMiddleware') diff --git a/iotronic/iotronic/api/middleware/auth_token.py b/iotronic/iotronic/api/middleware/auth_token.py deleted file mode 100644 index c598d69..0000000 --- a/iotronic/iotronic/api/middleware/auth_token.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re - -from keystonemiddleware import auth_token -from oslo_log import log - -from iotronic.common import exception -from iotronic.common.i18n import _ -from iotronic.common import utils - -LOG = log.getLogger(__name__) - - -class AuthTokenMiddleware(auth_token.AuthProtocol): - """A wrapper on Keystone auth_token middleware. - - Does not perform verification of authentication tokens - for public routes in the API. - - """ - def __init__(self, app, conf, public_api_routes=None): - api_routes = [] if public_api_routes is None else public_api_routes - self._iotronic_app = app - # TODO(mrda): Remove .xml and ensure that doesn't result in a - # 401 Authentication Required instead of 404 Not Found - route_pattern_tpl = '%s(\.json|\.xml)?$' - - try: - self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) - for route_tpl in api_routes] - except re.error as e: - msg = _('Cannot compile public API routes: %s') % e - - LOG.error(msg) - raise exception.ConfigInvalid(error_msg=msg) - - super(AuthTokenMiddleware, self).__init__(app, conf) - - def __call__(self, env, start_response): - path = utils.safe_rstrip(env.get('PATH_INFO'), '/') - - # The information whether the API call is being performed against the - # public API is required for some other components. Saving it to the - # WSGI environment is reasonable thereby. 
- env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path), - self.public_api_routes)) - - if env['is_public_api']: - return self._iotronic_app(env, start_response) - - return super(AuthTokenMiddleware, self).__call__(env, start_response) diff --git a/iotronic/iotronic/api/middleware/parsable_error.py b/iotronic/iotronic/api/middleware/parsable_error.py deleted file mode 100644 index d4150fc..0000000 --- a/iotronic/iotronic/api/middleware/parsable_error.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Middleware to replace the plain text message body of an error -response with one formatted so the client can parse it. - -Based on pecan.middleware.errordocument -""" - -import json -from xml import etree as et - -from oslo_log import log -import six -import webob - -from iotronic.common.i18n import _ -from iotronic.common.i18n import _LE - -LOG = log.getLogger(__name__) - - -class ParsableErrorMiddleware(object): - """Replace error body with something the client can parse.""" - - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - # Request for this state, modified by replace_start_response() - # and used when an error is being reported. 
- state = {} - - def replacement_start_response(status, headers, exc_info=None): - """Overrides the default response to make errors parsable.""" - try: - status_code = int(status.split(' ')[0]) - state['status_code'] = status_code - except (ValueError, TypeError): # pragma: nocover - raise Exception(_( - 'ErrorDocumentMiddleware received an invalid ' - 'status %s') % status) - else: - if (state['status_code'] // 100) not in (2, 3): - # Remove some headers so we can replace them later - # when we have the full error message and can - # compute the length. - headers = [(h, v) - for (h, v) in headers - if h not in ('Content-Length', 'Content-Type') - ] - # Save the headers in case we need to modify them. - state['headers'] = headers - return start_response(status, headers, exc_info) - - app_iter = self.app(environ, replacement_start_response) - if (state['status_code'] // 100) not in (2, 3): - req = webob.Request(environ) - if (req.accept.best_match( - ['application/json', - 'application/xml']) == 'application/xml'): - try: - # simple check xml is valid - body = [et.ElementTree.tostring( - et.ElementTree.fromstring('' - + '\n'.join(app_iter) - + ''))] - except et.ElementTree.ParseError as err: - LOG.error(_LE('Error parsing HTTP response: %s'), err) - body = ['%s' % state['status_code'] - + ''] - state['headers'].append(('Content-Type', 'application/xml')) - else: - if six.PY3: - app_iter = [i.decode('utf-8') for i in app_iter] - body = [json.dumps({'error_message': '\n'.join(app_iter)})] - if six.PY3: - body = [item.encode('utf-8') for item in body] - state['headers'].append(('Content-Type', 'application/json')) - state['headers'].append(('Content-Length', str(len(body[0])))) - else: - body = app_iter - return body diff --git a/iotronic/iotronic/cmd/__init__.py b/iotronic/iotronic/cmd/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/iotronic/iotronic/cmd/conductor.py b/iotronic/iotronic/cmd/conductor.py deleted file mode 100755 index 
6d1e1bd..0000000 --- a/iotronic/iotronic/cmd/conductor.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Iotronic Conductor -""" - -from oslo_config import cfg - -from iotronic.conductor.manager import ConductorManager -import socket - -CONF = cfg.CONF - - -def main(): - ConductorManager(socket.gethostname()) diff --git a/iotronic/iotronic/cmd/dbsync.py b/iotronic/iotronic/cmd/dbsync.py deleted file mode 100644 index 8f6780d..0000000 --- a/iotronic/iotronic/cmd/dbsync.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Run storage database migration. 
-""" - -from __future__ import print_function - -import sys - -from iotronic.common import context -from iotronic.common.i18n import _ -from iotronic.common import service -from iotronic.db import api as db_api -from iotronic.db import migration -from oslo_config import cfg - -CONF = cfg.CONF -dbapi = db_api.get_instance() - -# NOTE(rloo): This is a list of functions to perform online data migrations -# (from previous releases) for this release, in batches. It may be empty. -# The migration functions should be ordered by execution order; from earlier -# to later releases. -# -# Each migration function takes two arguments -- the context and maximum -# number of objects to migrate, and returns a 2-tuple -- the total number of -# objects that need to be migrated at the beginning of the function, and the -# number migrated. If the function determines that no migrations are needed, -# it returns (0, 0). -# -# Example of a function docstring: -# -# def sample_data_migration(context, max_count): -# """Sample method to migrate data to new format. -# -# :param context: an admin context -# :param max_count: The maximum number of objects to migrate. Must be -# >= 0. If zero, all the objects will be migrated. -# :returns: A 2-tuple -- the total number of objects that need to be -# migrated (at the beginning of this call) and the number -# of migrated objects. -# """ -# NOTE(vdrok): Do not access objects' attributes, instead only provide object -# and attribute name tuples, so that not to trigger the load of the whole -# object, in case it is lazy loaded. The attribute will be accessed when needed -# by doing getattr on the object -ONLINE_MIGRATIONS = ( - -) - - -class DBCommand(object): - - def _check_versions(self): - """Check the versions of objects. - - Check that the object versions are compatible with this release - of iotronic. It does this by comparing the objects' .version field - in the database, with the expected versions of these objects. 
- - If it isn't compatible, we exit the program, returning 2. - """ - if migration.version() is None: - # no tables, nothing to check - return - - """ - try: - if not dbapi.check_versions(): - sys.stderr.write( - _('The database is not compatible with this ' - 'release of iotronic (%s). Please run ' - '"iotronic-dbsync online_data_migrations" using ' - 'the previous release.\n') - % version.version_info.release_string()) - # NOTE(rloo): We return 1 in online_data_migrations() to - # indicate that there are more objects to migrate, - # so don't use 1 here. - sys.exit(2) - except exception.DatabaseVersionTooOld: - sys.stderr.write( - _('The database version is not compatible with this ' - 'release of iotronic (%s). This can happen if you are ' - 'attempting to upgrade from a version older than ' - 'the previous release (skip versions upgrade). ' - 'This is an unsupported upgrade method. ' - 'Please run "iotronic-dbsync upgrade" using the previous ' - 'releases for a fast-forward upgrade.\n') - % version.version_info.release_string()) - sys.exit(2) - """ - - def upgrade(self): - self._check_versions() - migration.upgrade(CONF.command.revision) - - def revision(self): - migration.revision(CONF.command.message, CONF.command.autogenerate) - - def stamp(self): - migration.stamp(CONF.command.revision) - - def version(self): - print(migration.version()) - - def create_schema(self): - migration.create_schema() - - def online_data_migrations(self): - self._check_versions() - self._run_online_data_migrations(max_count=CONF.command.max_count, - options=CONF.command.options) - - def _run_migration_functions(self, context, max_count, options): - """Runs the migration functions. - - Runs the data migration functions in the ONLINE_MIGRATIONS list. - It makes sure the total number of object migrations doesn't exceed the - specified max_count. A migration of an object will typically migrate - one row of data inside the database. 
- - :param: context: an admin context - :param: max_count: the maximum number of objects (rows) to migrate; - a value >= 1. - :param: options: migration options - dict mapping migration name - to a dictionary of options for this migration. - :raises: Exception from the migration function - :returns: Boolean value indicating whether migrations are done. Returns - False if max_count objects have been migrated (since at that - point, it is unknown whether all migrations are done). Returns - True if migrations are all done (i.e. fewer than max_count objects - were migrated when the migrations are done). - """ - total_migrated = 0 - - for migration_func_obj, migration_func_name in ONLINE_MIGRATIONS: - migration_func = getattr(migration_func_obj, migration_func_name) - migration_opts = options.get(migration_func_name, {}) - num_to_migrate = max_count - total_migrated - try: - total_to_do, num_migrated = migration_func(context, - num_to_migrate, - **migration_opts) - except Exception as e: - print(_("Error while running %(migration)s: %(err)s.") - % {'migration': migration_func.__name__, 'err': e}, - file=sys.stderr) - raise - - print(_('%(migration)s() migrated %(done)i of %(total)i objects.') - % {'migration': migration_func.__name__, - 'total': total_to_do, - 'done': num_migrated}) - total_migrated += num_migrated - if total_migrated >= max_count: - # NOTE(rloo). max_count objects have been migrated so we have - # to stop. We return False because there is no look-ahead so - # we don't know if the migrations have been all done. All we - # know is that we've migrated max_count. It is possible that - # the migrations are done and that there aren't any more to - # migrate after this, but that would involve checking: - # 1. num_migrated == total_to_do (easy enough), AND - # 2. 
whether there are other migration functions and whether - # they need to do any object migrations (not so easy to - # check) - return False - - return True - - def _run_online_data_migrations(self, max_count=None, options=None): - """Perform online data migrations for the release. - - Online data migrations are done by running all the data migration - functions in the ONLINE_MIGRATIONS list. If max_count is None, all - the functions will be run in batches of 50 objects, until the - migrations are done. Otherwise, this will run (some of) the functions - until max_count objects have been migrated. - - :param: max_count: the maximum number of individual object migrations - or modified rows, a value >= 1. If None, migrations are run in a - loop in batches of 50, until completion. - :param: options: options to pass to migrations. List of values in the - form of .