tensorflow::serving::ServerCore::Options
#include <server_core.h>
Options for configuring a ServerCore object.
Summary
Public attributes

| Attribute | Type |
|---|---|
| `allow_version_labels = true` | `bool` |
| `aspired_version_policy` | `std::unique_ptr<AspiredVersionPolicy>` |
| `custom_model_config_loader` | `CustomModelConfigLoader` |
| `fail_if_no_model_versions_found = false` | `bool` |
| `file_system_poll_wait_seconds = 30` | `int32` |
| `flush_filesystem_caches = false` | `bool` |
| `load_retry_interval_micros = 1LL * 60 * 1000 * 1000` | `int64` |
| `max_num_load_retries = 5` | `int32` |
| `model_config_list_root_dir` | `optional<string>` |
| `model_server_config` | `ModelServerConfig` |
| `num_initial_load_threads = 4.0 * port::NumSchedulableCPUs()` | `int32` |
| `num_load_threads = 0` | `int32` |
| `num_unload_threads = 0` | `int32` |
| `platform_config_map` | `PlatformConfigMap` |
| `pre_load_hook` | `PreLoadHook` |
| `servable_state_monitor_creator` | `ServableStateMonitorCreator` |
| `server_request_logger` | `std::unique_ptr<ServerRequestLogger>` |
| `server_request_logger_updater` | `ServerRequestLoggerUpdater` |
| `total_model_memory_limit_bytes = std::numeric_limits<uint64_t>::max()` | `uint64` |
Public attributes
allow_version_labels
bool tensorflow::serving::ServerCore::Options::allow_version_labels = true
aspired_version_policy
std::unique_ptr< AspiredVersionPolicy > tensorflow::serving::ServerCore::Options::aspired_version_policy
custom_model_config_loader
CustomModelConfigLoader tensorflow::serving::ServerCore::Options::custom_model_config_loader
fail_if_no_model_versions_found
bool tensorflow::serving::ServerCore::Options::fail_if_no_model_versions_found = false
file_system_poll_wait_seconds
int32 tensorflow::serving::ServerCore::Options::file_system_poll_wait_seconds = 30
flush_filesystem_caches
bool tensorflow::serving::ServerCore::Options::flush_filesystem_caches = false
load_retry_interval_micros
int64 tensorflow::serving::ServerCore::Options::load_retry_interval_micros = 1LL * 60 * 1000 * 1000
max_num_load_retries
int32 tensorflow::serving::ServerCore::Options::max_num_load_retries = 5
model_config_list_root_dir
optional< string > tensorflow::serving::ServerCore::Options::model_config_list_root_dir
model_server_config
ModelServerConfig tensorflow::serving::ServerCore::Options::model_server_config
num_initial_load_threads
int32 tensorflow::serving::ServerCore::Options::num_initial_load_threads = 4.0 * port::NumSchedulableCPUs()
num_load_threads
int32 tensorflow::serving::ServerCore::Options::num_load_threads = 0
num_unload_threads
int32 tensorflow::serving::ServerCore::Options::num_unload_threads = 0
platform_config_map
PlatformConfigMap tensorflow::serving::ServerCore::Options::platform_config_map
pre_load_hook
PreLoadHook tensorflow::serving::ServerCore::Options::pre_load_hook
servable_state_monitor_creator
ServableStateMonitorCreator tensorflow::serving::ServerCore::Options::servable_state_monitor_creator
server_request_logger
std::unique_ptr< ServerRequestLogger > tensorflow::serving::ServerCore::Options::server_request_logger
server_request_logger_updater
ServerRequestLoggerUpdater tensorflow::serving::ServerCore::Options::server_request_logger_updater
total_model_memory_limit_bytes
uint64 tensorflow::serving::ServerCore::Options::total_model_memory_limit_bytes = std::numeric_limits<uint64_t>::max()
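
The struct is plain data: populate the fields and hand it to the ServerCore factory. Below is a minimal sketch of doing that, assuming the `ServerCore::Create(Options, std::unique_ptr<ServerCore>*)` factory, the `AvailabilityPreservingPolicy` aspired-version policy, and the include paths of a typical tensorflow_serving checkout; the model name, base path, and chosen option values are hypothetical, and exact headers may differ between releases.

```cpp
// Sketch: configure ServerCore::Options and build a ServerCore.
// Paths, types, and defaults assumed from a typical tensorflow_serving
// checkout; verify against the version you build with.
#include <memory>
#include <utility>

#include "tensorflow_serving/config/model_server_config.pb.h"
#include "tensorflow_serving/core/availability_preserving_policy.h"
#include "tensorflow_serving/model_servers/server_core.h"

namespace tfs = tensorflow::serving;

tensorflow::Status BuildServerCore(std::unique_ptr<tfs::ServerCore>* core) {
  tfs::ServerCore::Options options;

  // Describe one servable model. ModelServerConfig is the same proto the
  // stock model server reads from --model_config_file.
  auto* model =
      options.model_server_config.mutable_model_config_list()->add_config();
  model->set_name("my_model");               // hypothetical model name
  model->set_base_path("/models/my_model");  // hypothetical base path
  model->set_model_platform("tensorflow");

  // A version-transition policy is required; AvailabilityPreservingPolicy
  // keeps an old version serving until its replacement has loaded.
  options.aspired_version_policy =
      std::make_unique<tfs::AvailabilityPreservingPolicy>();

  // Tune the knobs documented above (values here are illustrative).
  options.num_load_threads = 4;
  options.num_unload_threads = 4;
  options.file_system_poll_wait_seconds = 60;  // re-scan base_path every 60s
  options.max_num_load_retries = 3;
  options.fail_if_no_model_versions_found = true;
  options.total_model_memory_limit_bytes = 8ULL << 30;  // ~8 GiB cap

  // Note: options.platform_config_map normally also needs an entry for the
  // "tensorflow" platform (the stock server derives one from a
  // SavedModelBundleSourceAdapterConfig); omitted here for brevity.

  return tfs::ServerCore::Create(std::move(options), core);
}
```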