tensorflow::serving::ServerCore::Options
#include <server_core.h>
Options for configuring a ServerCore object.
Summary
Public attributes
allow_version_labels
bool tensorflow::serving::ServerCore::Options::allow_version_labels = true
allow_version_labels_for_unavailable_models
bool tensorflow::serving::ServerCore::Options::allow_version_labels_for_unavailable_models = false
aspired_version_policy
std::unique_ptr< AspiredVersionPolicy > tensorflow::serving::ServerCore::Options::aspired_version_policy
custom_model_config_loader
CustomModelConfigLoader tensorflow::serving::ServerCore::Options::custom_model_config_loader
fail_if_no_model_versions_found
bool tensorflow::serving::ServerCore::Options::fail_if_no_model_versions_found = false
file_system_poll_wait_seconds
int32 tensorflow::serving::ServerCore::Options::file_system_poll_wait_seconds = 30
flush_filesystem_caches
bool tensorflow::serving::ServerCore::Options::flush_filesystem_caches = false
load_retry_interval_micros
int64 tensorflow::serving::ServerCore::Options::load_retry_interval_micros = 1LL * 60 * 1000 * 1000
max_num_load_retries
int32 tensorflow::serving::ServerCore::Options::max_num_load_retries = 5
model_config_list_root_dir
optional< string > tensorflow::serving::ServerCore::Options::model_config_list_root_dir
model_server_config
ModelServerConfig tensorflow::serving::ServerCore::Options::model_server_config
num_initial_load_threads
int32 tensorflow::serving::ServerCore::Options::num_initial_load_threads = 4.0 * port::NumSchedulableCPUs()
num_load_threads
int32 tensorflow::serving::ServerCore::Options::num_load_threads = 0
num_unload_threads
int32 tensorflow::serving::ServerCore::Options::num_unload_threads = 0
platform_config_map
PlatformConfigMap tensorflow::serving::ServerCore::Options::platform_config_map
pre_load_hook
PreLoadHook tensorflow::serving::ServerCore::Options::pre_load_hook
predict_response_tensor_serialization_option
internal::PredictResponseTensorSerializationOption tensorflow::serving::ServerCore::Options::predict_response_tensor_serialization_option = internal::PredictResponseTensorSerializationOption::kAsProtoField
servable_state_monitor_creator
ServableStateMonitorCreator tensorflow::serving::ServerCore::Options::servable_state_monitor_creator
servable_versions_always_present
bool tensorflow::serving::ServerCore::Options::servable_versions_always_present = false
server_request_logger
std::unique_ptr< ServerRequestLogger > tensorflow::serving::ServerCore::Options::server_request_logger
server_request_logger_updater
ServerRequestLoggerUpdater tensorflow::serving::ServerCore::Options::server_request_logger_updater
storage_path_prefix
std::string tensorflow::serving::ServerCore::Options::storage_path_prefix
total_model_memory_limit_bytes
uint64 tensorflow::serving::ServerCore::Options::total_model_memory_limit_bytes = std::numeric_limits<uint64>::max()