tensorflow :: serving :: ServerCore :: Options

#include <server_core.h>

Options for configuring a ServerCore object.
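
A minimal usage sketch (not part of the generated reference): it fills in a few of the fields listed below and then calls ServerCore::Create. The model name, base path, policy choice, and error handling are illustrative assumptions; check them against your version of server_core.h and the stock ModelServer sources.

    #include <memory>
    #include <utility>

    #include "tensorflow_serving/core/availability_preserving_policy.h"
    #include "tensorflow_serving/model_servers/server_core.h"

    int main() {
      namespace tfs = tensorflow::serving;
      tfs::ServerCore::Options options;

      // Describe one servable; "my_model" and its base path are placeholders.
      auto* model =
          options.model_server_config.mutable_model_config_list()->add_config();
      model->set_name("my_model");
      model->set_base_path("/models/my_model");
      model->set_model_platform("tensorflow");

      // A version policy is required; AvailabilityPreservingPolicy is what the
      // stock ModelServer installs.
      options.aspired_version_policy =
          std::make_unique<tfs::AvailabilityPreservingPolicy>();

      // Tune loading behaviour, starting from the defaults documented below.
      options.file_system_poll_wait_seconds = 30;  // re-poll the file system every 30 s
      options.max_num_load_retries = 5;            // retry a failed load up to 5 times

      // A real server typically also populates options.platform_config_map for the
      // "tensorflow" platform; see the stock ModelServer for how that map is built.

      std::unique_ptr<tfs::ServerCore> core;
      const auto status = tfs::ServerCore::Create(std::move(options), &core);
      if (!status.ok()) {
        return 1;  // creation failed; status carries the reason
      }
      return 0;
    }

ServerCore::Create takes Options by value, which is why the sketch moves the object into the call.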

Summary

Public attributes

bool allow_version_labels = true
bool allow_version_labels_for_unavailable_models = false
std::unique_ptr< AspiredVersionPolicy > aspired_version_policy
CustomModelConfigLoader custom_model_config_loader
bool fail_if_no_model_versions_found = false
int32 file_system_poll_wait_seconds = 30
bool flush_filesystem_caches = false
int64 load_retry_interval_micros = 1LL * 60 * 1000 * 1000
int32 max_num_load_retries = 5
optional< string > model_config_list_root_dir
ModelServerConfig model_server_config
int32 num_initial_load_threads = 4.0 * port::NumSchedulableCPUs()
int32 num_load_threads = 0
int32 num_unload_threads = 0
PlatformConfigMap platform_config_map
PreLoadHook pre_load_hook
internal::PredictResponseTensorSerializationOption predict_response_tensor_serialization_option = internal::PredictResponseTensorSerializationOption::kAsProtoField
ServableStateMonitorCreator servable_state_monitor_creator
bool servable_versions_always_present = false
std::unique_ptr< ServerRequestLogger > server_request_logger
ServerRequestLoggerUpdater server_request_logger_updater
std::string storage_path_prefix
uint64 total_model_memory_limit_bytes = std::numeric_limits<uint64>::max()

Public attributes

allow_version_labels

bool tensorflow::serving::ServerCore::Options::allow_version_labels = true

allow_version_labels_for_unavailable_models

bool tensorflow::serving::ServerCore::Options::allow_version_labels_for_unavailable_models = false

aspired_version_policy

std::unique_ptr< AspiredVersionPolicy > tensorflow::serving::ServerCore::Options::aspired_version_policy

custom_model_config_loader

CustomModelConfigLoader tensorflow::serving::ServerCore::Options::custom_model_config_loader

fail_if_no_model_versions_found

bool tensorflow::serving::ServerCore::Options::fail_if_no_model_versions_found = false

file_system_poll_wait_seconds

int32 tensorflow::serving::ServerCore::Options::file_system_poll_wait_seconds = 30

flush_filesystem_caches

bool tensorflow::serving::ServerCore::Options::flush_filesystem_caches = false

load_retry_interval_micros

int64 tensorflow::serving::ServerCore::Options::load_retry_interval_micros = 1LL * 60 * 1000 * 1000

max_num_load_retries

int32 tensorflow::serving::ServerCore::Options::max_num_load_retries = 5
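
A hedged snippet showing how the two retry knobs combine; the values are arbitrary (the documented defaults are 5 retries spaced 1 minute apart).

    #include "tensorflow_serving/model_servers/server_core.h"

    int main() {
      tensorflow::serving::ServerCore::Options options;
      // Retry a failed servable load at most 3 times, waiting 30 s between attempts.
      options.max_num_load_retries = 3;
      options.load_retry_interval_micros = 30LL * 1000 * 1000;  // in microseconds
      return 0;
    }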

model_config_list_root_dir

optional< string > tensorflow::serving::ServerCore::Options::model_config_list_root_dir

model_server_config

ModelServerConfig tensorflow::serving::ServerCore::Options::model_server_config

num_initial_load_threads

int32 tensorflow::serving::ServerCore::Options::num_initial_load_threads = 4.0 * port::NumSchedulableCPUs()

num_load_threads

int32 tensorflow::serving::ServerCore::Options::num_load_threads = 0

num_unload_threads

int32 tensorflow::serving::ServerCore::Options::num_unload_threads = 0

platform_config_map

PlatformConfigMap tensorflow::serving::ServerCore::Options::platform_config_map

pre_load_hook

PreLoadHook tensorflow::serving::ServerCore::Options::pre_load_hook

predict_response_tensor_serialization_option

internal::PredictResponseTensorSerializationOption tensorflow::serving::ServerCore::Options::predict_response_tensor_serialization_option = internal::PredictResponseTensorSerializationOption::kAsProtoField

servable_state_monitor_creator

ServableStateMonitorCreator tensorflow::serving::ServerCore::Options::servable_state_monitor_creator

servable_versions_always_present

bool tensorflow::serving::ServerCore::Options::servable_versions_always_present = false

server_request_logger

std::unique_ptr< ServerRequestLogger > tensorflow::serving::ServerCore::Options::server_request_logger

server_request_logger_updater

ServerRequestLoggerUpdater tensorflow::serving::ServerCore::Options::server_request_logger_updater

storage_path_prefix

std::string tensorflow::serving::ServerCore::Options::storage_path_prefix

total_model_memory_limit_bytes

uint64 tensorflow::serving::ServerCore::Options::total_model_memory_limit_bytes = std::numeric_limits<uint64>::max()
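
A hedged example of overriding this limit; the 4 GiB figure is arbitrary, and the field simply takes a byte count (the default above is effectively unlimited).

    #include "tensorflow_serving/model_servers/server_core.h"

    int main() {
      tensorflow::serving::ServerCore::Options options;
      // Cap total resident model memory at 4 GiB = 4 * 1024^3 bytes.
      options.total_model_memory_limit_bytes = 4ULL * 1024 * 1024 * 1024;
      return 0;
    }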