From 0e0d0a0c2e63f8be6d2a33659a02ea0bd9d44353 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Tue, 13 Jan 2026 11:07:33 -0800 Subject: [PATCH 1/9] [slimtensor] integration into backend Differential Revision: [D90606409](https://our.internmc.facebook.com/intern/diff/D90606409/) [ghstack-poisoned] --- backends/aoti/aoti_delegate_handle.h | 12 + backends/aoti/common_shims.cpp | 268 ---- backends/aoti/slim/c10/cuda/Exception.h | 2 + backends/aoti/targets.bzl | 34 +- backends/aoti/tests/TARGETS | 19 - backends/aoti/tests/utils.h | 74 -- backends/cuda/runtime/TARGETS | 45 +- backends/cuda/runtime/cuda_backend.cpp | 288 +++-- backends/cuda/runtime/guard.h | 4 +- backends/cuda/runtime/shims/memory.cpp | 902 ++----------- backends/cuda/runtime/shims/memory.h | 190 ++- backends/cuda/runtime/shims/memory_slim.cpp | 259 ---- backends/cuda/runtime/shims/memory_slim.h | 175 --- .../cuda/runtime/shims/tests/CMakeLists.txt | 10 +- backends/cuda/runtime/shims/tests/targets.bzl | 43 +- .../test_aoti_torch__reinterpret_tensor.cpp | 1132 ++++++++--------- ...st_aoti_torch__reinterpret_tensor_slim.cpp | 692 ---------- .../test_aoti_torch_assign_tensors_out.cpp | 524 +++++--- ...est_aoti_torch_assign_tensors_out_slim.cpp | 437 ------- .../shims/tests/test_aoti_torch_copy_.cpp | 657 +++++----- .../tests/test_aoti_torch_copy__slim.cpp | 487 ------- ..._aoti_torch_create_tensor_from_blob_v2.cpp | 1046 +++++++-------- ..._torch_create_tensor_from_blob_v2_slim.cpp | 633 --------- .../test_aoti_torch_delete_tensor_object.cpp | 533 ++++---- ...t_aoti_torch_delete_tensor_object_slim.cpp | 385 ------ .../tests/test_aoti_torch_empty_strided.cpp | 856 +++++-------- .../test_aoti_torch_empty_strided_slim.cpp | 467 ------- .../shims/tests/test_aoti_torch_item_bool.cpp | 318 +++-- .../tests/test_aoti_torch_item_bool_slim.cpp | 291 ----- .../test_aoti_torch_new_tensor_handle.cpp | 619 +++++---- ...test_aoti_torch_new_tensor_handle_slim.cpp | 651 ---------- backends/cuda/runtime/utils.h | 2 + 32 files 
changed, 3240 insertions(+), 8815 deletions(-) delete mode 100644 backends/aoti/common_shims.cpp delete mode 100644 backends/aoti/tests/utils.h delete mode 100644 backends/cuda/runtime/shims/memory_slim.cpp delete mode 100644 backends/cuda/runtime/shims/memory_slim.h delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor_slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out_slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_copy__slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2_slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object_slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided_slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool_slim.cpp delete mode 100644 backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle_slim.cpp diff --git a/backends/aoti/aoti_delegate_handle.h b/backends/aoti/aoti_delegate_handle.h index b14e02da9ef..cc56c747f8e 100644 --- a/backends/aoti/aoti_delegate_handle.h +++ b/backends/aoti/aoti_delegate_handle.h @@ -11,6 +11,11 @@ #include #include #include +#include + +#ifdef CUDA_AVAILABLE +#include +#endif namespace executorch { namespace backends { @@ -95,6 +100,13 @@ struct AOTIDelegateHandle { AOTInductorModelContainerGetNumOutputsFunc get_num_outputs; AOTInductorModelContainerRunFunc run; AOTInductorModelUpdateConstantsFromBlobFunc update_constants_from_blob; + +#ifdef CUDA_AVAILABLE + // Cached output tensors for skip-copy optimization. + // When copy-skip is enabled, output SlimTensors are cached here to keep + // GPU memory alive while the caller processes the results. 
+ std::vector cached_outputs; +#endif }; } // namespace aoti diff --git a/backends/aoti/common_shims.cpp b/backends/aoti/common_shims.cpp deleted file mode 100644 index abfde86db6d..00000000000 --- a/backends/aoti/common_shims.cpp +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include -#include - -namespace executorch { -namespace backends { -namespace aoti { - -namespace internal { -// Global storage for tensor metadata -AOTI_SHIM_EXPORT std::unordered_map> - tensor_to_sizes; -AOTI_SHIM_EXPORT std::unordered_map> - tensor_to_strides; -} // namespace internal - -extern "C" { - -// Autograd mode functions -int32_t aoti_torch_grad_mode_is_enabled() { - // No autograd ever - return false; -} - -void aoti_torch_grad_mode_set_enabled(bool enabled) { - if (enabled) { - throw std::runtime_error("Cannot enable autograd"); - } -} - -// Tensor attribute operations -AOTITorchError aoti_torch_get_data_ptr(Tensor* tensor, void** ret_data_ptr) { - *ret_data_ptr = tensor->mutable_data_ptr(); - return Error::Ok; -} - -AOTITorchError aoti_torch_get_storage_offset( - Tensor* tensor, - int64_t* ret_storage_offset) { - // Storage offset is always 0 in ET - *ret_storage_offset = 0; - - return Error::Ok; -} - -AOTITorchError aoti_torch_get_strides(Tensor* tensor, int64_t** ret_strides) { - auto it = internal::tensor_to_strides.find(tensor); - bool needs_update = false; - - if (it == internal::tensor_to_strides.end()) { - needs_update = true; - } else { - // CRITICAL: Multimodal models reuse tensors with different shapes across - // executions (e.g., variable-length audio). We MUST validate cached - // metadata matches current tensor state, or CUDA kernels will receive - // incorrect shapes leading to memory corruption and segfaults. 
- auto tensor_strides = tensor->strides(); - needs_update = !std::equal( - it->second.begin(), - it->second.end(), - tensor_strides.begin(), - tensor_strides.end()); - } - - if (needs_update) { - std::vector strides(tensor->dim()); - auto tensor_strides = tensor->strides(); - for (int i = 0; i < tensor->dim(); i++) { - strides[i] = tensor_strides[i]; - } - it = - internal::tensor_to_strides.insert_or_assign(tensor, std::move(strides)) - .first; - } - - // For 0D tensors, data() returns nullptr on empty vectors, but we need to - // return a valid pointer - if (it->second.empty()) { - static int64_t empty_strides_placeholder = 0; - *ret_strides = &empty_strides_placeholder; - } else { - *ret_strides = it->second.data(); - } - - return Error::Ok; -} - -AOTITorchError aoti_torch_get_dtype(Tensor* tensor, int32_t* ret_dtype) { - *ret_dtype = static_cast(tensor->scalar_type()); - - return Error::Ok; -} - -AOTITorchError aoti_torch_get_sizes(Tensor* tensor, int64_t** ret_sizes) { - auto it = internal::tensor_to_sizes.find(tensor); - bool needs_update = false; - - if (it == internal::tensor_to_sizes.end()) { - needs_update = true; - } else { - // CRITICAL: Multimodal models reuse tensors with different shapes across - // executions (e.g., variable-length audio). We MUST validate cached - // metadata matches current tensor state, or CUDA kernels will receive - // incorrect shapes leading to memory corruption and segfaults. 
- auto tensor_sizes = tensor->sizes(); - needs_update = !std::equal( - it->second.begin(), - it->second.end(), - tensor_sizes.begin(), - tensor_sizes.end()); - } - - if (needs_update) { - std::vector sizes(tensor->dim()); - auto tensor_sizes = tensor->sizes(); - for (int i = 0; i < tensor->dim(); i++) { - sizes[i] = tensor_sizes[i]; - } - it = internal::tensor_to_sizes.insert_or_assign(tensor, std::move(sizes)) - .first; - } - - // For 0D tensors, data() returns nullptr on empty vectors, but we need to - // return a valid pointer - if (it->second.empty()) { - static int64_t empty_sizes_placeholder = 0; - *ret_sizes = &empty_sizes_placeholder; - } else { - *ret_sizes = it->second.data(); - } - - return Error::Ok; -} - -AOTITorchError aoti_torch_get_device_index( - Tensor* tensor, - int32_t* ret_device_index) { - // Let's assume all tensors AOTI using are on CUDA:0 - *ret_device_index = 0; - return Error::Ok; -} - -AOTITorchError aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim) { - *ret_dim = static_cast(tensor->dim()); - return Error::Ok; -} - -// Device and layout utility functions -int32_t aoti_torch_device_type_cpu() { - // Let's say cpu is 0 for ET as well - return 0; -} - -int32_t aoti_torch_layout_strided() { - // ET only support strided layout, the return value will always be 0, a.k.a - // at::Layout::Strided; - return 0; -} - -// Dtype constants - these return the PyTorch dtype codes -int32_t aoti_torch_dtype_float32() { - return 6; // PyTorch's float32 dtype code -} - -int32_t aoti_torch_dtype_bfloat16() { - return 15; // PyTorch's bfloat16 dtype code -} - -int32_t aoti_torch_dtype_int8() { - return 1; // PyTorch's int32 dtype code -} - -int32_t aoti_torch_dtype_int16() { - return 2; // PyTorch's int32 dtype code -} - -int32_t aoti_torch_dtype_int32() { - return 3; // PyTorch's int32 dtype code -} - -int32_t aoti_torch_dtype_bool() { - return 11; // PyTorch's bool dtype code -} - -int32_t aoti_torch_dtype_int64() { - return 4; // PyTorch's int64 dtype 
code -} - -// Dtype utility function needed by Metal backend. -// Returns the size of the dtype in bytes. -size_t aoti_torch_dtype_element_size(int32_t dtype) { - return dtype_to_element_size(dtype); -} - -// Cleanup functions -void cleanup_tensor_metadata() { - internal::tensor_to_sizes.clear(); - internal::tensor_to_strides.clear(); -} - -AOTI_SHIM_EXPORT void aoti_torch_warn( - const char* func, - const char* file, - uint32_t line, - const char* msg) { - ET_LOG(Error, "[%s:%u] %s: %s", file, line, func, msg); -} - -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_storage_size(Tensor* tensor, int64_t* ret_size) { - (void)tensor; - (void)ret_size; - throw std::runtime_error("Not implemented"); - return Error::Internal; -} - -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_clone_preserve_strides(Tensor* self, Tensor** ret_new_tensor) { - (void)self; - (void)ret_new_tensor; - throw std::runtime_error("Not implemented"); - return Error::Internal; -} - -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_clone(Tensor* self, Tensor** ret_new_tensor) { - (void)self; - (void)ret_new_tensor; - throw std::runtime_error("Not implemented"); - return Error::Internal; -} - -AOTI_SHIM_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob( - void* data_ptr, - int64_t ndim, - const int64_t* sizes, - const int64_t* strides, - int64_t storage_offset, - int32_t dtype, - int32_t device_type, - int32_t device_index, - Tensor** ret_new_tensor) { - (void)data_ptr; - (void)ndim; - (void)sizes; - (void)strides; - (void)storage_offset; - (void)dtype; - (void)device_type; - (void)device_index; - (void)ret_new_tensor; - throw std::runtime_error("Not implemented"); - return Error::Internal; -} - -} // extern "C" - -} // namespace aoti -} // namespace backends -} // namespace executorch diff --git a/backends/aoti/slim/c10/cuda/Exception.h b/backends/aoti/slim/c10/cuda/Exception.h index 33d8414e661..4db5781eb9f 100644 --- a/backends/aoti/slim/c10/cuda/Exception.h +++ 
b/backends/aoti/slim/c10/cuda/Exception.h @@ -19,12 +19,14 @@ /// Checks a CUDA expression and aborts on error. /// @param EXPR The CUDA expression to check. +#ifndef ET_CUDA_CHECK #define ET_CUDA_CHECK(EXPR) \ do { \ const cudaError_t __err = EXPR; \ ET_CHECK_MSG( \ __err == cudaSuccess, "CUDA error: %s", cudaGetErrorString(__err)); \ } while (0) +#endif /// Checks a CUDA expression and logs a warning on error (non-fatal). /// @param EXPR The CUDA expression to check. diff --git a/backends/aoti/targets.bzl b/backends/aoti/targets.bzl index 588dbc14831..ffe27e1d1e3 100644 --- a/backends/aoti/targets.bzl +++ b/backends/aoti/targets.bzl @@ -33,26 +33,22 @@ def define_common_targets(): ], ) - # AOTI common shims functionality + # AOTI common shims functionality (header-only library) + # The caller determines which tensor type is used by defining CUDA_AVAILABLE. + # - With CUDA_AVAILABLE=1: Uses SlimTensor + # - Without CUDA_AVAILABLE: Uses ETensor runtime.cxx_library( name = "common_shims", - srcs = [ - "common_shims.cpp", - ], headers = [ "common_shims.h", "export.h", "utils.h", ], - # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole) - link_whole = True, - supports_python_dlopen = True, - # Constructor needed for backend registration. - compiler_flags = ["-Wno-global-constructors"], visibility = ["PUBLIC"], - deps = [ + exported_deps = [ "//executorch/runtime/core:core", "//executorch/runtime/core/exec_aten:lib", + "//executorch/backends/aoti/slim/core:slimtensor", ], ) @@ -86,21 +82,3 @@ def define_common_targets(): ":delegate_handle", ], ) - - # SlimTensor-based common shims (header-only library) - # The caller determines which tensor type is used by defining CUDA_AVAILABLE. 
- # - With CUDA_AVAILABLE=1: Uses SlimTensor - # - Without CUDA_AVAILABLE: Uses ETensor - runtime.cxx_library( - name = "common_shims_slim", - headers = [ - "common_shims_slim.h", - "export.h", - ], - visibility = ["@EXECUTORCH_CLIENTS"], - deps = [ - "//executorch/runtime/core:core", - "//executorch/runtime/core/exec_aten:lib", - "//executorch/backends/aoti/slim/core:slimtensor", - ], - ) diff --git a/backends/aoti/tests/TARGETS b/backends/aoti/tests/TARGETS index d92e0e32a1f..8b3e8a7f4b1 100644 --- a/backends/aoti/tests/TARGETS +++ b/backends/aoti/tests/TARGETS @@ -8,27 +8,8 @@ cpp_unittest( srcs = [ "test_common_shims.cpp", ], - headers = [ - "utils.h", - ], deps = [ "//executorch/backends/aoti:common_shims", - "//executorch/extension/tensor:tensor", - "//executorch/runtime/core:core", - "//executorch/runtime/platform:platform", - "//executorch/runtime/core/exec_aten/testing_util:tensor_util", - "//executorch/runtime/core/exec_aten:lib", - "//executorch/extension/tensor:tensor", - ], -) - -cpp_unittest( - name = "test_common_shims_slim", - srcs = [ - "test_common_shims_slim.cpp", - ], - deps = [ - "//executorch/backends/aoti:common_shims_slim", "//executorch/backends/aoti/slim/core:slimtensor", "//executorch/backends/aoti/slim/factory:empty", "//executorch/runtime/core:core", diff --git a/backends/aoti/tests/utils.h b/backends/aoti/tests/utils.h deleted file mode 100644 index 1f26f7e2d51..00000000000 --- a/backends/aoti/tests/utils.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#pragma once - -#include -#include -#include -#include - -namespace executorch { -namespace backends { -namespace aoti { -namespace test { - -// Use the same type aliases as in common_shims.h -using executorch::runtime::etensor::Tensor; - -/** - * Creates a test tensor with the specified shape and scalar type - */ -inline std::shared_ptr create_test_tensor( - const std::vector& sizes, - exec_aten::ScalarType dtype = exec_aten::ScalarType::Float) { - // Calculate total number of elements - int64_t total_elements = 1; - for (int64_t size : sizes) { - total_elements *= size; - } - - // Calculate strides (row-major layout) - std::vector strides(sizes.size()); - if (sizes.size() > 0) { - strides[sizes.size() - 1] = 1; - for (int i = sizes.size() - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - } - - // Allocate data buffer - size_t dtype_size = exec_aten::elementSize(dtype); - void* data = malloc(total_elements * dtype_size); - - // Convert sizes and strides to the required type - std::vector sizes_converted( - sizes.begin(), sizes.end()); - std::vector strides_converted( - strides.begin(), strides.end()); - - // Create the tensor with the correct argument types and count - auto tensor = executorch::extension::from_blob( - data, sizes_converted, strides_converted, dtype); - - return tensor; -} - -/** - * Helper to clean up tensor data that was allocated with malloc - */ -inline void free_tensor_data(Tensor* tensor) { - if (tensor && tensor->mutable_data_ptr()) { - free(tensor->mutable_data_ptr()); - } -} - -} // namespace test -} // namespace aoti -} // namespace backends -} // namespace executorch diff --git a/backends/cuda/runtime/TARGETS b/backends/cuda/runtime/TARGETS index ad5baa8d83f..0d2e14248df 100644 --- a/backends/cuda/runtime/TARGETS +++ b/backends/cuda/runtime/TARGETS @@ -71,34 +71,34 @@ runtime.cxx_library( runtime.cxx_library( name = "runtime_shims", srcs = [ - "guard.cpp", "shims/cuda_guard.cpp", "shims/int4mm.cu", 
"shims/memory.cpp", "shims/tensor_attribute.cpp", ], headers = [ - "guard.h", "shims/cuda_guard.h", "shims/int4mm.cuh", "shims/int4mm.h", "shims/memory.h", "shims/tensor_attribute.h", - "utils.h", ], # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole) link_whole = True, supports_python_dlopen = True, # Constructor needed for backend registration. compiler_flags = ["-Wno-global-constructors"], + preprocessor_flags = ["-DCUDA_AVAILABLE=1"], visibility = ["PUBLIC"], deps = [ ":tensor_maker", "//executorch/backends/aoti:common_shims", + "//executorch/backends/aoti/slim/core:slimtensor", + "//executorch/backends/aoti/slim/factory:empty", + "//executorch/backends/aoti/slim/factory:from_blob", "//executorch/runtime/core:core", "//executorch/runtime/core/exec_aten:lib", "//executorch/runtime/platform:platform", - "//executorch/backends/cuda/runtime:cuda_platform", ], nvcc_flags = get_nvcc_arch_args() + [ "-_NVCC_HOST_COMPILER_FLAG_", @@ -109,33 +109,12 @@ runtime.cxx_library( ], ) +# Legacy alias for backward compatibility runtime.cxx_library( name = "runtime_shims_slim", - srcs = [ - "shims/memory_slim.cpp", - ], - headers = [ - "shims/memory_slim.h", - ], - # @lint-ignore BUCKLINT: Avoid `link_whole=True` (https://fburl.com/avoid-link-whole) - link_whole = True, - supports_python_dlopen = True, - visibility = ["@EXECUTORCH_CLIENTS"], - preprocessor_flags = ["-DCUDA_AVAILABLE=1"], - deps = [ - "//executorch/backends/aoti/slim/core:slimtensor", - "//executorch/backends/aoti/slim/factory:empty", - "//executorch/backends/aoti/slim/factory:from_blob", - "//executorch/backends/aoti:common_shims", - "//executorch/runtime/core:core", - "//executorch/runtime/platform:platform", - ], - nvcc_flags = get_nvcc_arch_args() + [ - "-_NVCC_HOST_COMPILER_FLAG_", - "gcc", - ], - external_deps = [ - ("cuda", None, "cuda-lazy"), + visibility = ["PUBLIC"], + exported_deps = [ + ":runtime_shims", ], ) @@ -149,10 +128,16 @@ runtime.cxx_library( 
supports_python_dlopen = True, # Constructor needed for backend registration. compiler_flags = ["-Wno-global-constructors"], + preprocessor_flags = ["-DCUDA_AVAILABLE=1"], visibility = ["PUBLIC"], deps = [ - ":runtime_shims", + ":runtime_shims_slim", "//executorch/backends/aoti:aoti_common", + "//executorch/backends/aoti/slim/core:slimtensor", + "//executorch/backends/aoti/slim/factory:empty", + "//executorch/backends/aoti/slim/factory:from_blob", + "//executorch/backends/aoti/slim/factory:from_etensor", + "//executorch/extension/tensor:tensor", "//executorch/runtime/backend:interface", "//executorch/runtime/core/exec_aten/util:tensor_util", ], diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index cd1c6b96f02..4cf0b1521d6 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -21,6 +21,16 @@ #include #include +// Include SlimTensor headers for CUDA backend +#include +#include +#include +#include +#include +#include +#include +#include + // Include our shim layer headers #include #include @@ -52,10 +62,113 @@ using executorch::runtime::Result; using executorch::runtime::Span; using executorch::runtime::etensor::Tensor; +// SlimTensor type aliases +using slim::c10::Device; +using slim::c10::DeviceType; +using slim::CPU_DEVICE; +using slim::DEFAULT_CUDA_DEVICE; +using slim::DeviceTraits; +using slim::from_etensor; +using slim::SlimTensor; + namespace { constexpr char kSkipCopyOutputToCpuForMethod[] = "skip_copy_output_to_cpu_for_method"; + +/** + * Copies data from a SlimTensor to an ETensor. + * + * This function converts a SlimTensor back to an ETensor. The ETensor is + * assumed to always reside on CPU, so this handles both CPU→CPU and GPU→CPU + * copies. The function will resize the ETensor if needed and copy the data. + * + * @param slim_tensor Pointer to the source SlimTensor (must not be null). + * @param etensor Pointer to the destination ETensor (must not be null). 
+ * @return Error::Ok on success, or an appropriate error code on failure. + */ +inline Error copy_slimtensor_to_etensor( + const SlimTensor* slim_tensor, + Tensor* etensor) { + ET_CHECK_OR_RETURN_ERROR( + slim_tensor != nullptr, + InvalidArgument, + "copy_slimtensor_to_etensor: slim_tensor pointer cannot be nullptr"); + + ET_CHECK_OR_RETURN_ERROR( + etensor != nullptr, + InvalidArgument, + "copy_slimtensor_to_etensor: etensor pointer cannot be nullptr"); + + // Check storage_offset is 0 (ETensor does not support storage offset) + ET_CHECK_OR_RETURN_ERROR( + slim_tensor->storage_offset() == 0, + InvalidArgument, + "copy_slimtensor_to_etensor: SlimTensor storage_offset must be 0, got %ld", + static_cast(slim_tensor->storage_offset())); + + // Check that SlimTensor is contiguous + ET_CHECK_OR_RETURN_ERROR( + slim_tensor->is_contiguous(), + InvalidArgument, + "copy_slimtensor_to_etensor: SlimTensor must be contiguous"); + + // Check dtype matches + slim::c10::ScalarType slim_dtype = slim_tensor->dtype(); + executorch::runtime::etensor::ScalarType etensor_dtype = etensor->scalar_type(); + ET_CHECK_OR_RETURN_ERROR( + static_cast(slim_dtype) == static_cast(etensor_dtype), + InvalidArgument, + "copy_slimtensor_to_etensor: dtype mismatch, SlimTensor dtype %d != ETensor dtype %d", + static_cast(slim_dtype), + static_cast(etensor_dtype)); + + // Check dimensions match + ET_CHECK_OR_RETURN_ERROR( + static_cast(slim_tensor->dim()) == etensor->dim(), + InvalidArgument, + "copy_slimtensor_to_etensor: dimension mismatch, SlimTensor dim %zu != ETensor dim %zd", + slim_tensor->dim(), + etensor->dim()); + + // Convert sizes from int64_t to SizesType (int32_t) for resize + const size_t ndim = slim_tensor->dim(); + std::vector new_sizes( + ndim); + auto slim_sizes = slim_tensor->sizes(); + for (size_t i = 0; i < ndim; ++i) { + new_sizes[i] = static_cast< + executorch::runtime::etensor::TensorImpl::SizesType>(slim_sizes[i]); + } + + // Resize ETensor to match SlimTensor sizes + Error 
resize_err = executorch::ET_RUNTIME_NAMESPACE::resize_tensor( + *etensor, + executorch::runtime::ArrayRef< + executorch::runtime::etensor::TensorImpl::SizesType>( + new_sizes.data(), new_sizes.size())); + ET_CHECK_OK_OR_RETURN_ERROR( + resize_err, "copy_slimtensor_to_etensor: failed to resize ETensor"); + + // Copy data from SlimTensor to ETensor + // SlimTensor may be on GPU or CPU, ETensor is always on CPU + size_t nbytes = slim_tensor->nbytes(); + if (nbytes > 0) { + void* dst_data = etensor->mutable_data_ptr(); + const void* src_data = slim_tensor->data_ptr(); + + if (slim_tensor->is_cpu()) { + // CPU → CPU copy + std::memcpy(dst_data, src_data, nbytes); + } else { + // GPU → CPU copy + DeviceTraits::memcpy( + dst_data, src_data, nbytes, CPU_DEVICE, slim_tensor->device()); + } + } + + return Error::Ok; } +} // anonymous namespace class ET_EXPERIMENTAL CudaBackend final : public ::executorch::runtime::BackendInterface { @@ -286,86 +399,75 @@ class ET_EXPERIMENTAL CudaBackend final args.size()) // NOTE: ExecuTorch tensors are always on CPU/host memory - // We need to create GPU copies for CUDA kernel execution - std::vector gpu_inputs( - n_inputs); // GPU copies for kernel execution - std::vector gpu_outputs( - n_outputs); // GPU tensors for kernel output - - // Process input tensors: ExecuTorch provides CPU tensors, create GPU - // copies - for (size_t i = 0; i < n_inputs; i++) { - // Get tensor dimensions and properties from ExecuTorch CPU tensor - auto cpu_tensor = &(args[i]->toTensor()); - auto sizes = cpu_tensor->sizes(); - auto scalar_type = cpu_tensor->scalar_type(); + // We need to create GPU copies for CUDA kernel execution using SlimTensor + std::vector gpu_input_tensors(n_inputs); + std::vector gpu_inputs(n_inputs); + std::vector gpu_output_tensors(n_outputs); + std::vector gpu_outputs(n_outputs); - // Create GPU tensor with same shape - std::vector sizes_vec(sizes.begin(), sizes.end()); + // Process input tensors: convert ETensor (CPU) to SlimTensor 
(GPU) + for (size_t i = 0; i < n_inputs; i++) { + auto* cpu_tensor = &(args[i]->toTensor()); + + // Check if input data is already on GPU (skip-copy optimization for inputs) + // This can happen when the caller has pre-staged data on GPU + cudaPointerAttributes attributes{}; + const void* data_ptr = cpu_tensor->const_data_ptr(); + if (data_ptr != nullptr) { + cudaError_t err = cudaPointerGetAttributes(&attributes, data_ptr); + if (err == cudaSuccess && attributes.type == cudaMemoryTypeDevice) { + // Data is already on GPU - wrap it directly without copy + auto sizes = cpu_tensor->sizes(); + auto strides = cpu_tensor->strides(); + std::vector sizes_vec(sizes.begin(), sizes.end()); + std::vector strides_vec(strides.begin(), strides.end()); + + gpu_input_tensors[i] = slim::from_blob( + const_cast(data_ptr), + slim::makeArrayRef(sizes_vec), + slim::makeArrayRef(strides_vec), + static_cast(cpu_tensor->scalar_type()), + DEFAULT_CUDA_DEVICE, + 0 // storage_offset + ); + gpu_inputs[i] = &gpu_input_tensors[i]; + continue; + } + } - AOTITensorHandle gpu_input_handle; - Error create_err = aoti_torch_empty_strided( - sizes_vec.size(), - sizes_vec.data(), - nullptr, // use default strides - static_cast(scalar_type), - 1, // device_type = cuda - 0, // device_index = 0 - &gpu_input_handle); - - ET_CHECK_OR_RETURN_ERROR( - create_err == Error::Ok, - Internal, - "Failed to create GPU tensor for input %d", - i); - - gpu_inputs[i] = gpu_input_handle; - - // Copy data from CPU to GPU - ET_CHECK_OR_RETURN_ERROR( - aoti_torch_copy_(gpu_inputs[i], cpu_tensor, 0) == Error::Ok, - Internal, - "Failed to copy input %d from CPU to GPU", - i); + // Data is on CPU - use from_etensor to copy to GPU + gpu_input_tensors[i] = + from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE); + gpu_inputs[i] = &gpu_input_tensors[i]; } - // Process output tensors: create GPU counterparts for ExecuTorch CPU - // tensors + + // Process output tensors: create GPU SlimTensors for kernel output for (size_t i = 
0; i < n_outputs; i++) { - // Get output tensor dimensions from ExecuTorch CPU tensor - auto cpu_output_tensor = &(args[i + n_inputs]->toTensor()); + auto* cpu_output_tensor = &(args[i + n_inputs]->toTensor()); auto sizes = cpu_output_tensor->sizes(); + auto strides = cpu_output_tensor->strides(); auto scalar_type = cpu_output_tensor->scalar_type(); - // Create GPU tensor with same shape for kernel output std::vector sizes_vec(sizes.begin(), sizes.end()); - - AOTITensorHandle gpu_output_handle; - Error create_err = aoti_torch_empty_strided( - sizes_vec.size(), - sizes_vec.data(), - nullptr, // use default strides - static_cast(scalar_type), - 1, // device_type = cuda - 0, // device_index = 0 - &gpu_output_handle); - - ET_CHECK_OR_RETURN_ERROR( - create_err == Error::Ok, - Internal, - "Failed to create GPU tensor for output %d", - i); - - gpu_outputs[i] = gpu_output_handle; + std::vector strides_vec(strides.begin(), strides.end()); + + gpu_output_tensors[i] = slim::empty_strided( + slim::makeArrayRef(sizes_vec), + slim::makeArrayRef(strides_vec), + static_cast(scalar_type), + DEFAULT_CUDA_DEVICE); + gpu_outputs[i] = &gpu_output_tensors[i]; } - // Run AOTI container with GPU tensors + + // Run AOTI container with GPU SlimTensors AOTIRuntimeError error = handle->run( handle->container_handle, - gpu_inputs.data(), // Use GPU input tensors + reinterpret_cast(gpu_inputs.data()), n_inputs, - gpu_outputs.data(), // Use GPU output tensors + reinterpret_cast(gpu_outputs.data()), n_outputs, - handle->cuda_stream, // Pass the actual CUDA stream - nullptr); // proxy_executor_handle can remain nullptr + handle->cuda_stream, + nullptr); ET_CHECK_OR_RETURN_ERROR( error == Error::Ok, @@ -376,22 +478,46 @@ class ET_EXPERIMENTAL CudaBackend final const bool copy_outputs = !should_skip_copy_for_method(handle->method_name); if (copy_outputs) { - // Copy GPU output results back to CPU output tensors + // Copy GPU SlimTensor results back to CPU ETensors for (size_t i = 0; i < n_outputs; 
i++) { - auto cpu_output_tensor = &(args[i + n_inputs]->toTensor()); - // For DYNAMIC_BOUND tensors we try to resize + auto* cpu_output_tensor = &(args[i + n_inputs]->toTensor()); ET_CHECK_OK_OR_RETURN_ERROR( - resize_tensor(*cpu_output_tensor, gpu_outputs[i]->sizes()), - "Error resizing tensor at output index %d", - i); - ET_CHECK_OK_OR_RETURN_ERROR( - aoti_torch_copy_(cpu_output_tensor, gpu_outputs[i], 0), - "Failed to copy GPU output %d back to CPU", + copy_slimtensor_to_etensor(gpu_outputs[i], cpu_output_tensor), + "Failed to copy GPU output %zu back to CPU ETensor", i); } } else { + // Skip-copy optimization: wrap GPU data as ETensor using from_blob + // The caller is responsible for handling GPU data directly for (size_t i = 0; i < n_outputs; i++) { - args[i + n_inputs]->toTensor() = *gpu_outputs[i]; + // Move output SlimTensors to cached_outputs for lifetime management + handle->cached_outputs.push_back(std::move(gpu_output_tensors[i])); + + // Create an ETensor wrapper pointing to the GPU data + // The data stays on GPU and the caller handles it + SlimTensor& cached = handle->cached_outputs.back(); + auto slim_sizes = cached.sizes(); + auto slim_strides = cached.strides(); + + std::vector et_sizes(cached.dim()); + std::vector et_strides(cached.dim()); + for (size_t d = 0; d < cached.dim(); d++) { + et_sizes[d] = + static_cast(slim_sizes[d]); + et_strides[d] = + static_cast(slim_strides[d]); + } + + // Use tensor_ptr_maker to create a non-owning ETensor wrapper + // Note: This creates a view into the SlimTensor's GPU memory + auto tensor_ptr = executorch::extension::from_blob( + cached.data_ptr(), + std::move(et_sizes), + std::move(et_strides), + static_cast(cached.dtype())); + + // Assign the wrapped tensor to the output EValue + args[i + n_inputs]->toTensor() = *tensor_ptr; } } @@ -424,9 +550,12 @@ class ET_EXPERIMENTAL CudaBackend final // AOTInductorModelContainerDelete(handle->container_handle); // Now close the shared library - auto err = Error::Ok; if 
(handle->so_handle != nullptr) { - err = close_library(handle->so_handle); + Error err = close_library(handle->so_handle); + ET_CHECK_OR_LOG_ERROR( + err == Error::Ok, + "Failed to close shared library for %s", + handle->so_path.c_str()); } // Remove the temporary shared library file @@ -441,7 +570,6 @@ class ET_EXPERIMENTAL CudaBackend final } delete handle; - clear_all_tensors(); } private: diff --git a/backends/cuda/runtime/guard.h b/backends/cuda/runtime/guard.h index 3f187000f90..2f0fb8f7546 100644 --- a/backends/cuda/runtime/guard.h +++ b/backends/cuda/runtime/guard.h @@ -19,8 +19,8 @@ namespace executorch::backends::cuda { using executorch::runtime::Error; using executorch::runtime::Result; -// Type alias for device index -using DeviceIndex = int32_t; +// Signed device index type matching DeviceIndex in slim tensor library +using DeviceIndex = int8_t; /** * Set the current CUDA stream for the specified device. diff --git a/backends/cuda/runtime/shims/memory.cpp b/backends/cuda/runtime/shims/memory.cpp index 86f6cdd6396..c10cbc3ad7f 100644 --- a/backends/cuda/runtime/shims/memory.cpp +++ b/backends/cuda/runtime/shims/memory.cpp @@ -6,104 +6,26 @@ * LICENSE file in the root directory of this source tree. 
*/ -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -namespace executorch::backends::cuda { - -using executorch::aten::SizesType; -using executorch::aten::StridesType; -using executorch::backends::aoti::aoti_torch_dtype_bool; -using executorch::backends::aoti::aoti_torch_get_device_index; -using executorch::backends::aoti::aoti_torch_get_dtype; -using executorch::backends::aoti::aoti_torch_get_sizes; -using executorch::backends::aoti::aoti_torch_get_strides; -using executorch::backends::aoti::convert_sizes_to_vector; -using executorch::backends::aoti::convert_strides_to_vector; -using executorch::backends::aoti::dtype_to_element_size; -using executorch::backends::aoti::dtype_to_scalar_type; -using executorch::backends::aoti::validate_storage_offset; - -// Global storage for tensors and their metadata -std::unordered_set> tensors; - -// Reference counting for memory addresses -// Maps memory address to number of tensors using it -// Special value: NOT_OWN (-1) means tensor never owns the memory -constexpr int32_t NOT_OWN = -1; -std::unordered_map memory_to_n_tensor; - -namespace { - -// Calculate linear offset from strides and indices -int64_t calculate_linear_offset( - const int64_t* indices, - const int64_t* strides, - int64_t ndim) { - int64_t offset = 0; - for (int64_t i = 0; i < ndim; ++i) { - offset += indices[i] * strides[i]; - } - return offset; -} - -// Convert linear index to multi-dimensional indices based on sizes -void linear_to_indices( - int64_t linear_idx, - const int64_t* sizes, - int64_t ndim, - int64_t* indices) { - for (int64_t i = ndim - 1; i >= 0; --i) { - indices[i] = linear_idx % sizes[i]; - linear_idx /= sizes[i]; - } -} +#include +#include +#include +#include -// Generic pointwise copy function that handles arbitrary strides -template -AOTITorchError pointwise_copy_generic( - T* dst_data, - const T* src_data, - const int64_t* dst_sizes, - const int64_t* 
dst_strides, - const int64_t* src_sizes, - const int64_t* src_strides, - int64_t dst_ndim, - int64_t src_ndim, - int64_t total_elements) { - std::vector dst_indices(dst_ndim); - std::vector src_indices(src_ndim); - - for (int64_t linear_idx = 0; linear_idx < total_elements; ++linear_idx) { - // Convert linear index to multi-dimensional indices for both tensors - linear_to_indices(linear_idx, dst_sizes, dst_ndim, dst_indices.data()); - linear_to_indices(linear_idx, src_sizes, src_ndim, src_indices.data()); - - // Calculate offsets for both source and destination - int64_t src_offset = - calculate_linear_offset(src_indices.data(), src_strides, src_ndim); - int64_t dst_offset = - calculate_linear_offset(dst_indices.data(), dst_strides, dst_ndim); - - // Copy element - dst_data[dst_offset] = src_data[src_offset]; - } +namespace executorch::backends::cuda { - return Error::Ok; -} +namespace c10 = executorch::backends::aoti::slim::c10; +using c10::Device; +using c10::DeviceIndex; +using c10::DeviceType; +using c10::ScalarType; +using executorch::backends::aoti::slim::empty_strided; +using executorch::backends::aoti::slim::from_blob; +using executorch::backends::aoti::slim::IntArrayRef; -} // anonymous namespace +// Use SlimTensor directly to avoid naming conflicts with ETensor +using SlimTensor = executorch::backends::aoti::slim::SlimTensor; extern "C" { @@ -116,109 +38,43 @@ AOTITorchError aoti_torch_create_tensor_from_blob_v2( int32_t dtype, int32_t device_type, int32_t device_index, - Tensor** ret_new_tensor, + SlimTensor** ret_new_tensor, int32_t layout, const uint8_t* opaque_metadata, int64_t opaque_metadata_size) { - (void)opaque_metadata; + // Unused parameters (void)layout; + (void)opaque_metadata; (void)opaque_metadata_size; - // Validate input parameters first ET_CHECK_OR_RETURN_ERROR( data != nullptr, InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2 failed: data pointer is null"); - - ET_CHECK_OR_RETURN_ERROR( - !(sizes_ptr == nullptr && ndim > 0), - 
InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2 failed: sizes_ptr is null"); + "aoti_torch_create_tensor_from_blob_v2: data is null"); ET_CHECK_OR_RETURN_ERROR( ret_new_tensor != nullptr, InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2 failed: ret_new_tensor is null"); - - // Check that device_index is always 0 - ET_CHECK_OR_RETURN_ERROR( - device_index == 0, - InvalidArgument, - "device_index must be 0, got: %d", - device_index); - - // Validate dtype using SupportedDTypes from utils.h - ET_CHECK_OK_OR_RETURN_ERROR(validate_dtype(dtype)); - - // Storage offset must be 0 since from_blob cannot handle different offsets - ET_CHECK_OK_OR_RETURN_ERROR(validate_storage_offset(storage_offset)); - - // Verify that data pointer location matches the requested device_type - cudaPointerAttributes data_attributes{}; - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaPointerGetAttributes(&data_attributes, data)); - - bool data_is_on_device = data_attributes.type == cudaMemoryTypeDevice; - bool data_is_on_host = data_attributes.type == cudaMemoryTypeHost || - data_attributes.type == cudaMemoryTypeUnregistered; - bool requested_device = - device_type == static_cast(SupportedDevices::CUDA); - bool requested_cpu = - device_type == static_cast(SupportedDevices::CPU); - - // Error if data location doesn't match requested device type - ET_CHECK_OR_RETURN_ERROR( - !(data_is_on_device && requested_cpu), - InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2 failed: data pointer %p is on CUDA " - "but device_type is CPU. Data must be on CPU for CPU tensors.", - data); + "aoti_torch_create_tensor_from_blob_v2: ret_new_tensor is null"); ET_CHECK_OR_RETURN_ERROR( - !(data_is_on_host && requested_device), + !(sizes_ptr == nullptr && ndim > 0), InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2 failed: data pointer %p is on CPU " - "but device_type is CUDA. 
Data must be on GPU for CUDA tensors.", - data); - - // Convert sizes to the format expected by ExecutorTorch using SizesType - std::vector sizes = - convert_sizes_to_vector(ndim, sizes_ptr); - - // Convert strides using the common helper function with StridesType - std::vector strides = - convert_strides_to_vector(ndim, sizes_ptr, strides_ptr); - - // Create ExecutorTorch tensor that wraps the existing memory - // Note: We're NOT copying the data, just wrapping it - // Using CUDA-specific tensor maker that supports incontiguous tensors - auto tensor = make_tensor( - sizes, // tensor dimensions - data, // existing memory (don't copy!) - {}, // dim_order (empty, will be auto-generated) - strides, // tensor strides (allows different strides) - dtype_to_scalar_type(dtype) // map int32_t dtype to ScalarType - ); - - ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, InvalidArgument, "Failed to create tensor from blob"); + "aoti_torch_create_tensor_from_blob_v2: sizes_ptr is null but ndim > 0"); - // Store the tensor so it doesn't get destroyed - tensors.insert(tensor); - - *ret_new_tensor = tensor.get(); - - // Check if this memory address is already being tracked - auto memory_it = memory_to_n_tensor.find(data); - ET_CHECK_OR_RETURN_ERROR( - memory_it == memory_to_n_tensor.end(), - InvalidArgument, - "Memory address %p is already being tracked by another tensor", - data); + IntArrayRef sizes(sizes_ptr, static_cast(ndim)); + IntArrayRef strides(strides_ptr, static_cast(ndim)); - // Mark this memory as NOT_OWN since tensor created from blob never owns - // memory - memory_to_n_tensor[data] = NOT_OWN; + // Create the SlimTensor using from_blob (non-owning) + *ret_new_tensor = new SlimTensor(from_blob( + data, + sizes, + strides, + static_cast(dtype), + Device( + static_cast(device_type), + static_cast(device_index)), + storage_offset)); return Error::Ok; } @@ -230,697 +86,177 @@ AOTITorchError aoti_torch_empty_strided( int32_t dtype, int32_t device_type, int32_t 
device_index, - Tensor** ret_new_tensor) { - // Check that device_index is always 0 + SlimTensor** ret_new_tensor) { ET_CHECK_OR_RETURN_ERROR( - device_index == 0, + ret_new_tensor != nullptr, InvalidArgument, - "device_index must be 0, got: %d", - device_index); - - // This requires us to reserve CUDA memory and put it into a ETensor - void* ptr; + "aoti_torch_empty_strided: ret_new_tensor is null"); - ET_CHECK_OK_OR_RETURN_ERROR(validate_dtype(dtype)); - - size_t element_size = dtype_to_element_size(dtype); ET_CHECK_OR_RETURN_ERROR( - element_size != 0, + !(sizes_ptr == nullptr && ndim > 0), InvalidArgument, - "Invalid element size for dtype: %d", - dtype); - - // Calculate storage size based on strides, matching PyTorch's behavior - // This is critical when sizes and strides don't match the expected contiguous - // layout Reference: PyTorch's computeStorageNbytes in EmptyTensor.cpp - int64_t storage_size = 1; // storage offset (0) + 1 - for (int64_t i = 0; i < ndim; i++) { - if (sizes_ptr[i] == 0) { - storage_size = 0; - break; - } - // For each dimension, add stride[i] * (size[i] - 1) - // This gives us the maximum offset in that dimension - int64_t stride_i = (strides_ptr != nullptr) ? 
strides_ptr[i] : 1; - if (strides_ptr == nullptr) { - // Calculate contiguous stride if not provided - for (int64_t j = i + 1; j < ndim; j++) { - stride_i *= sizes_ptr[j]; - } - } - storage_size += stride_i * (sizes_ptr[i] - 1); - } - int64_t nbytes = storage_size * element_size; - - if (device_type == static_cast(SupportedDevices::CUDA)) { - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaMallocAsync(&ptr, static_cast(nbytes), cudaStreamDefault)); - } else if (device_type == static_cast(SupportedDevices::CPU)) { - // Ensure 16-byte alignment for CPU memory to match CUDA requirements - ptr = aligned_alloc(16, nbytes); - ET_CHECK_OR_RETURN_ERROR( - ptr != nullptr, - MemoryAllocationFailed, - "Failed to allocate aligned CPU memory"); - } else { - ET_CHECK_OR_RETURN_ERROR( - false, - NotImplemented, - "Need to implement empty_strided for non-CUDA non-CPU device type %d", - device_type); - } - - // ETensor sizes - auto sizes = convert_sizes_to_vector(ndim, sizes_ptr); - - // ETensor strides - auto strides = convert_strides_to_vector(ndim, sizes_ptr, strides_ptr); - - // ETensor creation with dynamic shape support for edge cases - // Using CUDA-specific tensor maker that supports incontiguous tensors - auto tensor = make_tensor( + "aoti_torch_empty_strided: sizes_ptr is null but ndim > 0"); + + IntArrayRef sizes(sizes_ptr, static_cast(ndim)); + IntArrayRef strides(strides_ptr, static_cast(ndim)); + + // Create the SlimTensor using empty_strided (owning) + *ret_new_tensor = new SlimTensor(empty_strided( sizes, - ptr, - {}, // dim_order (empty, will be auto-generated) strides, - dtype_to_scalar_type(dtype)); + static_cast(dtype), + Device( + static_cast(device_type), + static_cast(device_index)))); - // Store the tensor so it doesn't get destroyed - tensors.insert(tensor); - *ret_new_tensor = tensor.get(); - - // This tensor owns the memory it allocated, set reference count to 1 - memory_to_n_tensor[ptr] = 1; return Error::Ok; } -void clear_all_tensors() { - // Use 
aoti_torch_delete_tensor_object to properly delete each tensor - // Note: We need to collect tensor pointers first since deletion modifies the - // set - std::vector tensor_ptrs; - tensor_ptrs.reserve(tensors.size()); - for (const auto& tensor_shared : tensors) { - tensor_ptrs.push_back(tensor_shared.get()); - } - - // Now delete each tensor - this will modify the global tensors set - for (Tensor* tensor_ptr : tensor_ptrs) { - aoti_torch_delete_tensor_object(tensor_ptr); - } - - // tensors set should now be empty, but ensure it's cleared - tensors.clear(); - - // Clear memory tracking map (includes leftover NOT_OWN entries) - memory_to_n_tensor.clear(); - - ET_LOG(Info, "Cleared all tensors and memory tracking"); -} - -AOTITorchError aoti_torch_delete_tensor_object(Tensor* tensor) { - // Handle null tensor pointer - ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, InvalidArgument, "Cannot delete null tensor"); - - // Check if tensor exists in our tracking - bool found_in_tensors = false; - for (auto it = tensors.begin(); it != tensors.end(); ++it) { - if (it->get() == tensor) { - found_in_tensors = true; - break; - } - } - - // If tensor not found in our tracking, it's invalid +AOTITorchError aoti_torch_delete_tensor_object(SlimTensor* tensor) { ET_CHECK_OR_RETURN_ERROR( - found_in_tensors, InvalidArgument, "Didn't find tensor %p", tensor); - - // Find and delete the tensor - for (auto it = tensors.begin(); it != tensors.end(); ++it) { - if (it->get() == tensor) { - // Get the tensor before erasing - auto tensor_ptr = *it; - void* data_ptr = tensor_ptr->mutable_data_ptr(); - - // Find the reference count for this memory address - auto memory_it = memory_to_n_tensor.find(data_ptr); - if (memory_it != memory_to_n_tensor.end()) { - int32_t ref_count = memory_it->second; - - if (ref_count == NOT_OWN) { - // Tensor never owned the memory, skip freeing - // Just remove tensor from tracking - tensors.erase(it); - return Error::Ok; - } else if (ref_count == 1) { - // Only 
current tensor using this memory, free it - // Determine if it's GPU memory - cudaPointerAttributes attributes{}; - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaPointerGetAttributes(&attributes, data_ptr)); - - if (attributes.type == cudaMemoryTypeDevice) { - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaFreeAsync(data_ptr, cudaStreamDefault)); - } else { - ET_CHECK_OR_RETURN_ERROR( - attributes.type != cudaMemoryTypeManaged, - Internal, - "Expected host memory but got managed!") - // This is CPU memory - free immediately - aligned_free(data_ptr); - data_ptr = nullptr; - } - - // Remove from memory tracking - memory_to_n_tensor.erase(memory_it); - } else if (ref_count > 1) { - // Other tensors still using this memory, just decrement count - memory_to_n_tensor[data_ptr] = ref_count - 1; - } - } else { - ET_CHECK_OR_RETURN_ERROR( - false, - Internal, - "Internal error: memory not found during deletion"); - } - - // Remove tensor from set (this will call the destructor if it's the last - // reference) - tensors.erase(it); - return Error::Ok; - } - } - - // This should never be reached since we found it above - ET_CHECK_OR_RETURN_ERROR( - false, Internal, "Internal error: tensor not found after validation"); -} - -AOTITorchError -aoti_torch_copy_(Tensor* self, Tensor* src, int32_t non_blocking) { - (void)non_blocking; - - // Check for null pointers first - ET_CHECK_OR_RETURN_ERROR( - self != nullptr, - InvalidArgument, - "aoti_torch_copy_ failed: self tensor is null"); - - ET_CHECK_OR_RETURN_ERROR( - src != nullptr, + tensor != nullptr, InvalidArgument, - "aoti_torch_copy_ failed: src tensor is null"); + "aoti_torch_delete_tensor_object: tensor is null"); - // Get dtype information and validate compatibility - int32_t self_dtype, src_dtype; - aoti_torch_get_dtype(self, &self_dtype); - aoti_torch_get_dtype(src, &src_dtype); + // SlimTensor uses SharedPtr for storage, so simply deleting the tensor + // will automatically handle reference counting and free the underlying + // storage when 
no more references exist. + delete tensor; - ET_CHECK_OK_OR_RETURN_ERROR(validate_dtype(self_dtype)); - - ET_CHECK_OK_OR_RETURN_ERROR(validate_dtype(src_dtype)); + return Error::Ok; +} - // Check dtype compatibility - both tensors must have the same dtype +AOTITorchError aoti_torch_new_tensor_handle( + SlimTensor* orig_handle, + SlimTensor** new_handle) { ET_CHECK_OR_RETURN_ERROR( - self_dtype == src_dtype, + orig_handle != nullptr, InvalidArgument, - "dtype mismatch. self.dtype=%d, src.dtype=%d. aoti_torch_copy_ requires same dtypes", - self_dtype, - src_dtype); - - // Check total number of elements compatibility (PyTorch copy_ behavior) - int64_t self_numel = self->numel(); - int64_t src_numel = src->numel(); + "aoti_torch_new_tensor_handle: orig_handle is null"); ET_CHECK_OR_RETURN_ERROR( - self_numel == src_numel, + new_handle != nullptr, InvalidArgument, - "numel mismatch. self.numel()=%ld, src.numel()=%ld", - self_numel, - src_numel); - - // Get tensor metadata - int64_t* self_strides; - int64_t* src_strides; - aoti_torch_get_strides(self, &self_strides); - aoti_torch_get_strides(src, &src_strides); - - int64_t* self_sizes; - int64_t* src_sizes; - aoti_torch_get_sizes(self, &self_sizes); - aoti_torch_get_sizes(src, &src_sizes); - - // Determine device locations - cudaPointerAttributes srcAttributes{}; - cudaPointerAttributes dstAttributes{}; - - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaPointerGetAttributes(&srcAttributes, src->data_ptr())); - - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaPointerGetAttributes(&dstAttributes, self->data_ptr())); - - bool srcIsDevice = srcAttributes.type == cudaMemoryTypeDevice; - bool dstIsDevice = dstAttributes.type == cudaMemoryTypeDevice; - - // Check if tensors have the same schema (sizes, strides, dtype) for fast path - bool same_schema = true; - for (int i = 0; i < self->dim(); i++) { - if (self_strides[i] != src_strides[i]) { - same_schema = false; - break; - } - } - - size_t total_bytes = src->nbytes(); - int64_t total_elements = 
self->numel(); - - if (same_schema) { - // Fast path: Direct memory copy since layouts match exactly - if (srcIsDevice && dstIsDevice) { - ET_CUDA_CHECK_OR_RETURN_ERROR(cudaMemcpy( - self->mutable_data_ptr(), - src->data_ptr(), - total_bytes, - cudaMemcpyDeviceToDevice)); - } else if (srcIsDevice && !dstIsDevice) { - ET_CUDA_CHECK_OR_RETURN_ERROR(cudaMemcpy( - self->mutable_data_ptr(), - src->data_ptr(), - total_bytes, - cudaMemcpyDeviceToHost)); - } else if (!srcIsDevice && dstIsDevice) { - ET_CUDA_CHECK_OR_RETURN_ERROR(cudaMemcpy( - self->mutable_data_ptr(), - src->data_ptr(), - total_bytes, - cudaMemcpyHostToDevice)); - } else { - std::memcpy(self->mutable_data_ptr(), src->data_ptr(), total_bytes); - } - } else { - // Fallback path: Pointwise copy with stride-aware indexing - // This handles arbitrary tensor layouts and strides - - size_t element_size = dtype_to_element_size(self_dtype); - ET_CHECK_OR_RETURN_ERROR( - element_size != 0, - InvalidArgument, - "Invalid element size for dtype: %d", - self_dtype); - - // Allocate temporary host memory for GPU tensors - float* src_host_data = nullptr; - float* dst_host_data = nullptr; - bool need_free_src = false; - bool need_free_dst = false; - - if (srcIsDevice) { - src_host_data = - static_cast(malloc(total_elements * sizeof(float))); - ET_CHECK_OR_RETURN_ERROR( - src_host_data != nullptr, - MemoryAllocationFailed, - "Failed to allocate memory for src_host_data"); - ET_CUDA_CHECK_OR_RETURN_ERROR(cudaMemcpy( - src_host_data, src->data_ptr(), total_bytes, cudaMemcpyDeviceToHost)); - need_free_src = true; - } else { - src_host_data = static_cast(src->data_ptr()); - } - - if (dstIsDevice) { - dst_host_data = - static_cast(malloc(total_elements * sizeof(float))); - if (dst_host_data == nullptr) { - if (need_free_src) { - free(src_host_data); - } - ET_CHECK_OR_RETURN_ERROR( - false, - MemoryAllocationFailed, - "Failed to allocate memory for dst_host_data"); - } - need_free_dst = true; - } else { - dst_host_data = 
static_cast(self->mutable_data_ptr()); - } - - // Perform pointwise copy with stride calculation - AOTITorchError copy_err = pointwise_copy_generic( - dst_host_data, - src_host_data, - self_sizes, - self_strides, - src_sizes, - src_strides, - self->dim(), - src->dim(), - total_elements); - - if (copy_err != Error::Ok) { - // Clean up temporary buffers before returning - if (need_free_src) { - free(src_host_data); - } - if (need_free_dst) { - free(dst_host_data); - } - return copy_err; - } - - // Copy result back to device if needed - if (dstIsDevice) { - ET_CUDA_CHECK_OR_RETURN_ERROR(cudaMemcpy( - self->mutable_data_ptr(), - dst_host_data, - total_bytes, - cudaMemcpyHostToDevice)); - } - - // Clean up temporary buffers - if (need_free_src) { - free(src_host_data); - } - if (need_free_dst) { - free(dst_host_data); - } - } + "aoti_torch_new_tensor_handle: new_handle is null"); + + // Create a new SlimTensor that shares the same underlying storage. + // SlimTensor's copy constructor shares the SharedPtr, so both + // tensors will reference the same memory. When the last tensor is deleted, + // the storage will be freed. 
+ *new_handle = new SlimTensor(*orig_handle); return Error::Ok; } AOTITorchError aoti_torch__reinterpret_tensor( - Tensor* self, + SlimTensor* self, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset, - Tensor** ret_new_tensor) { - // Validate input parameters first + SlimTensor** ret_new_tensor) { ET_CHECK_OR_RETURN_ERROR( self != nullptr, InvalidArgument, - "aoti_torch__reinterpret_tensor failed: self tensor is null"); - - ET_CHECK_OR_RETURN_ERROR( - !(sizes_ptr == nullptr && ndim > 0), - InvalidArgument, - "aoti_torch__reinterpret_tensor failed: sizes_ptr is null"); + "aoti_torch__reinterpret_tensor: self is null"); ET_CHECK_OR_RETURN_ERROR( ret_new_tensor != nullptr, InvalidArgument, - "aoti_torch__reinterpret_tensor failed: ret_new_tensor is null"); - - // Check if storage_offset is not 0 - return error if not - ET_CHECK_OK_OR_RETURN_ERROR(validate_storage_offset(storage_offset)); - - // Get the device info from the source tensor to perform device_index - // validation - int32_t device_type = 0; - int32_t device_index = 0; - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_device_type(self, &device_type)); - - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_device_index(self, &device_index)); + "aoti_torch__reinterpret_tensor: ret_new_tensor is null"); - // Ensure device_index is always 0 ET_CHECK_OR_RETURN_ERROR( - device_index == 0, + ndim >= 0, InvalidArgument, - "device_index must be 0, got: %d", - device_index); + "aoti_torch__reinterpret_tensor: ndim must be non-negative, got %lld", + static_cast(ndim)); - // Get the dtype from the source tensor - int32_t dtype = 0; - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_dtype(self, &dtype)); - - // Validate dtype using SupportedDTypes - ET_CHECK_OK_OR_RETURN_ERROR(validate_dtype(dtype)); - - // Get the original data pointer from the source tensor - void* data_ptr = self->mutable_data_ptr(); ET_CHECK_OR_RETURN_ERROR( - data_ptr != nullptr, - InvalidArgument, - "Source tensor has null 
data pointer"); - - // Check if the given memory is in the map, if not return error - auto memory_it = memory_to_n_tensor.find(data_ptr); - ET_CHECK_OR_RETURN_ERROR( - memory_it != memory_to_n_tensor.end(), - InvalidArgument, - "Memory address %p is not being tracked by reference counting system", - data_ptr); - - // Convert sizes using utility function from utils.h - std::vector sizes = convert_sizes_to_vector(ndim, sizes_ptr); - - // Convert strides using utility function from utils.h - std::vector strides = - convert_strides_to_vector(ndim, sizes_ptr, strides_ptr); - - // Create new tensor view that reinterprets the same memory with different - // shape/strides This creates a view, not a copy - the data pointer is shared - // Using CUDA-specific tensor maker that supports incontiguous tensors - std::shared_ptr tensor = make_tensor( - sizes, // New sizes with explicit SizesType - data_ptr, // Reuse the same memory from source tensor - {}, // dim_order (empty, will be auto-generated) - strides, // New strides with explicit StridesType - dtype_to_scalar_type(dtype) // Convert dtype with explicit type casting - ); - - ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, + !(sizes_ptr == nullptr && ndim > 0), InvalidArgument, - "Failed to create reinterpreted tensor view"); + "aoti_torch__reinterpret_tensor: sizes_ptr is null but ndim > 0"); - // Store the tensor so it doesn't get destroyed - tensors.insert(tensor); + IntArrayRef sizes(sizes_ptr, static_cast(ndim)); + IntArrayRef strides(strides_ptr, static_cast(ndim)); - *ret_new_tensor = tensor.get(); - - // Increment the reference count for this memory address only if it is owned - // by tensor - memory_to_n_tensor[data_ptr] = memory_to_n_tensor[data_ptr] == NOT_OWN - ? NOT_OWN - : memory_to_n_tensor[data_ptr] + 1; + // Create a new tensor view using as_strided. This creates a tensor that + // shares the same underlying storage but with different sizes, strides, + // and storage offset. 
SlimTensor::as_strided() handles this via copy + // constructor which shares the SharedPtr. + *ret_new_tensor = + new SlimTensor(self->as_strided(sizes, strides, storage_offset)); return Error::Ok; } -AOTITorchError aoti_torch_new_tensor_handle( - Tensor* orig_handle, - Tensor** new_handle) { - // Validate input parameters - ET_CHECK_OR_RETURN_ERROR( - orig_handle != nullptr, - InvalidArgument, - "aoti_torch_new_tensor_handle failed: orig_handle is null"); - - ET_CHECK_OR_RETURN_ERROR( - new_handle != nullptr, - InvalidArgument, - "aoti_torch_new_tensor_handle failed: new_handle is null"); - - // Get metadata from the original tensor - int64_t* sizes_ptr; - int64_t* strides_ptr; - int32_t dtype; - int32_t device_type; - int32_t device_index; - - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_sizes(orig_handle, &sizes_ptr)); - ET_CHECK_OK_OR_RETURN_ERROR( - aoti_torch_get_strides(orig_handle, &strides_ptr)); - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_dtype(orig_handle, &dtype)); - ET_CHECK_OK_OR_RETURN_ERROR( - aoti_torch_get_device_type(orig_handle, &device_type)); - ET_CHECK_OK_OR_RETURN_ERROR( - aoti_torch_get_device_index(orig_handle, &device_index)); - - int64_t ndim = orig_handle->dim(); - - // Validate dtype - ET_CHECK_OK_OR_RETURN_ERROR(validate_dtype(dtype)); - - // Ensure device_index is always 0 - ET_CHECK_OR_RETURN_ERROR( - device_index == 0, - InvalidArgument, - "device_index must be 0, got: %d", - device_index); - - // Get the original data pointer from the source tensor - void* data_ptr = orig_handle->mutable_data_ptr(); - ET_CHECK_OR_RETURN_ERROR( - data_ptr != nullptr, - InvalidArgument, - "Source tensor has null data pointer"); +AOTITorchError +aoti_torch_copy_(SlimTensor* self, SlimTensor* src, int32_t non_blocking) { + (void)non_blocking; // SlimTensor::copy_() is always synchronous for now - // Check if the given memory is in the map - auto memory_it = memory_to_n_tensor.find(data_ptr); ET_CHECK_OR_RETURN_ERROR( - memory_it != 
memory_to_n_tensor.end(), - InvalidArgument, - "Memory address %p is not being tracked by reference counting system", - data_ptr); - - // Convert sizes and strides to vectors - std::vector sizes = convert_sizes_to_vector(ndim, sizes_ptr); - std::vector strides = - convert_strides_to_vector(ndim, sizes_ptr, strides_ptr); - - // Create new tensor that shares the same memory as the original - // This is similar to PyTorch's Tensor copy constructor - creates a new - // tensor object that shares the same underlying storage - std::shared_ptr tensor = make_tensor( - sizes, // Same sizes as original - data_ptr, // Share the same memory from source tensor - {}, // dim_order (empty, will be auto-generated) - strides, // Same strides as original - dtype_to_scalar_type(dtype) // Same dtype as original - ); + self != nullptr, InvalidArgument, "aoti_torch_copy_: self is null"); ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, InvalidArgument, "Failed to create new tensor handle"); - - // Store the tensor so it doesn't get destroyed - tensors.insert(tensor); + src != nullptr, InvalidArgument, "aoti_torch_copy_: src is null"); - *new_handle = tensor.get(); - - // Increment the reference count for this memory address only if it is owned - // by tensor - memory_to_n_tensor[data_ptr] = memory_to_n_tensor[data_ptr] == NOT_OWN - ? 
NOT_OWN - : memory_to_n_tensor[data_ptr] + 1; + // SlimTensor::copy_() handles: + // - Same numel validation + // - Same dtype validation + // - CPU-CPU, CPU-CUDA, CUDA-CPU, CUDA-CUDA copies + // - Contiguous fast path and non-contiguous element-wise copy + self->copy_(*src); return Error::Ok; } -AOTITorchError aoti_torch_item_bool(Tensor* tensor, bool* ret_value) { - // Validate input parameters +AOTITorchError aoti_torch_item_bool(SlimTensor* tensor, bool* ret_value) { ET_CHECK_OR_RETURN_ERROR( tensor != nullptr, InvalidArgument, - "aoti_torch_item_bool failed: tensor is null"); + "aoti_torch_item_bool: tensor is null"); ET_CHECK_OR_RETURN_ERROR( ret_value != nullptr, InvalidArgument, - "aoti_torch_item_bool failed: ret_value is null"); - - // Validate that tensor dtype is bool - int32_t dtype; - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_dtype(tensor, &dtype)); + "aoti_torch_item_bool: ret_value is null"); ET_CHECK_OR_RETURN_ERROR( - dtype == aoti_torch_dtype_bool(), + tensor->numel() == 1, InvalidArgument, - "aoti_torch_item_bool failed: tensor dtype is not bool (got %d)", - dtype); + "aoti_torch_item_bool: tensor must have exactly 1 element, got %zu", + tensor->numel()); - // Get the data pointer - const void* data_ptr = tensor->const_data_ptr(); ET_CHECK_OR_RETURN_ERROR( - data_ptr != nullptr, + tensor->dtype() == ScalarType::Bool, InvalidArgument, - "aoti_torch_item_bool failed: tensor data pointer is null"); - - // Check if tensor is on CUDA or CPU - cudaPointerAttributes attributes{}; - ET_CUDA_CHECK_OR_RETURN_ERROR( - cudaPointerGetAttributes(&attributes, data_ptr)); - - if (attributes.type == cudaMemoryTypeDevice) { - // CUDA memory case: copy from device to host - bool device_value; - ET_CUDA_CHECK_OR_RETURN_ERROR(cudaMemcpy( - &device_value, data_ptr, sizeof(bool), cudaMemcpyDeviceToHost)); - *ret_value = device_value; - } else { - // CPU memory case: direct access - const bool* bool_ptr = static_cast(data_ptr); - *ret_value = *bool_ptr; - } + 
"aoti_torch_item_bool: tensor dtype must be Bool"); + + // SlimTensor::item() handles both CPU and CUDA tensors. + // For CUDA tensors, it copies the value to CPU automatically. + *ret_value = tensor->item(); return Error::Ok; } -AOTITorchError aoti_torch_assign_tensors_out(Tensor* src, Tensor** ret_dst) { - // Validate input parameters +AOTITorchError aoti_torch_assign_tensors_out(SlimTensor* src, SlimTensor** ret_dst) { ET_CHECK_OR_RETURN_ERROR( src != nullptr, InvalidArgument, - "aoti_torch_assign_tensors_out failed: src is null"); + "aoti_torch_assign_tensors_out: src is null"); ET_CHECK_OR_RETURN_ERROR( ret_dst != nullptr, InvalidArgument, - "aoti_torch_assign_tensors_out failed: ret_dst is null"); - - // Get the data pointer from the source tensor - void* data_ptr = src->mutable_data_ptr(); - ET_CHECK_OR_RETURN_ERROR( - data_ptr != nullptr, - InvalidArgument, - "Source tensor has null data pointer"); + "aoti_torch_assign_tensors_out: ret_dst is null"); - // Check if the given memory is in the map, if not return error - auto memory_it = memory_to_n_tensor.find(data_ptr); - ET_CHECK_OR_RETURN_ERROR( - memory_it != memory_to_n_tensor.end(), - InvalidArgument, - "Memory address %p is not being tracked by reference counting system", - data_ptr); - - // Get dtype from source tensor - int32_t dtype = 0; - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_dtype(src, &dtype)); - - // Get sizes and strides from source tensor - int64_t* sizes_ptr; - int64_t* strides_ptr; - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_sizes(src, &sizes_ptr)); - ET_CHECK_OK_OR_RETURN_ERROR(aoti_torch_get_strides(src, &strides_ptr)); - - int64_t ndim = src->dim(); - - // Convert to vectors - std::vector sizes = convert_sizes_to_vector(ndim, sizes_ptr); - std::vector strides = - convert_strides_to_vector(ndim, sizes_ptr, strides_ptr); - - // Create new tensor view that shares the same memory as source tensor - std::shared_ptr tensor = make_tensor( - sizes, - data_ptr, // Share the same memory from 
source tensor - {}, // dim_order (empty, will be auto-generated) - strides, - dtype_to_scalar_type(dtype)); - - ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, - InvalidArgument, - "Failed to create tensor view in aoti_torch_assign_tensors_out"); - - // Store the tensor so it doesn't get destroyed - tensors.insert(tensor); - - *ret_dst = tensor.get(); - - // Increment the reference count for this memory address only if it is owned - // by tensor - memory_to_n_tensor[data_ptr] = memory_to_n_tensor[data_ptr] == NOT_OWN - ? NOT_OWN - : memory_to_n_tensor[data_ptr] + 1; + // Move the source tensor into the destination. After this operation, + // the source tensor will be left in an undefined state (reset). + // This differs from aoti_torch_new_tensor_handle which copies the tensor. + *ret_dst = new SlimTensor(std::move(*src)); return Error::Ok; } + } // extern "C" } // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/shims/memory.h b/backends/cuda/runtime/shims/memory.h index 34b781a5270..036fa5ec6c6 100644 --- a/backends/cuda/runtime/shims/memory.h +++ b/backends/cuda/runtime/shims/memory.h @@ -8,15 +8,20 @@ #pragma once -#include -#include -#include #include +#include +#include +#include +#include + namespace executorch::backends::cuda { -using executorch::backends::aoti::AOTITorchError; -using executorch::backends::aoti::Tensor; +using executorch::runtime::Error; +using AOTITorchError = Error; + +// Use SlimTensor directly in shim APIs to avoid naming conflicts with ETensor +using SlimTensor = executorch::backends::aoti::slim::SlimTensor; extern "C" { @@ -28,21 +33,17 @@ extern "C" { * * @param data Pointer to the memory blob to wrap (must not be null) * @param ndim Number of dimensions in the tensor - * @param sizes_ptr Pointer to array of dimension sizes (using SizesType) - * @param strides_ptr Pointer to array of strides for each dimension (using - * StridesType, can be null for contiguous) - * @param storage_offset Storage offset (must be 0 
for current implementation) - * @param dtype Data type identifier (supports FLOAT32 and BFLOAT16 from - * SupportedDTypes) - * @param device_type Device type (CPU=0, CUDA=1 from SupportedDevices) - * @param device_index Device index (must be 0 for current implementation) - * @param ret_new_tensor Output parameter for the created tensor (must not be - * null) + * @param sizes_ptr Pointer to array of dimension sizes + * @param strides_ptr Pointer to array of strides for each dimension + * @param storage_offset Storage offset in number of elements + * @param dtype Data type identifier (matches PyTorch scalar types) + * @param device_type Device type (CPU=0, CUDA=1) + * @param device_index Device index + * @param ret_new_tensor Output parameter for the created tensor * @param layout Tensor layout identifier (0=strided) * @param opaque_metadata Optional metadata pointer (can be null) * @param opaque_metadata_size Size of opaque metadata in bytes - * @return AOTITorchError error code (Error::Ok on success, or an error code on - * failure) + * @return AOTITorchError error code (Error::Ok on success) */ AOTI_SHIM_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob_v2( void* data, @@ -53,24 +54,23 @@ AOTI_SHIM_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob_v2( int32_t dtype, int32_t device_type, int32_t device_index, - Tensor** ret_new_tensor, + SlimTensor** ret_new_tensor, int32_t layout, const uint8_t* opaque_metadata, int64_t opaque_metadata_size); /** * Creates an uninitialized tensor with specified dimensions, strides, and - * dtyper on either CPU or CUDA device. + * dtype on either CPU or CUDA device. 
* * @param ndim Number of dimensions in the tensor * @param sizes_ptr Pointer to array of dimension sizes * @param strides_ptr Pointer to array of strides for each dimension * @param dtype Data type identifier (matches PyTorch scalar types) * @param device_type Device type (0=CPU, 1=CUDA) - * @param device_index Device index (must be 0 for current implementation) + * @param device_index Device index * @param ret_new_tensor Output parameter for the created tensor - * @return AOTITorchError error code (Error::Ok on success, or an error code on - * failure) + * @return AOTITorchError error code (Error::Ok on success) */ AOTI_SHIM_EXPORT AOTITorchError aoti_torch_empty_strided( int64_t ndim, @@ -79,129 +79,99 @@ AOTI_SHIM_EXPORT AOTITorchError aoti_torch_empty_strided( int32_t dtype, int32_t device_type, int32_t device_index, - Tensor** ret_new_tensor); + SlimTensor** ret_new_tensor); /** - * Deletes a tensor object and frees its associated memory. + * Deletes a tensor object and frees associated resources. * - * @param tensor Pointer to the tensor object to be deleted - * @return AOTITorchError error code (Error::Ok on success, or an error code on - * failure) + * For SlimTensor, the underlying storage uses SharedPtr-based reference + * counting. When the last tensor referencing the storage is deleted, + * the memory is automatically freed. + * + * @param tensor Pointer to the tensor to delete (must not be null) + * @return AOTITorchError error code (Error::Ok on success) */ -AOTI_SHIM_EXPORT AOTITorchError aoti_torch_delete_tensor_object(Tensor* tensor); +AOTI_SHIM_EXPORT AOTITorchError aoti_torch_delete_tensor_object(SlimTensor* tensor); /** - * Creates a tensor view that reinterprets the same underlying memory with - * different shape and strides without copying data. + * Creates a new tensor handle that shares storage with the original tensor. * - * Note that the new tensor will not have the ownership of the underlying - * memory. 
+ * The new handle is a copy of the original tensor's metadata (sizes, strides, + * dtype, device) and shares the same underlying storage via SharedPtr. + * Both tensors will reference the same memory, and the memory will only be + * freed when all references are deleted. * - * @param self Input tensor whose memory will be reinterpreted - * @param ndim Number of dimensions for the new tensor view - * @param sizes_ptr Array of sizes for each dimension - * @param strides_ptr Array of strides for each dimension (or nullptr for - * contiguous) - * @param storage_offset Storage offset (must be 0) - * @param ret_new_tensor Output pointer to store the new tensor view + * @param orig_handle Pointer to the original tensor (must not be null) + * @param new_handle Output parameter for the new tensor handle + * @return AOTITorchError error code (Error::Ok on success) + */ +AOTI_SHIM_EXPORT AOTITorchError +aoti_torch_new_tensor_handle(SlimTensor* orig_handle, SlimTensor** new_handle); + +/** + * Creates a reinterpreted view of a tensor with new sizes, strides, and offset. + * + * This is equivalent to torch.as_strided() - it creates a new tensor that + * shares the same underlying storage but with different view parameters. 
* - * @return Error::Ok on success, appropriate error code on failure + * @param self Original tensor to reinterpret (must not be null) + * @param ndim Number of dimensions for the new view + * @param sizes_ptr Pointer to array of dimension sizes + * @param strides_ptr Pointer to array of strides for each dimension + * @param storage_offset Storage offset in number of elements + * @param ret_new_tensor Output parameter for the reinterpreted tensor view + * @return AOTITorchError error code (Error::Ok on success) */ AOTI_SHIM_EXPORT AOTITorchError aoti_torch__reinterpret_tensor( - Tensor* self, + SlimTensor* self, int64_t ndim, const int64_t* sizes_ptr, const int64_t* strides_ptr, int64_t storage_offset, - Tensor** ret_new_tensor); + SlimTensor** ret_new_tensor); /** * Copies data from source tensor to destination tensor. * - * This function implements copy function for tensors living in CUDA AOTI - * backend. It supports copying between tensors with different shapes (as long - * as they have the same total number of elements) and different memory - * layouts/strides. - * - * Note that currently this function does not support copying between tensors - * with different dtypes. - * - * @param self Destination tensor (data will be overwritten) - * @param src Source tensor (data will be copied from this tensor) - * @param non_blocking Whether the copy should be non-blocking (currently - * ignored) - * - * @return Error::Ok on success, appropriate error code on failure: - * - Error::InvalidArgument: null pointers, dtype mismatch, numel - * mismatch - * - Error::MemoryAllocationFailed: failed to allocate temporary memory - * - Error::Internal: CUDA operation failures - */ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_copy_(Tensor* self, Tensor* src, int32_t non_blocking); - -/** - * Creates a new tensor handle from an existing one. - * - * This function creates a new tensor object that shares the same underlying - * memory as the original tensor. 
Similar to PyTorch's Tensor copy constructor, - * it creates a new handle/reference to the same data without performing a deep - * copy. - * - * The new tensor will: - * - Share the same memory/storage as the original tensor - * - Have the same shape, strides, and dtype as the original - * - Increment the reference count for the underlying memory (if owned) - * - * @param orig_handle Original tensor to create a new handle from (must not be - * null) - * @param new_handle Output pointer to store the new tensor handle (must not be - * null) + * Handles all device combinations (CPU-CPU, CPU-CUDA, CUDA-CPU, CUDA-CUDA) + * and supports tensors with different strides. The destination tensor must + * already be allocated with sufficient storage. * - * @return Error::Ok on success, appropriate error code on failure: - * - Error::InvalidArgument: null pointers or invalid parameters + * @param self Destination tensor (must not be null) + * @param src Source tensor to copy from (must not be null) + * @param non_blocking If true, the copy may be asynchronous (currently ignored) + * @return AOTITorchError error code (Error::Ok on success) */ AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_new_tensor_handle(Tensor* orig_handle, Tensor** new_handle); +aoti_torch_copy_(SlimTensor* self, SlimTensor* src, int32_t non_blocking); /** - * Retrieves a boolean value from a 0D boolean tensor. + * Extracts a boolean scalar value from a single-element tensor. * - * This function extracts the scalar boolean value from a tensor that contains - * a single boolean element. The tensor can be on either CPU or CUDA device. - * For CUDA tensors, the value is copied from device to host memory. + * The tensor must contain exactly one element and have Bool dtype. + * For CUDA tensors, this will synchronize to copy the value to CPU. 
* - * @param tensor Pointer to a 0D boolean tensor (must not be null) - * @param ret_value Output pointer to store the boolean value (must not be null) - * - * @return Error::Ok on success, appropriate error code on failure: - * - Error::InvalidArgument: null pointers or tensor dtype is not bool + * @param tensor Single-element boolean tensor (must not be null) + * @param ret_value Output parameter for the extracted boolean value + * @return AOTITorchError error code (Error::Ok on success) */ AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_item_bool(Tensor* tensor, bool* ret_value); +aoti_torch_item_bool(SlimTensor* tensor, bool* ret_value); /** - * Creates a new tensor that shares the same underlying data as the source - * tensor. - * - * This function creates a new tensor view with the same shape, strides, and - * dtype as the source tensor, sharing the same underlying memory. The new - * tensor handle will be stored in ret_dst. + * Moves a tensor into a new handle and assigns it to the output parameter. * - * @param src The source tensor providing the data and metadata. - * @param ret_dst On output, this will point to the new tensor view. + * Unlike aoti_torch_new_tensor_handle which copies, this function moves the + * source tensor into the destination. After this operation, the source tensor + * is left in an undefined/reset state and should not be used. 
* - * @return Error::Ok on success, appropriate error code on failure: - * - Error::InvalidArgument: null pointers or memory not tracked + * @param src Source tensor to move from (must not be null, will be reset) + * @param ret_dst Output parameter for the new tensor handle + * @return AOTITorchError error code (Error::Ok on success) */ AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_assign_tensors_out(Tensor* src, Tensor** ret_dst); - -// Function to clear all tensors from internal storage -AOTI_SHIM_EXPORT void clear_all_tensors(); +aoti_torch_assign_tensors_out(SlimTensor* src, SlimTensor** ret_dst); -// Function to clear memory tracking map (for test cleanup) -AOTI_SHIM_EXPORT void clear_memory_tracking(); } // extern "C" } // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/shims/memory_slim.cpp b/backends/cuda/runtime/shims/memory_slim.cpp deleted file mode 100644 index 58bf43b34b0..00000000000 --- a/backends/cuda/runtime/shims/memory_slim.cpp +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include - -#include -#include -#include -#include - -namespace executorch::backends::cuda { - -namespace c10 = executorch::backends::aoti::slim::c10; -using c10::Device; -using c10::DeviceIndex; -using c10::DeviceType; -using c10::ScalarType; -using executorch::backends::aoti::slim::empty_strided; -using executorch::backends::aoti::slim::from_blob; -using executorch::backends::aoti::slim::IntArrayRef; - -extern "C" { - -AOTITorchError aoti_torch_create_tensor_from_blob_v2( - void* data, - int64_t ndim, - const int64_t* sizes_ptr, - const int64_t* strides_ptr, - int64_t storage_offset, - int32_t dtype, - int32_t device_type, - int32_t device_index, - Tensor** ret_new_tensor, - int32_t layout, - const uint8_t* opaque_metadata, - int64_t opaque_metadata_size) { - // Unused parameters - (void)layout; - (void)opaque_metadata; - (void)opaque_metadata_size; - - ET_CHECK_OR_RETURN_ERROR( - data != nullptr, - InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2: data is null"); - - ET_CHECK_OR_RETURN_ERROR( - ret_new_tensor != nullptr, - InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2: ret_new_tensor is null"); - - ET_CHECK_OR_RETURN_ERROR( - !(sizes_ptr == nullptr && ndim > 0), - InvalidArgument, - "aoti_torch_create_tensor_from_blob_v2: sizes_ptr is null but ndim > 0"); - - IntArrayRef sizes(sizes_ptr, static_cast(ndim)); - IntArrayRef strides(strides_ptr, static_cast(ndim)); - - // Create the SlimTensor using from_blob (non-owning) - *ret_new_tensor = new Tensor(from_blob( - data, - sizes, - strides, - static_cast(dtype), - Device( - static_cast(device_type), - static_cast(device_index)), - storage_offset)); - - return Error::Ok; -} - -AOTITorchError aoti_torch_empty_strided( - int64_t ndim, - const int64_t* sizes_ptr, - const int64_t* strides_ptr, - int32_t dtype, - int32_t device_type, - int32_t device_index, - Tensor** ret_new_tensor) { - ET_CHECK_OR_RETURN_ERROR( - ret_new_tensor != nullptr, - InvalidArgument, - "aoti_torch_empty_strided: 
ret_new_tensor is null"); - - ET_CHECK_OR_RETURN_ERROR( - !(sizes_ptr == nullptr && ndim > 0), - InvalidArgument, - "aoti_torch_empty_strided: sizes_ptr is null but ndim > 0"); - - IntArrayRef sizes(sizes_ptr, static_cast(ndim)); - IntArrayRef strides(strides_ptr, static_cast(ndim)); - - // Create the SlimTensor using empty_strided (owning) - *ret_new_tensor = new Tensor(empty_strided( - sizes, - strides, - static_cast(dtype), - Device( - static_cast(device_type), - static_cast(device_index)))); - - return Error::Ok; -} - -AOTITorchError aoti_torch_delete_tensor_object(Tensor* tensor) { - ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, - InvalidArgument, - "aoti_torch_delete_tensor_object: tensor is null"); - - // SlimTensor uses SharedPtr for storage, so simply deleting the tensor - // will automatically handle reference counting and free the underlying - // storage when no more references exist. - delete tensor; - - return Error::Ok; -} - -AOTITorchError aoti_torch_new_tensor_handle( - Tensor* orig_handle, - Tensor** new_handle) { - ET_CHECK_OR_RETURN_ERROR( - orig_handle != nullptr, - InvalidArgument, - "aoti_torch_new_tensor_handle: orig_handle is null"); - - ET_CHECK_OR_RETURN_ERROR( - new_handle != nullptr, - InvalidArgument, - "aoti_torch_new_tensor_handle: new_handle is null"); - - // Create a new SlimTensor that shares the same underlying storage. - // SlimTensor's copy constructor shares the SharedPtr, so both - // tensors will reference the same memory. When the last tensor is deleted, - // the storage will be freed. 
- *new_handle = new Tensor(*orig_handle); - - return Error::Ok; -} - -AOTITorchError aoti_torch__reinterpret_tensor( - Tensor* self, - int64_t ndim, - const int64_t* sizes_ptr, - const int64_t* strides_ptr, - int64_t storage_offset, - Tensor** ret_new_tensor) { - ET_CHECK_OR_RETURN_ERROR( - self != nullptr, - InvalidArgument, - "aoti_torch__reinterpret_tensor: self is null"); - - ET_CHECK_OR_RETURN_ERROR( - ret_new_tensor != nullptr, - InvalidArgument, - "aoti_torch__reinterpret_tensor: ret_new_tensor is null"); - - ET_CHECK_OR_RETURN_ERROR( - ndim >= 0, - InvalidArgument, - "aoti_torch__reinterpret_tensor: ndim must be non-negative, got %lld", - static_cast(ndim)); - - ET_CHECK_OR_RETURN_ERROR( - !(sizes_ptr == nullptr && ndim > 0), - InvalidArgument, - "aoti_torch__reinterpret_tensor: sizes_ptr is null but ndim > 0"); - - IntArrayRef sizes(sizes_ptr, static_cast(ndim)); - IntArrayRef strides(strides_ptr, static_cast(ndim)); - - // Create a new tensor view using as_strided. This creates a tensor that - // shares the same underlying storage but with different sizes, strides, - // and storage offset. SlimTensor::as_strided() handles this via copy - // constructor which shares the SharedPtr. 
- *ret_new_tensor = - new Tensor(self->as_strided(sizes, strides, storage_offset)); - - return Error::Ok; -} - -AOTITorchError -aoti_torch_copy_(Tensor* self, Tensor* src, int32_t non_blocking) { - (void)non_blocking; // SlimTensor::copy_() is always synchronous for now - - ET_CHECK_OR_RETURN_ERROR( - self != nullptr, InvalidArgument, "aoti_torch_copy_: self is null"); - - ET_CHECK_OR_RETURN_ERROR( - src != nullptr, InvalidArgument, "aoti_torch_copy_: src is null"); - - // SlimTensor::copy_() handles: - // - Same numel validation - // - Same dtype validation - // - CPU-CPU, CPU-CUDA, CUDA-CPU, CUDA-CUDA copies - // - Contiguous fast path and non-contiguous element-wise copy - self->copy_(*src); - - return Error::Ok; -} - -AOTITorchError aoti_torch_item_bool(Tensor* tensor, bool* ret_value) { - ET_CHECK_OR_RETURN_ERROR( - tensor != nullptr, - InvalidArgument, - "aoti_torch_item_bool: tensor is null"); - - ET_CHECK_OR_RETURN_ERROR( - ret_value != nullptr, - InvalidArgument, - "aoti_torch_item_bool: ret_value is null"); - - ET_CHECK_OR_RETURN_ERROR( - tensor->numel() == 1, - InvalidArgument, - "aoti_torch_item_bool: tensor must have exactly 1 element, got %zu", - tensor->numel()); - - ET_CHECK_OR_RETURN_ERROR( - tensor->dtype() == ScalarType::Bool, - InvalidArgument, - "aoti_torch_item_bool: tensor dtype must be Bool"); - - // SlimTensor::item() handles both CPU and CUDA tensors. - // For CUDA tensors, it copies the value to CPU automatically. - *ret_value = tensor->item(); - - return Error::Ok; -} - -AOTITorchError aoti_torch_assign_tensors_out(Tensor* src, Tensor** ret_dst) { - ET_CHECK_OR_RETURN_ERROR( - src != nullptr, - InvalidArgument, - "aoti_torch_assign_tensors_out: src is null"); - - ET_CHECK_OR_RETURN_ERROR( - ret_dst != nullptr, - InvalidArgument, - "aoti_torch_assign_tensors_out: ret_dst is null"); - - // Move the source tensor into the destination. After this operation, - // the source tensor will be left in an undefined state (reset). 
- // This differs from aoti_torch_new_tensor_handle which copies the tensor. - *ret_dst = new Tensor(std::move(*src)); - - return Error::Ok; -} - -} // extern "C" - -} // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/shims/memory_slim.h b/backends/cuda/runtime/shims/memory_slim.h deleted file mode 100644 index 5a0845f243c..00000000000 --- a/backends/cuda/runtime/shims/memory_slim.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#pragma once - -#include - -#include -#include -#include -#include - -namespace executorch::backends::cuda { - -using executorch::runtime::Error; -using AOTITorchError = Error; -using Tensor = executorch::backends::aoti::slim::SlimTensor; - -extern "C" { - -/** - * Creates a tensor object from an existing memory blob without copying the - * data. The tensor will wrap the provided memory and will not take ownership of - * it. When the tensor is deleted, the original memory will remain valid and - * must be freed by the caller. 
- * - * @param data Pointer to the memory blob to wrap (must not be null) - * @param ndim Number of dimensions in the tensor - * @param sizes_ptr Pointer to array of dimension sizes - * @param strides_ptr Pointer to array of strides for each dimension - * @param storage_offset Storage offset in number of elements - * @param dtype Data type identifier (matches PyTorch scalar types) - * @param device_type Device type (CPU=0, CUDA=1) - * @param device_index Device index - * @param ret_new_tensor Output parameter for the created tensor - * @param layout Tensor layout identifier (0=strided) - * @param opaque_metadata Optional metadata pointer (can be null) - * @param opaque_metadata_size Size of opaque metadata in bytes - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob_v2( - void* data, - int64_t ndim, - const int64_t* sizes_ptr, - const int64_t* strides_ptr, - int64_t storage_offset, - int32_t dtype, - int32_t device_type, - int32_t device_index, - Tensor** ret_new_tensor, - int32_t layout, - const uint8_t* opaque_metadata, - int64_t opaque_metadata_size); - -/** - * Creates an uninitialized tensor with specified dimensions, strides, and - * dtype on either CPU or CUDA device. 
- * - * @param ndim Number of dimensions in the tensor - * @param sizes_ptr Pointer to array of dimension sizes - * @param strides_ptr Pointer to array of strides for each dimension - * @param dtype Data type identifier (matches PyTorch scalar types) - * @param device_type Device type (0=CPU, 1=CUDA) - * @param device_index Device index - * @param ret_new_tensor Output parameter for the created tensor - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError aoti_torch_empty_strided( - int64_t ndim, - const int64_t* sizes_ptr, - const int64_t* strides_ptr, - int32_t dtype, - int32_t device_type, - int32_t device_index, - Tensor** ret_new_tensor); - -/** - * Deletes a tensor object and frees associated resources. - * - * For SlimTensor, the underlying storage uses SharedPtr-based reference - * counting. When the last tensor referencing the storage is deleted, - * the memory is automatically freed. - * - * @param tensor Pointer to the tensor to delete (must not be null) - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError aoti_torch_delete_tensor_object(Tensor* tensor); - -/** - * Creates a new tensor handle that shares storage with the original tensor. - * - * The new handle is a copy of the original tensor's metadata (sizes, strides, - * dtype, device) and shares the same underlying storage via SharedPtr. - * Both tensors will reference the same memory, and the memory will only be - * freed when all references are deleted. - * - * @param orig_handle Pointer to the original tensor (must not be null) - * @param new_handle Output parameter for the new tensor handle - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_new_tensor_handle(Tensor* orig_handle, Tensor** new_handle); - -/** - * Creates a reinterpreted view of a tensor with new sizes, strides, and offset. 
- * - * This is equivalent to torch.as_strided() - it creates a new tensor that - * shares the same underlying storage but with different view parameters. - * - * @param self Original tensor to reinterpret (must not be null) - * @param ndim Number of dimensions for the new view - * @param sizes_ptr Pointer to array of dimension sizes - * @param strides_ptr Pointer to array of strides for each dimension - * @param storage_offset Storage offset in number of elements - * @param ret_new_tensor Output parameter for the reinterpreted tensor view - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError aoti_torch__reinterpret_tensor( - Tensor* self, - int64_t ndim, - const int64_t* sizes_ptr, - const int64_t* strides_ptr, - int64_t storage_offset, - Tensor** ret_new_tensor); - -/** - * Copies data from source tensor to destination tensor. - * - * Handles all device combinations (CPU-CPU, CPU-CUDA, CUDA-CPU, CUDA-CUDA) - * and supports tensors with different strides. The destination tensor must - * already be allocated with sufficient storage. - * - * @param self Destination tensor (must not be null) - * @param src Source tensor to copy from (must not be null) - * @param non_blocking If true, the copy may be asynchronous (currently ignored) - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_copy_(Tensor* self, Tensor* src, int32_t non_blocking); - -/** - * Extracts a boolean scalar value from a single-element tensor. - * - * The tensor must contain exactly one element and have Bool dtype. - * For CUDA tensors, this will synchronize to copy the value to CPU. 
- * - * @param tensor Single-element boolean tensor (must not be null) - * @param ret_value Output parameter for the extracted boolean value - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_item_bool(Tensor* tensor, bool* ret_value); - -/** - * Moves a tensor into a new handle and assigns it to the output parameter. - * - * Unlike aoti_torch_new_tensor_handle which copies, this function moves the - * source tensor into the destination. After this operation, the source tensor - * is left in an undefined/reset state and should not be used. - * - * @param src Source tensor to move from (must not be null, will be reset) - * @param ret_dst Output parameter for the new tensor handle - * @return AOTITorchError error code (Error::Ok on success) - */ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_assign_tensors_out(Tensor* src, Tensor** ret_dst); - -} // extern "C" - -} // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/shims/tests/CMakeLists.txt b/backends/cuda/runtime/shims/tests/CMakeLists.txt index 204c08688c4..291e3052bbd 100644 --- a/backends/cuda/runtime/shims/tests/CMakeLists.txt +++ b/backends/cuda/runtime/shims/tests/CMakeLists.txt @@ -35,7 +35,7 @@ endif() # Find installed ExecuTorch find_package(executorch CONFIG REQUIRED HINTS ${CMAKE_INSTALL_PREFIX}) -# List of test files +# List of SlimTensor-based test files (now the primary tests) set(CUDA_SHIM_TESTS test_aoti_torch_create_tensor_from_blob_v2 test_aoti_torch_empty_strided @@ -49,6 +49,7 @@ set(CUDA_SHIM_TESTS enable_testing() +# Build SlimTensor-based tests foreach(test_name ${CUDA_SHIM_TESTS}) add_executable(${test_name} ${test_name}.cpp) @@ -57,16 +58,15 @@ foreach(test_name ${CUDA_SHIM_TESTS}) ${CUDAToolkit_INCLUDE_DIRS} ) + target_compile_definitions(${test_name} PRIVATE CUDA_AVAILABLE=1) + target_link_libraries( ${test_name} PRIVATE GTest::gtest GTest::gtest_main aoti_cuda_shims - aoti_cuda_backend - cuda_tensor_maker - 
cuda_platform + slimtensor executorch_core - extension_tensor CUDA::cudart ) diff --git a/backends/cuda/runtime/shims/tests/targets.bzl b/backends/cuda/runtime/shims/tests/targets.bzl index a6b18eba4c8..04f7aa2f963 100644 --- a/backends/cuda/runtime/shims/tests/targets.bzl +++ b/backends/cuda/runtime/shims/tests/targets.bzl @@ -3,35 +3,12 @@ load("@fbcode_macros//build_defs:cpp_unittest.bzl", "cpp_unittest") load("@fbcode_macros//build_defs/lib:re_test_utils.bzl", "re_test_utils") def cuda_shim_cpp_unittest(name): + """Unittest for SlimTensor-based shim functions.""" cpp_unittest( name = "test_" + name, srcs = [ "test_" + name + ".cpp", ], - deps = [ - "//executorch/backends/aoti:common_shims", - "//executorch/backends/cuda/runtime:runtime_shims", - "//executorch/extension/tensor:tensor", - "//executorch/runtime/core:core", - "//executorch/runtime/platform:platform", - "//executorch/runtime/core/exec_aten:lib", - ], - external_deps = [ - ("cuda", None, "cuda-lazy"), - ], - keep_gpu_sections = True, - remote_execution = re_test_utils.remote_execution( - platform = "gpu-remote-execution", - ), - ) - -def cuda_shim_slim_cpp_unittest(name): - """Unittest for SlimTensor-based shim functions.""" - cpp_unittest( - name = "test_" + name + "_slim", - srcs = [ - "test_" + name + "_slim.cpp", - ], deps = [ "//executorch/backends/cuda/runtime:runtime_shims_slim", "//executorch/backends/aoti:common_shims", @@ -58,24 +35,12 @@ def define_common_targets(): The directory containing this targets.bzl file should also contain both TARGETS and BUCK files that call this function. 
""" - # Original ETensor-based shim tests, will be removed after migration + # SlimTensor-based shim tests (now the primary tests) cuda_shim_cpp_unittest("aoti_torch_empty_strided") - cuda_shim_cpp_unittest("aoti_torch_delete_tensor_object") cuda_shim_cpp_unittest("aoti_torch_create_tensor_from_blob_v2") + cuda_shim_cpp_unittest("aoti_torch_delete_tensor_object") + cuda_shim_cpp_unittest("aoti_torch_new_tensor_handle") cuda_shim_cpp_unittest("aoti_torch__reinterpret_tensor") cuda_shim_cpp_unittest("aoti_torch_copy_") - cuda_shim_cpp_unittest("aoti_torch_cuda_guard") - cuda_shim_cpp_unittest("aoti_torch_cuda__weight_int4pack_mm") - cuda_shim_cpp_unittest("aoti_torch_new_tensor_handle") cuda_shim_cpp_unittest("aoti_torch_item_bool") cuda_shim_cpp_unittest("aoti_torch_assign_tensors_out") - - # SlimTensor-based shim tests - cuda_shim_slim_cpp_unittest("aoti_torch_empty_strided") - cuda_shim_slim_cpp_unittest("aoti_torch_create_tensor_from_blob_v2") - cuda_shim_slim_cpp_unittest("aoti_torch_delete_tensor_object") - cuda_shim_slim_cpp_unittest("aoti_torch_new_tensor_handle") - cuda_shim_slim_cpp_unittest("aoti_torch__reinterpret_tensor") - cuda_shim_slim_cpp_unittest("aoti_torch_copy_") - cuda_shim_slim_cpp_unittest("aoti_torch_item_bool") - cuda_shim_slim_cpp_unittest("aoti_torch_assign_tensors_out") diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor.cpp index d3044810b15..d2ad645136e 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor.cpp @@ -7,806 +7,686 @@ */ #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -using namespace executorch::backends::aoti; -using namespace executorch::backends::cuda; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; - -// Test 
fixture for aoti_torch__reinterpret_tensor tests -class AOTITorchReinterpretTensorTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +#include +#include +#include +#include +#include - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +using namespace executorch::backends::cuda; +using executorch::runtime::Error; - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Clear any remaining tensors from previous tests - clear_all_tensors(); - } +namespace { - void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - // Clear the global tensor storage using the provided function - clear_all_tensors(); +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; } - - // Helper to calculate number of elements from sizes - int64_t calculate_numel(const std::vector& sizes) { - int64_t numel = 1; - for (int64_t size : sizes) { - numel *= size; - } - return numel; + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; } + return strides; +} - // Helper to calculate contiguous strides from sizes - std::vector calculate_contiguous_strides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } +} // namespace - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - 
strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; +class AOTITorchReinterpretTensorSlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); } - // Helper to create a source tensor using empty_strided (which allocates new - // memory) - Tensor* create_source_tensor( + Tensor* createTestTensor( const std::vector& sizes, - int32_t dtype = 6, // float32 - int32_t device_type = 1, // CUDA + const std::vector& strides = {}, + int32_t dtype = static_cast(slim_c10::ScalarType::Float), + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), int32_t device_index = 0) { - std::vector strides = calculate_contiguous_strides(sizes); + Tensor* tensor = nullptr; + + std::vector effective_strides = strides; + if (strides.empty()) { + effective_strides = calculateContiguousStrides(sizes); + } - Tensor* tensor; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - strides.data(), + effective_strides.data(), dtype, device_type, device_index, &tensor); - if (error != Error::Ok) { - return nullptr; - } - - return tensor; + return (error == Error::Ok) ? 
tensor : nullptr; } - - private: - std::vector cuda_memory_buffers_; - std::vector cpu_memory_buffers_; }; -// Test basic functionality: reinterpret tensor with different shapes -TEST_F(AOTITorchReinterpretTensorTest, BasicReinterpretation) { - // Create a source tensor with shape [12] (1D with 12 elements) - std::vector source_sizes = {12}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); - - // Store the original data pointer - void* original_data_ptr = source_tensor->mutable_data_ptr(); - ASSERT_NE(original_data_ptr, nullptr); +// ============================================================================ +// Basic Functionality Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, BasicView_CPU) { + std::vector sizes = {2, 3, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); - // Reinterpret as [3, 4] (2D with same number of elements) - std::vector new_sizes = {3, 4}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); + std::vector new_sizes = {6, 4}; + std::vector new_strides = {4, 1}; + int64_t storage_offset = 0; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + storage_offset, + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); - - // Check that the reinterpreted tensor has the new shape - EXPECT_EQ(reinterpreted_tensor->dim(), 2); - EXPECT_EQ(reinterpreted_tensor->size(0), 3); - EXPECT_EQ(reinterpreted_tensor->size(1), 4); - - // CRITICAL: Check that the reinterpreted tensor uses the SAME memory - void* reinterpreted_data_ptr = 
reinterpreted_tensor->mutable_data_ptr(); - EXPECT_EQ(reinterpreted_data_ptr, original_data_ptr) - << "Reinterpreted tensor should use the same memory as the source tensor"; - - // Write data through the original tensor and verify it's visible through the - // reinterpreted tensor - std::vector test_data = { - 1.0f, - 2.0f, - 3.0f, - 4.0f, - 5.0f, - 6.0f, - 7.0f, - 8.0f, - 9.0f, - 10.0f, - 11.0f, - 12.0f}; - cudaError_t cuda_err = cudaMemcpy( - original_data_ptr, - test_data.data(), - test_data.size() * sizeof(float), - cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess); - - // Read back through the reinterpreted tensor - std::vector readback_data(12); - cuda_err = cudaMemcpy( - readback_data.data(), - reinterpreted_data_ptr, - readback_data.size() * sizeof(float), - cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - - // Verify the data matches - for (size_t i = 0; i < test_data.size(); i++) { - EXPECT_EQ(readback_data[i], test_data[i]) - << "Data should be the same through both tensors at index " << i; - } -} - -// Test reinterpreting with different strides -TEST_F(AOTITorchReinterpretTensorTest, ReinterpretWithCustomStrides) { - // Create a source tensor with shape [2, 6] (contiguous) - std::vector source_sizes = {2, 6}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); - - void* original_data_ptr = source_tensor->mutable_data_ptr(); - ASSERT_NE(original_data_ptr, nullptr); + ASSERT_NE(view_tensor, nullptr); - // Reinterpret as [3, 4] with custom strides (still valid for the same memory) - std::vector new_sizes = {3, 4}; - std::vector new_strides = {4, 1}; // Row-major strides for [3, 4] + EXPECT_EQ(view_tensor->dim(), 2); + EXPECT_EQ(view_tensor->size(0), 6); + EXPECT_EQ(view_tensor->size(1), 4); + EXPECT_EQ(view_tensor->stride(0), 4); + EXPECT_EQ(view_tensor->stride(1), 1); - Tensor* reinterpreted_tensor; - AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, - 
new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + EXPECT_EQ(view_tensor->data_ptr(), orig_tensor->data_ptr()); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); +} - // Check shape - EXPECT_EQ(reinterpreted_tensor->dim(), 2); - EXPECT_EQ(reinterpreted_tensor->size(0), 3); - EXPECT_EQ(reinterpreted_tensor->size(1), 4); +TEST_F(AOTITorchReinterpretTensorSlimTest, NullSelf) { + std::vector sizes = {2, 3}; + std::vector strides = {3, 1}; - // CRITICAL: Check that the reinterpreted tensor uses the SAME memory - void* reinterpreted_data_ptr = reinterpreted_tensor->mutable_data_ptr(); - EXPECT_EQ(reinterpreted_data_ptr, original_data_ptr) - << "Reinterpreted tensor should use the same memory as the source tensor"; + Tensor* view_tensor = nullptr; + AOTITorchError error = aoti_torch__reinterpret_tensor( + nullptr, sizes.size(), sizes.data(), strides.data(), 0, &view_tensor); - // Verify strides were set correctly - int64_t* tensor_strides; - error = aoti_torch_get_strides(reinterpreted_tensor, &tensor_strides); - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(tensor_strides[0], 4); - EXPECT_EQ(tensor_strides[1], 1); + EXPECT_EQ(error, Error::InvalidArgument); } -// Test error cases: null input tensor -TEST_F(AOTITorchReinterpretTensorTest, NullInputTensor) { - std::vector new_sizes = {2, 3}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); +TEST_F(AOTITorchReinterpretTensorSlimTest, NullReturnPointer) { + std::vector sizes = {2, 3}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); + + std::vector new_sizes = {6}; + std::vector new_strides = {1}; - Tensor* reinterpreted_tensor; AOTITorchError error = 
aoti_torch__reinterpret_tensor( - nullptr, // null input tensor + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + 0, + nullptr); EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); } -// Test error cases: null sizes pointer -TEST_F(AOTITorchReinterpretTensorTest, NullSizesPointer) { - std::vector source_sizes = {6}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); +TEST_F(AOTITorchReinterpretTensorSlimTest, NegativeNdim) { + std::vector sizes = {2, 3}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); - std::vector new_strides = {2, 1}; + std::vector new_sizes = {6}; + std::vector new_strides = {1}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, - 2, // ndim > 0 - nullptr, // null sizes pointer - new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + orig_tensor, -1, new_sizes.data(), new_strides.data(), 0, &view_tensor); EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); } -// Test error cases: null return tensor pointer -TEST_F(AOTITorchReinterpretTensorTest, NullReturnTensorPointer) { - std::vector source_sizes = {6}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); +// ============================================================================ +// Storage Offset Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, WithStorageOffset_CPU) { + std::vector sizes = {4, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + 
static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); - std::vector new_sizes = {2, 3}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); + std::vector new_sizes = {2, 4}; + std::vector new_strides = {4, 1}; + int64_t storage_offset = 4; // Skip first row + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 0, // storage_offset - nullptr); // null return tensor pointer + storage_offset, + &view_tensor); - EXPECT_EQ(error, Error::InvalidArgument); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(view_tensor, nullptr); + + EXPECT_EQ(view_tensor->dim(), 2); + EXPECT_EQ(view_tensor->size(0), 2); + EXPECT_EQ(view_tensor->size(1), 4); + + char* orig_ptr = static_cast(orig_tensor->data_ptr()); + char* view_ptr = static_cast(view_tensor->data_ptr()); + EXPECT_EQ(view_ptr, orig_ptr + storage_offset * sizeof(float)); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test error cases: non-zero storage offset (should fail) -TEST_F(AOTITorchReinterpretTensorTest, NonZeroStorageOffset) { - std::vector source_sizes = {6}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); +// ============================================================================ +// Memory Sharing Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, MemorySharing_CPU) { + std::vector sizes = {6}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); + + void* orig_ptr = orig_tensor->data_ptr(); std::vector new_sizes = {2, 3}; - std::vector new_strides = 
calculate_contiguous_strides(new_sizes); + std::vector new_strides = {3, 1}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 1, // non-zero storage_offset (should fail) - &reinterpreted_tensor); + 0, + &view_tensor); - EXPECT_EQ(error, Error::InvalidArgument); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(view_tensor, nullptr); + + EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + + EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); + + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test reinterpreting CPU tensor -TEST_F(AOTITorchReinterpretTensorTest, ReinterpretCPUTensor) { - // Create a CPU tensor with shape [8] - std::vector source_sizes = {8}; - Tensor* source_tensor = create_source_tensor( - source_sizes, - 6, // float32 - 0, // CPU device +TEST_F(AOTITorchReinterpretTensorSlimTest, MultipleViews_CPU) { + std::vector sizes = {24}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), 0); - ASSERT_NE(source_tensor, nullptr); + ASSERT_NE(orig_tensor, nullptr); - void* original_data_ptr = source_tensor->mutable_data_ptr(); - ASSERT_NE(original_data_ptr, nullptr); + void* orig_ptr = orig_tensor->data_ptr(); - // Reinterpret as [2, 4] - std::vector new_sizes = {2, 4}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); + std::vector sizes1 = {2, 12}; + std::vector strides1 = {12, 1}; + + std::vector sizes2 = {4, 6}; + std::vector strides2 = {6, 1}; + + std::vector sizes3 = {2, 3, 4}; + std::vector strides3 = {12, 4, 1}; + + Tensor* view1 = nullptr; + Tensor* view2 = nullptr; + Tensor* view3 = nullptr; + + EXPECT_EQ( + aoti_torch__reinterpret_tensor( + orig_tensor, + sizes1.size(), + sizes1.data(), + 
strides1.data(), + 0, + &view1), + Error::Ok); + EXPECT_EQ( + aoti_torch__reinterpret_tensor( + orig_tensor, + sizes2.size(), + sizes2.data(), + strides2.data(), + 0, + &view2), + Error::Ok); + EXPECT_EQ( + aoti_torch__reinterpret_tensor( + orig_tensor, + sizes3.size(), + sizes3.data(), + strides3.data(), + 0, + &view3), + Error::Ok); + + EXPECT_EQ(view1->data_ptr(), orig_ptr); + EXPECT_EQ(view2->data_ptr(), orig_ptr); + EXPECT_EQ(view3->data_ptr(), orig_ptr); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + + EXPECT_EQ(view1->data_ptr(), orig_ptr); + EXPECT_EQ(view2->data_ptr(), orig_ptr); + EXPECT_EQ(view3->data_ptr(), orig_ptr); + + EXPECT_EQ(aoti_torch_delete_tensor_object(view1), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view2), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view3), Error::Ok); +} + +// ============================================================================ +// Dimension Change Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, ExpandDimensions_CPU) { + std::vector sizes = {6}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); + EXPECT_EQ(orig_tensor->dim(), 1); + + std::vector new_sizes = {2, 3}; + std::vector new_strides = {3, 1}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + 0, + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); - - // Check that the reinterpreted tensor uses the SAME memory - void* reinterpreted_data_ptr = reinterpreted_tensor->mutable_data_ptr(); - EXPECT_EQ(reinterpreted_data_ptr, original_data_ptr) - << 
"Reinterpreted CPU tensor should use the same memory as the source tensor"; - - // Test direct memory access for CPU tensors - float* original_float_ptr = reinterpret_cast(original_data_ptr); - float* reinterpreted_float_ptr = - reinterpret_cast(reinterpreted_data_ptr); - - // Write through original and read through reinterpreted - original_float_ptr[0] = 42.0f; - EXPECT_EQ(reinterpreted_float_ptr[0], 42.0f) - << "Changes through original tensor should be visible through reinterpreted tensor"; -} + ASSERT_NE(view_tensor, nullptr); + EXPECT_EQ(view_tensor->dim(), 2); -// Test that deleting source tensor doesn't affect reinterpreted tensor (they -// share memory) -TEST_F(AOTITorchReinterpretTensorTest, DeletionBehavior) { - std::vector source_sizes = {6}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); +} - void* shared_data_ptr = source_tensor->mutable_data_ptr(); +TEST_F(AOTITorchReinterpretTensorSlimTest, CollapseDimensions_CPU) { + std::vector sizes = {2, 3, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); + EXPECT_EQ(orig_tensor->dim(), 3); - // Reinterpret as [2, 3] - std::vector new_sizes = {2, 3}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); + std::vector new_sizes = {24}; + std::vector new_strides = {1}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), 0, - &reinterpreted_tensor); + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); + ASSERT_NE(view_tensor, nullptr); + EXPECT_EQ(view_tensor->dim(), 1); + 
EXPECT_EQ(view_tensor->numel(), 24); - // Verify they share the same memory - EXPECT_EQ(reinterpreted_tensor->mutable_data_ptr(), shared_data_ptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); +} - // Delete the source tensor (which owns the memory) - error = aoti_torch_delete_tensor_object(source_tensor); - EXPECT_EQ(error, Error::Ok); +TEST_F(AOTITorchReinterpretTensorSlimTest, ScalarTensorView_CPU) { + std::vector sizes = {1}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); + + std::vector new_sizes = {}; + std::vector new_strides = {}; - // The reinterpreted tensor should still be valid but the memory might be - // freed Since the source tensor owned the memory, the reinterpreted tensor - // becomes invalid This is expected behavior - the user needs to manage the - // lifecycle properly + Tensor* view_tensor = nullptr; + AOTITorchError error = aoti_torch__reinterpret_tensor( + orig_tensor, 0, new_sizes.data(), new_strides.data(), 0, &view_tensor); - // Clean up the reinterpreted tensor - error = aoti_torch_delete_tensor_object(reinterpreted_tensor); EXPECT_EQ(error, Error::Ok); -} + ASSERT_NE(view_tensor, nullptr); + EXPECT_EQ(view_tensor->dim(), 0); + EXPECT_EQ(view_tensor->numel(), 1); -// Test scalar tensor reinterpretation -TEST_F(AOTITorchReinterpretTensorTest, ReinterpretScalarTensor) { - // Create a scalar tensor (0D) - std::vector source_sizes = {}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); +} - void* original_data_ptr = source_tensor->mutable_data_ptr(); +// 
============================================================================ +// Stride Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, TransposeViaStrides_CPU) { + std::vector sizes = {3, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); - // Try to reinterpret scalar as [1] (1D with 1 element) - std::vector new_sizes = {1}; - std::vector new_strides = {1}; + std::vector new_sizes = {4, 3}; + std::vector new_strides = {1, 4}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), 0, - &reinterpreted_tensor); + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); - - // Check that the reinterpreted tensor uses the SAME memory - EXPECT_EQ(reinterpreted_tensor->mutable_data_ptr(), original_data_ptr); + ASSERT_NE(view_tensor, nullptr); + EXPECT_EQ(view_tensor->size(0), 4); + EXPECT_EQ(view_tensor->size(1), 3); + EXPECT_EQ(view_tensor->stride(0), 1); + EXPECT_EQ(view_tensor->stride(1), 4); - // Check new shape - EXPECT_EQ(reinterpreted_tensor->dim(), 1); - EXPECT_EQ(reinterpreted_tensor->size(0), 1); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test reinterpreting tensor with zero-sized dimension -// TODO: This test is disabled because zero-sized tensors have complex stride -// validation requirements that need further investigation -TEST_F(AOTITorchReinterpretTensorTest, DISABLED_ReinterpretZeroSizedTensor) { - // Create a tensor with shape [0, 5] (zero elements) - std::vector source_sizes = {0, 5}; - Tensor* source_tensor = create_source_tensor(source_sizes); - 
ASSERT_NE(source_tensor, nullptr); - - void* original_data_ptr = source_tensor->mutable_data_ptr(); +// ============================================================================ +// Different Dtype Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, Int64Tensor_CPU) { + std::vector sizes = {6}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); - // Reinterpret as [5, 0] (still zero elements) - std::vector new_sizes = {5, 0}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); + std::vector new_sizes = {2, 3}; + std::vector new_strides = {3, 1}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), 0, - &reinterpreted_tensor); + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); - - // Check that the reinterpreted tensor uses the SAME memory - EXPECT_EQ(reinterpreted_tensor->mutable_data_ptr(), original_data_ptr); + ASSERT_NE(view_tensor, nullptr); + EXPECT_EQ(view_tensor->itemsize(), 8); - // Check new shape - EXPECT_EQ(reinterpreted_tensor->dim(), 2); - EXPECT_EQ(reinterpreted_tensor->size(0), 5); - EXPECT_EQ(reinterpreted_tensor->size(1), 0); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test with nullptr strides (should use contiguous strides) -TEST_F(AOTITorchReinterpretTensorTest, NullStridesPointer) { - std::vector source_sizes = {12}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); - - void* original_data_ptr = source_tensor->mutable_data_ptr(); +TEST_F(AOTITorchReinterpretTensorSlimTest, 
BFloat16Tensor_CPU) { + std::vector sizes = {6}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::BFloat16), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(orig_tensor, nullptr); - // Reinterpret as [3, 4] with null strides (should calculate contiguous - // strides) - std::vector new_sizes = {3, 4}; + std::vector new_sizes = {2, 3}; + std::vector new_strides = {3, 1}; - Tensor* reinterpreted_tensor; + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), - nullptr, // null strides - should calculate contiguous strides + new_strides.data(), 0, - &reinterpreted_tensor); + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); - - // Check that the reinterpreted tensor uses the SAME memory - EXPECT_EQ(reinterpreted_tensor->mutable_data_ptr(), original_data_ptr); + ASSERT_NE(view_tensor, nullptr); + EXPECT_EQ(view_tensor->itemsize(), 2); - // Check that contiguous strides were calculated correctly - int64_t* tensor_strides; - error = aoti_torch_get_strides(reinterpreted_tensor, &tensor_strides); - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(tensor_strides[0], 4); // stride for dimension 0 should be 4 - EXPECT_EQ(tensor_strides[1], 1); // stride for dimension 1 should be 1 + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test bf16 tensor reinterpretation -TEST_F(AOTITorchReinterpretTensorTest, ReinterpretBF16Tensor) { - // Create a bf16 source tensor with shape [6] - std::vector source_sizes = {6}; - Tensor* source_tensor = create_source_tensor( - source_sizes, - static_cast( - SupportedDTypes::BFLOAT16), // bf16 dtype from SupportedDTypes - static_cast( - SupportedDevices::CUDA), // CUDA device from SupportedDevices - 0); // device_index must be 0 - ASSERT_NE(source_tensor, nullptr); - - 
void* original_data_ptr = source_tensor->mutable_data_ptr(); - ASSERT_NE(original_data_ptr, nullptr); - - // Verify the tensor is actually bf16 - int32_t actual_dtype = 0; - AOTITorchError dtype_check_error = - aoti_torch_get_dtype(source_tensor, &actual_dtype); - EXPECT_EQ(dtype_check_error, Error::Ok); - EXPECT_EQ(actual_dtype, static_cast(SupportedDTypes::BFLOAT16)) - << "Source tensor should have bfloat16 dtype"; - - // Reinterpret as [2, 3] (same number of elements) - std::vector new_sizes = {2, 3}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); +// ============================================================================ +// CUDA Tests +// ============================================================================ + +TEST_F(AOTITorchReinterpretTensorSlimTest, BasicView_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - Tensor* reinterpreted_tensor; + std::vector sizes = {2, 3, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); + EXPECT_TRUE(orig_tensor->is_cuda()); + + std::vector new_sizes = {6, 4}; + std::vector new_strides = {4, 1}; + + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + 0, + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(reinterpreted_tensor, nullptr); - - // Check that the reinterpreted tensor has the new shape - EXPECT_EQ(reinterpreted_tensor->dim(), 2); - EXPECT_EQ(reinterpreted_tensor->size(0), 2); - EXPECT_EQ(reinterpreted_tensor->size(1), 3); - - // Verify the dtype is preserved as bf16 - int32_t reinterpreted_dtype = 0; - dtype_check_error = - aoti_torch_get_dtype(reinterpreted_tensor, &reinterpreted_dtype); - EXPECT_EQ(dtype_check_error, Error::Ok); - 
EXPECT_EQ( - reinterpreted_dtype, static_cast(SupportedDTypes::BFLOAT16)) - << "Reinterpreted tensor should preserve bfloat16 dtype"; - - // CRITICAL: Check that the reinterpreted tensor uses the SAME memory - void* reinterpreted_data_ptr = reinterpreted_tensor->mutable_data_ptr(); - EXPECT_EQ(reinterpreted_data_ptr, original_data_ptr) - << "Reinterpreted tensor should use the same memory as the source tensor"; - - // Test memory sharing by writing data through the original tensor - // and verifying it's visible through the reinterpreted tensor - // Note: bf16 has 2 bytes per element - std::vector test_data_bf16 = { - 0x3F80, 0x4000, 0x4040, 0x4080, 0x40A0, 0x40C0}; // bf16 values - cudaError_t cuda_err = cudaMemcpy( - original_data_ptr, - test_data_bf16.data(), - test_data_bf16.size() * sizeof(uint16_t), - cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess); - - // Read back through the reinterpreted tensor - std::vector readback_data_bf16(6); - cuda_err = cudaMemcpy( - readback_data_bf16.data(), - reinterpreted_data_ptr, - readback_data_bf16.size() * sizeof(uint16_t), - cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - - // Verify the data matches - for (size_t i = 0; i < test_data_bf16.size(); i++) { - EXPECT_EQ(readback_data_bf16[i], test_data_bf16[i]) - << "BF16 data should be the same through both tensors at index " << i; - } + ASSERT_NE(view_tensor, nullptr); + EXPECT_TRUE(view_tensor->is_cuda()); + + EXPECT_EQ(view_tensor->dim(), 2); + EXPECT_EQ(view_tensor->size(0), 6); + EXPECT_EQ(view_tensor->size(1), 4); + + EXPECT_EQ(view_tensor->data_ptr(), orig_tensor->data_ptr()); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test reference counting behavior - memory not in map should fail -TEST_F(AOTITorchReinterpretTensorTest, MemoryNotInMapShouldFail) { - // Create a tensor directly without using our allocation functions - // This should NOT 
be in the reference counting map - void* external_memory; - ASSERT_EQ( - cudaMallocManaged(&external_memory, 12 * sizeof(float)), cudaSuccess); - - // Create a tensor by manually wrapping this memory without going through our - // APIs - std::vector sizes = {12}; - std::vector strides = calculate_contiguous_strides(sizes); - - // Create the tensor directly using ExecutorTorch extension - auto tensor_shared = executorch::extension::from_blob( - external_memory, - convert_sizes_to_vector(sizes.size(), sizes.data()), - convert_strides_to_vector(sizes.size(), sizes.data(), strides.data()), - executorch::runtime::etensor::ScalarType::Float); - - ASSERT_TRUE(tensor_shared); - Tensor* external_tensor = tensor_shared.get(); - - // Try to reinterpret this tensor - should fail because memory is not in map - std::vector new_sizes = {3, 4}; - std::vector new_strides = calculate_contiguous_strides(new_sizes); - - Tensor* reinterpreted_tensor; +TEST_F(AOTITorchReinterpretTensorSlimTest, WithStorageOffset_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {4, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); + + std::vector new_sizes = {2, 4}; + std::vector new_strides = {4, 1}; + int64_t storage_offset = 8; + + Tensor* view_tensor = nullptr; AOTITorchError error = aoti_torch__reinterpret_tensor( - external_tensor, + orig_tensor, new_sizes.size(), new_sizes.data(), new_strides.data(), - 0, // storage_offset - &reinterpreted_tensor); + storage_offset, + &view_tensor); - // Should fail because memory is not being tracked by reference counting - // system - EXPECT_EQ(error, Error::InvalidArgument); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(view_tensor, nullptr); + EXPECT_TRUE(view_tensor->is_cuda()); + + char* orig_ptr = static_cast(orig_tensor->data_ptr()); + char* view_ptr = 
static_cast(view_tensor->data_ptr()); + EXPECT_EQ(view_ptr, orig_ptr + storage_offset * sizeof(float)); - // Clean up the external memory - ASSERT_EQ(cudaFree(external_memory), cudaSuccess); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test reference counting behavior - creating view increments reference count -TEST_F(AOTITorchReinterpretTensorTest, ViewCreationIncrementsReferenceCount) { - // Create a source tensor that owns memory (reference count = 1) - std::vector source_sizes = {12}; - Tensor* source_tensor = create_source_tensor(source_sizes); - ASSERT_NE(source_tensor, nullptr); +TEST_F(AOTITorchReinterpretTensorSlimTest, MemorySharing_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - void* shared_data_ptr = source_tensor->mutable_data_ptr(); - ASSERT_NE(shared_data_ptr, nullptr); + std::vector sizes = {6}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); - // Create first view - should increment reference count to 2 - std::vector view1_sizes = {3, 4}; - std::vector view1_strides = - calculate_contiguous_strides(view1_sizes); + void* orig_ptr = orig_tensor->data_ptr(); - Tensor* view1_tensor; - AOTITorchError error = aoti_torch__reinterpret_tensor( - source_tensor, - view1_sizes.size(), - view1_sizes.data(), - view1_strides.data(), - 0, - &view1_tensor); + std::vector new_sizes = {2, 3}; + std::vector new_strides = {3, 1}; - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view1_tensor, nullptr); - EXPECT_EQ(view1_tensor->mutable_data_ptr(), shared_data_ptr); - - // Create second view - should increment reference count to 3 - std::vector view2_sizes = {2, 6}; - std::vector view2_strides = - calculate_contiguous_strides(view2_sizes); - - Tensor* view2_tensor; - error = aoti_torch__reinterpret_tensor( - 
source_tensor, - view2_sizes.size(), - view2_sizes.data(), - view2_strides.data(), + Tensor* view_tensor = nullptr; + AOTITorchError error = aoti_torch__reinterpret_tensor( + orig_tensor, + new_sizes.size(), + new_sizes.data(), + new_strides.data(), 0, - &view2_tensor); + &view_tensor); EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view2_tensor, nullptr); - EXPECT_EQ(view2_tensor->mutable_data_ptr(), shared_data_ptr); + ASSERT_NE(view_tensor, nullptr); - // Now delete the source tensor - memory should NOT be freed (reference count - // = 2) - error = aoti_torch_delete_tensor_object(source_tensor); - EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); - // Both views should still be valid - test by accessing memory - float test_value = 42.0f; - cudaError_t cuda_err = cudaMemcpy( - shared_data_ptr, &test_value, sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess); - - float readback_value = 0.0f; - cuda_err = cudaMemcpy( - &readback_value, - view1_tensor->mutable_data_ptr(), - sizeof(float), - cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - EXPECT_EQ(readback_value, test_value); - - // Delete first view - memory should still NOT be freed (reference count = 1) - error = aoti_torch_delete_tensor_object(view1_tensor); - EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); - // Second view should still be valid - readback_value = 0.0f; - cuda_err = cudaMemcpy( - &readback_value, - view2_tensor->mutable_data_ptr(), - sizeof(float), - cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - EXPECT_EQ(readback_value, test_value); - - // Delete second view - NOW memory should be freed (reference count = 0) - error = aoti_torch_delete_tensor_object(view2_tensor); - EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); } -// Test reference counting behavior with NOT_OWN memory 
(from blob) - should -// SUCCEED and keep NOT_OWN -TEST_F(AOTITorchReinterpretTensorTest, ViewOfNotOwnMemoryKeepsNotOwnStatus) { - // Allocate external memory - void* external_memory; - cudaError_t cuda_err = - cudaMallocManaged(&external_memory, 12 * sizeof(float)); - ASSERT_EQ(cuda_err, cudaSuccess); - - // Create tensor from blob (which marks memory as NOT_OWN) - std::vector blob_sizes = {12}; - std::vector blob_strides = calculate_contiguous_strides(blob_sizes); - - Tensor* blob_tensor; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - external_memory, - blob_sizes.size(), - blob_sizes.data(), - blob_strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device_index - &blob_tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size +TEST_F(AOTITorchReinterpretTensorSlimTest, ChainedViews_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {24}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(blob_tensor, nullptr); - - // Create view of NOT_OWN memory - should SUCCEED and keep NOT_OWN status - std::vector view_sizes = {3, 4}; - std::vector view_strides = calculate_contiguous_strides(view_sizes); - - Tensor* view_tensor; - error = aoti_torch__reinterpret_tensor( - blob_tensor, - view_sizes.size(), - view_sizes.data(), - view_strides.data(), - 0, - &view_tensor); + void* orig_ptr = orig_tensor->data_ptr(); - // Should succeed - NOT_OWN memory can be reinterpreted but stays NOT_OWN - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->mutable_data_ptr(), external_memory); - - // Verify both tensors share the same memory - EXPECT_EQ(blob_tensor->mutable_data_ptr(), 
view_tensor->mutable_data_ptr()); - - // Test memory sharing by writing data through one tensor and reading through - // the other - float test_value = 42.0f; - cuda_err = cudaMemcpy( - external_memory, &test_value, sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess); - - float readback_value = 0.0f; - cuda_err = cudaMemcpy( - &readback_value, - view_tensor->mutable_data_ptr(), - sizeof(float), - cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - EXPECT_EQ(readback_value, test_value); - - // Delete the blob tensor - external memory should NOT be freed (NOT_OWN - // behavior) - error = aoti_torch_delete_tensor_object(blob_tensor); - EXPECT_EQ(error, Error::Ok); + std::vector sizes1 = {4, 6}; + std::vector strides1 = {6, 1}; - // View tensor should still be valid - test by accessing memory - readback_value = 0.0f; - cuda_err = cudaMemcpy( - &readback_value, - view_tensor->mutable_data_ptr(), - sizeof(float), - cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - EXPECT_EQ(readback_value, test_value); - - // Delete view tensor - external memory should still NOT be freed (NOT_OWN - // behavior) - error = aoti_torch_delete_tensor_object(view_tensor); - EXPECT_EQ(error, Error::Ok); + Tensor* view1 = nullptr; + EXPECT_EQ( + aoti_torch__reinterpret_tensor( + orig_tensor, + sizes1.size(), + sizes1.data(), + strides1.data(), + 0, + &view1), + Error::Ok); + + std::vector sizes2 = {2, 2, 6}; + std::vector strides2 = {12, 6, 1}; + + Tensor* view2 = nullptr; + EXPECT_EQ( + aoti_torch__reinterpret_tensor( + view1, sizes2.size(), sizes2.data(), strides2.data(), 0, &view2), + Error::Ok); - // External memory should still be accessible (proves neither tensor freed it) - readback_value = 0.0f; - cuda_err = cudaMemcpy( - &readback_value, external_memory, sizeof(float), cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - EXPECT_EQ(readback_value, test_value); + EXPECT_EQ(view1->data_ptr(), orig_ptr); + EXPECT_EQ(view2->data_ptr(), 
orig_ptr); - // Clean up external memory manually (as expected for NOT_OWN memory) - ASSERT_EQ(cudaFree(external_memory), cudaSuccess); + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view1), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(view2), Error::Ok); } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor_slim.cpp deleted file mode 100644 index d2ad645136e..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch__reinterpret_tensor_slim.cpp +++ /dev/null @@ -1,692 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -class AOTITorchReinterpretTensorSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - Tensor* createTestTensor( - const std::vector& sizes, - const std::vector& strides = {}, - int32_t dtype = static_cast(slim_c10::ScalarType::Float), - int32_t device_type = 
static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector effective_strides = strides; - if (strides.empty()) { - effective_strides = calculateContiguousStrides(sizes); - } - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - effective_strides.data(), - dtype, - device_type, - device_index, - &tensor); - - return (error == Error::Ok) ? tensor : nullptr; - } -}; - -// ============================================================================ -// Basic Functionality Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, BasicView_CPU) { - std::vector sizes = {2, 3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {6, 4}; - std::vector new_strides = {4, 1}; - int64_t storage_offset = 0; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - storage_offset, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - - EXPECT_EQ(view_tensor->dim(), 2); - EXPECT_EQ(view_tensor->size(0), 6); - EXPECT_EQ(view_tensor->size(1), 4); - EXPECT_EQ(view_tensor->stride(0), 4); - EXPECT_EQ(view_tensor->stride(1), 1); - - EXPECT_EQ(view_tensor->data_ptr(), orig_tensor->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, NullSelf) { - std::vector sizes = {2, 3}; - std::vector strides = {3, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - nullptr, sizes.size(), sizes.data(), strides.data(), 0, &view_tensor); - - 
EXPECT_EQ(error, Error::InvalidArgument); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, NullReturnPointer) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {6}; - std::vector new_strides = {1}; - - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - nullptr); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, NegativeNdim) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {6}; - std::vector new_strides = {1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, -1, new_sizes.data(), new_strides.data(), 0, &view_tensor); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); -} - -// ============================================================================ -// Storage Offset Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, WithStorageOffset_CPU) { - std::vector sizes = {4, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {2, 4}; - std::vector new_strides = {4, 1}; - int64_t storage_offset = 4; // Skip first row - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - 
new_sizes.size(), - new_sizes.data(), - new_strides.data(), - storage_offset, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - - EXPECT_EQ(view_tensor->dim(), 2); - EXPECT_EQ(view_tensor->size(0), 2); - EXPECT_EQ(view_tensor->size(1), 4); - - char* orig_ptr = static_cast(orig_tensor->data_ptr()); - char* view_ptr = static_cast(view_tensor->data_ptr()); - EXPECT_EQ(view_ptr, orig_ptr + storage_offset * sizeof(float)); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -// ============================================================================ -// Memory Sharing Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, MemorySharing_CPU) { - std::vector sizes = {6}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - std::vector new_sizes = {2, 3}; - std::vector new_strides = {3, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - - EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, MultipleViews_CPU) { - std::vector sizes = {24}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = 
orig_tensor->data_ptr(); - - std::vector sizes1 = {2, 12}; - std::vector strides1 = {12, 1}; - - std::vector sizes2 = {4, 6}; - std::vector strides2 = {6, 1}; - - std::vector sizes3 = {2, 3, 4}; - std::vector strides3 = {12, 4, 1}; - - Tensor* view1 = nullptr; - Tensor* view2 = nullptr; - Tensor* view3 = nullptr; - - EXPECT_EQ( - aoti_torch__reinterpret_tensor( - orig_tensor, - sizes1.size(), - sizes1.data(), - strides1.data(), - 0, - &view1), - Error::Ok); - EXPECT_EQ( - aoti_torch__reinterpret_tensor( - orig_tensor, - sizes2.size(), - sizes2.data(), - strides2.data(), - 0, - &view2), - Error::Ok); - EXPECT_EQ( - aoti_torch__reinterpret_tensor( - orig_tensor, - sizes3.size(), - sizes3.data(), - strides3.data(), - 0, - &view3), - Error::Ok); - - EXPECT_EQ(view1->data_ptr(), orig_ptr); - EXPECT_EQ(view2->data_ptr(), orig_ptr); - EXPECT_EQ(view3->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - EXPECT_EQ(view1->data_ptr(), orig_ptr); - EXPECT_EQ(view2->data_ptr(), orig_ptr); - EXPECT_EQ(view3->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(view1), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view2), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view3), Error::Ok); -} - -// ============================================================================ -// Dimension Change Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, ExpandDimensions_CPU) { - std::vector sizes = {6}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - EXPECT_EQ(orig_tensor->dim(), 1); - - std::vector new_sizes = {2, 3}; - std::vector new_strides = {3, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - 
new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->dim(), 2); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, CollapseDimensions_CPU) { - std::vector sizes = {2, 3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - EXPECT_EQ(orig_tensor->dim(), 3); - - std::vector new_sizes = {24}; - std::vector new_strides = {1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->dim(), 1); - EXPECT_EQ(view_tensor->numel(), 24); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, ScalarTensorView_CPU) { - std::vector sizes = {1}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {}; - std::vector new_strides = {}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, 0, new_sizes.data(), new_strides.data(), 0, &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->dim(), 0); - EXPECT_EQ(view_tensor->numel(), 1); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -// 
============================================================================ -// Stride Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, TransposeViaStrides_CPU) { - std::vector sizes = {3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {4, 3}; - std::vector new_strides = {1, 4}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->size(0), 4); - EXPECT_EQ(view_tensor->size(1), 3); - EXPECT_EQ(view_tensor->stride(0), 1); - EXPECT_EQ(view_tensor->stride(1), 4); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -// ============================================================================ -// Different Dtype Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, Int64Tensor_CPU) { - std::vector sizes = {6}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {2, 3}; - std::vector new_strides = {3, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->itemsize(), 8); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), 
Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, BFloat16Tensor_CPU) { - std::vector sizes = {6}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::BFloat16), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {2, 3}; - std::vector new_strides = {3, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_EQ(view_tensor->itemsize(), 2); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchReinterpretTensorSlimTest, BasicView_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - EXPECT_TRUE(orig_tensor->is_cuda()); - - std::vector new_sizes = {6, 4}; - std::vector new_strides = {4, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_TRUE(view_tensor->is_cuda()); - - EXPECT_EQ(view_tensor->dim(), 2); - EXPECT_EQ(view_tensor->size(0), 6); - EXPECT_EQ(view_tensor->size(1), 4); - - EXPECT_EQ(view_tensor->data_ptr(), 
orig_tensor->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, WithStorageOffset_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {4, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - - std::vector new_sizes = {2, 4}; - std::vector new_strides = {4, 1}; - int64_t storage_offset = 8; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - storage_offset, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, nullptr); - EXPECT_TRUE(view_tensor->is_cuda()); - - char* orig_ptr = static_cast(orig_tensor->data_ptr()); - char* view_ptr = static_cast(view_tensor->data_ptr()); - EXPECT_EQ(view_ptr, orig_ptr + storage_offset * sizeof(float)); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, MemorySharing_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {6}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - std::vector new_sizes = {2, 3}; - std::vector new_strides = {3, 1}; - - Tensor* view_tensor = nullptr; - AOTITorchError error = aoti_torch__reinterpret_tensor( - orig_tensor, - new_sizes.size(), - new_sizes.data(), - new_strides.data(), - 0, - &view_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(view_tensor, 
nullptr); - - EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(view_tensor->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(view_tensor), Error::Ok); -} - -TEST_F(AOTITorchReinterpretTensorSlimTest, ChainedViews_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {24}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - std::vector sizes1 = {4, 6}; - std::vector strides1 = {6, 1}; - - Tensor* view1 = nullptr; - EXPECT_EQ( - aoti_torch__reinterpret_tensor( - orig_tensor, - sizes1.size(), - sizes1.data(), - strides1.data(), - 0, - &view1), - Error::Ok); - - std::vector sizes2 = {2, 2, 6}; - std::vector strides2 = {12, 6, 1}; - - Tensor* view2 = nullptr; - EXPECT_EQ( - aoti_torch__reinterpret_tensor( - view1, sizes2.size(), sizes2.data(), strides2.data(), 0, &view2), - Error::Ok); - - EXPECT_EQ(view1->data_ptr(), orig_ptr); - EXPECT_EQ(view2->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view1), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(view2), Error::Ok); -} diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out.cpp index d5e1bcb2547..f01743745d2 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out.cpp @@ -7,239 +7,431 @@ */ #include -#include -#include -#include -#include -#include -#include #include #include -using namespace executorch::backends::aoti; +#include +#include +#include +#include +#include + using namespace 
executorch::backends::cuda; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +using executorch::runtime::Error; -// Test fixture for aoti_torch_assign_tensors_out tests -class AOTITorchAssignTensorsOutTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace { - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - // Clear any remaining tensors from previous tests - clear_all_tensors(); +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; + } + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; } + return strides; +} - void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); +} // namespace - // Clear the global tensor storage using the provided function - clear_all_tensors(); +class AOTITorchAssignTensorsOutSlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); } - // Helper to create a test tensor - Tensor* create_test_tensor( + Tensor* createTestTensor( const std::vector& sizes, - int32_t dtype = static_cast(SupportedDTypes::FLOAT32), - int32_t device_type = static_cast(SupportedDevices::CUDA)) { - std::vector strides; - // Calculate contiguous strides - if (!sizes.empty()) { - strides.resize(sizes.size()); - 
strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } + const std::vector& strides = {}, + int32_t dtype = static_cast(slim_c10::ScalarType::Float), + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), + int32_t device_index = 0) { + Tensor* tensor = nullptr; + + std::vector effective_strides = strides; + if (strides.empty()) { + effective_strides = calculateContiguousStrides(sizes); } - Tensor* tensor; - const int64_t* strides_ptr = strides.empty() ? nullptr : strides.data(); - AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - strides_ptr, + effective_strides.data(), dtype, device_type, - 0, + device_index, &tensor); return (error == Error::Ok) ? tensor : nullptr; } }; -// Test basic functionality -TEST_F(AOTITorchAssignTensorsOutTest, BasicFunctionality) { - // Create a source tensor - std::vector sizes = {2, 3}; - Tensor* src = create_test_tensor(sizes); - ASSERT_NE(src, nullptr); +// ============================================================================ +// Basic Functionality Tests +// ============================================================================ - // Create output tensor handle - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); +TEST_F(AOTITorchAssignTensorsOutSlimTest, BasicFunctionality_CPU) { + std::vector sizes = {2, 3}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + // Store expected properties before move + int64_t expected_dim = src_tensor->dim(); + int64_t expected_size0 = src_tensor->size(0); + int64_t expected_size1 = src_tensor->size(1); + size_t expected_numel = src_tensor->numel(); + void* expected_data_ptr = src_tensor->data_ptr(); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = 
aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); + ASSERT_NE(dst_tensor, nullptr); + + // Verify destination tensor has the moved properties + EXPECT_EQ(dst_tensor->dim(), expected_dim); + EXPECT_EQ(dst_tensor->size(0), expected_size0); + EXPECT_EQ(dst_tensor->size(1), expected_size1); + EXPECT_EQ(dst_tensor->numel(), expected_numel); + EXPECT_EQ(dst_tensor->data_ptr(), expected_data_ptr); + + // Source tensor is now in undefined state after move - just delete it + // (accessing src_tensor properties is undefined behavior after move) + delete src_tensor; // Direct delete since it's in undefined state + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); +} + +TEST_F(AOTITorchAssignTensorsOutSlimTest, NullSrc) { + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(nullptr, &dst_tensor); - // Verify the output tensor has the same properties as source - EXPECT_EQ(dst->dim(), src->dim()); - EXPECT_EQ(dst->size(0), src->size(0)); - EXPECT_EQ(dst->size(1), src->size(1)); - EXPECT_EQ(dst->numel(), src->numel()); + EXPECT_EQ(error, Error::InvalidArgument); +} + +TEST_F(AOTITorchAssignTensorsOutSlimTest, NullDst) { + std::vector sizes = {2, 3}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, nullptr); - // Verify they share the same memory - EXPECT_EQ(dst->mutable_data_ptr(), src->mutable_data_ptr()); + EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); } -// Test with 1D tensor -TEST_F(AOTITorchAssignTensorsOutTest, OneDimensionalTensor) { - std::vector sizes = {10}; - Tensor* src = create_test_tensor(sizes); - ASSERT_NE(src, nullptr); +// 
============================================================================ +// Move Semantics Tests +// ============================================================================ - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); +TEST_F(AOTITorchAssignTensorsOutSlimTest, SourceBecamesUndefinedAfterMove_CPU) { + std::vector sizes = {3, 4}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + void* original_ptr = src_tensor->data_ptr(); + ASSERT_NE(original_ptr, nullptr); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); - EXPECT_EQ(dst->dim(), 1); - EXPECT_EQ(dst->size(0), 10); - EXPECT_EQ(dst->mutable_data_ptr(), src->mutable_data_ptr()); -} + ASSERT_NE(dst_tensor, nullptr); -// Test with 3D tensor -TEST_F(AOTITorchAssignTensorsOutTest, ThreeDimensionalTensor) { - std::vector sizes = {2, 3, 4}; - Tensor* src = create_test_tensor(sizes); - ASSERT_NE(src, nullptr); + // Destination has the original pointer + EXPECT_EQ(dst_tensor->data_ptr(), original_ptr); + + // Source tensor is now in undefined state - verify it's no longer defined + EXPECT_FALSE(src_tensor->defined()); - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); + // Clean up - delete in this order since src is undefined + delete src_tensor; + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); +} +// ============================================================================ +// Tensor Property Tests +// ============================================================================ + +TEST_F(AOTITorchAssignTensorsOutSlimTest, CustomStrides_CPU) { + std::vector sizes = {3, 4}; + std::vector strides = {4, 1}; + Tensor* src_tensor = createTestTensor( + sizes, + 
strides, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + // Store expected strides before move + int64_t expected_stride0 = src_tensor->stride(0); + int64_t expected_stride1 = src_tensor->stride(1); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); - EXPECT_EQ(dst->dim(), 3); - EXPECT_EQ(dst->size(0), 2); - EXPECT_EQ(dst->size(1), 3); - EXPECT_EQ(dst->size(2), 4); - EXPECT_EQ(dst->mutable_data_ptr(), src->mutable_data_ptr()); + ASSERT_NE(dst_tensor, nullptr); + + // Verify destination has the expected strides + EXPECT_EQ(dst_tensor->stride(0), expected_stride0); + EXPECT_EQ(dst_tensor->stride(1), expected_stride1); + + delete src_tensor; // Source is undefined after move + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); } -// Test with scalar (0D) tensor -TEST_F(AOTITorchAssignTensorsOutTest, ScalarTensor) { +TEST_F(AOTITorchAssignTensorsOutSlimTest, ScalarTensor_CPU) { std::vector sizes = {}; - Tensor* src = create_test_tensor(sizes); - ASSERT_NE(src, nullptr); + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + EXPECT_EQ(src_tensor->dim(), 0); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(dst_tensor, nullptr); - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); + EXPECT_EQ(dst_tensor->dim(), 0); + EXPECT_EQ(dst_tensor->numel(), 1); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); - EXPECT_EQ(dst->dim(), 0); - EXPECT_EQ(dst->mutable_data_ptr(), src->mutable_data_ptr()); + EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); + 
EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); } -// Test with null source pointer -TEST_F(AOTITorchAssignTensorsOutTest, NullSourcePointer) { - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(nullptr, &dst); - EXPECT_EQ(error, Error::InvalidArgument); +TEST_F(AOTITorchAssignTensorsOutSlimTest, LargeMultiDimensionalTensor_CPU) { + std::vector sizes = {10, 20, 30}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(dst_tensor, nullptr); + + EXPECT_EQ(dst_tensor->dim(), 3); + EXPECT_EQ(dst_tensor->size(0), 10); + EXPECT_EQ(dst_tensor->size(1), 20); + EXPECT_EQ(dst_tensor->size(2), 30); + EXPECT_EQ(dst_tensor->numel(), 6000); + + EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); } -// Test with null destination pointer -TEST_F(AOTITorchAssignTensorsOutTest, NullDestinationPointer) { +// ============================================================================ +// Different Dtype Tests +// ============================================================================ + +TEST_F(AOTITorchAssignTensorsOutSlimTest, Int64Tensor_CPU) { std::vector sizes = {2, 3}; - Tensor* src = create_test_tensor(sizes); - ASSERT_NE(src, nullptr); + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(dst_tensor, nullptr); - AOTITorchError error = aoti_torch_assign_tensors_out(src, nullptr); - 
EXPECT_EQ(error, Error::InvalidArgument); + EXPECT_EQ(dst_tensor->itemsize(), 8); + + EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); } -// Test that strides are preserved -TEST_F(AOTITorchAssignTensorsOutTest, StridesPreserved) { - std::vector sizes = {2, 3}; - Tensor* src = create_test_tensor(sizes); - ASSERT_NE(src, nullptr); +TEST_F(AOTITorchAssignTensorsOutSlimTest, BFloat16Tensor_CPU) { + std::vector sizes = {2, 3, 4}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::BFloat16), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(dst_tensor, nullptr); - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); + EXPECT_EQ(dst_tensor->itemsize(), 2); + + EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); +} +TEST_F(AOTITorchAssignTensorsOutSlimTest, BoolTensor_CPU) { + std::vector sizes = {4}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Bool), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src_tensor, nullptr); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); + ASSERT_NE(dst_tensor, nullptr); - // Get strides from both tensors - int64_t* src_strides; - int64_t* dst_strides; - aoti_torch_get_strides(src, &src_strides); - aoti_torch_get_strides(dst, &dst_strides); + EXPECT_EQ(dst_tensor->itemsize(), 1); - // Verify strides match - for (int64_t i = 0; i < src->dim(); i++) { - EXPECT_EQ(src_strides[i], dst_strides[i]); - } + 
EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); } -// Test with CPU tensor -TEST_F(AOTITorchAssignTensorsOutTest, CPUTensor) { +// ============================================================================ +// CUDA Tests +// ============================================================================ + +TEST_F(AOTITorchAssignTensorsOutSlimTest, BasicFunctionality_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + std::vector sizes = {2, 3}; - Tensor* src = create_test_tensor( + Tensor* src_tensor = createTestTensor( sizes, - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CPU)); - ASSERT_NE(src, nullptr); + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(src_tensor, nullptr); + EXPECT_TRUE(src_tensor->is_cuda()); - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); + // Store expected properties before move + void* expected_data_ptr = src_tensor->data_ptr(); + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); + + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(dst_tensor, nullptr); + EXPECT_TRUE(dst_tensor->is_cuda()); + EXPECT_EQ(dst_tensor->data_ptr(), expected_data_ptr); + + // Source is undefined after move + EXPECT_FALSE(src_tensor->defined()); + + delete src_tensor; + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); +} + +TEST_F( + AOTITorchAssignTensorsOutSlimTest, + SourceBecamesUndefinedAfterMove_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {3, 4}; + Tensor* src_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(src_tensor, nullptr); + + void* original_ptr = src_tensor->data_ptr(); + 
ASSERT_NE(original_ptr, nullptr); + + Tensor* dst_tensor = nullptr; + AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); - EXPECT_EQ(dst->mutable_data_ptr(), src->mutable_data_ptr()); + ASSERT_NE(dst_tensor, nullptr); + + // Destination has the original pointer + EXPECT_EQ(dst_tensor->data_ptr(), original_ptr); + + // Source tensor is now in undefined state + EXPECT_FALSE(src_tensor->defined()); + + delete src_tensor; + EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); } -// Test dtype is preserved -TEST_F(AOTITorchAssignTensorsOutTest, DtypePreserved) { - // Test with different dtypes - std::vector dtypes = { - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDTypes::INT32), - static_cast(SupportedDTypes::INT64), - }; - - for (int32_t dtype : dtypes) { - cleanup_tensor_metadata(); - clear_all_tensors(); - - std::vector sizes = {2, 3}; - Tensor* src = create_test_tensor(sizes, dtype); - ASSERT_NE(src, nullptr); - - Tensor* dst = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src, &dst); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(dst, nullptr); - - // Verify dtype is preserved - int32_t src_dtype, dst_dtype; - aoti_torch_get_dtype(src, &src_dtype); - aoti_torch_get_dtype(dst, &dst_dtype); - EXPECT_EQ(src_dtype, dst_dtype) - << "Dtype mismatch for dtype code: " << dtype; +// ============================================================================ +// Mixed Device Tests +// ============================================================================ + +TEST_F(AOTITorchAssignTensorsOutSlimTest, MixedDeviceAssignments) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; } + + std::vector sizes = {2, 3}; + + Tensor* cpu_src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(cpu_src, nullptr); + EXPECT_TRUE(cpu_src->is_cpu()); + + 
Tensor* cuda_src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(cuda_src, nullptr); + EXPECT_TRUE(cuda_src->is_cuda()); + + Tensor* cpu_dst = nullptr; + Tensor* cuda_dst = nullptr; + + EXPECT_EQ(aoti_torch_assign_tensors_out(cpu_src, &cpu_dst), Error::Ok); + EXPECT_EQ(aoti_torch_assign_tensors_out(cuda_src, &cuda_dst), Error::Ok); + + EXPECT_TRUE(cpu_dst->is_cpu()); + EXPECT_TRUE(cuda_dst->is_cuda()); + EXPECT_NE(cpu_dst->data_ptr(), cuda_dst->data_ptr()); + + EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_dst), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_dst), Error::Ok); } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out_slim.cpp deleted file mode 100644 index f01743745d2..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_assign_tensors_out_slim.cpp +++ /dev/null @@ -1,437 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -class AOTITorchAssignTensorsOutSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - Tensor* createTestTensor( - const std::vector& sizes, - const std::vector& strides = {}, - int32_t dtype = static_cast(slim_c10::ScalarType::Float), - int32_t device_type = static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector effective_strides = strides; - if (strides.empty()) { - effective_strides = calculateContiguousStrides(sizes); - } - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - effective_strides.data(), - dtype, - device_type, - device_index, - &tensor); - - return (error == Error::Ok) ? 
tensor : nullptr; - } -}; - -// ============================================================================ -// Basic Functionality Tests -// ============================================================================ - -TEST_F(AOTITorchAssignTensorsOutSlimTest, BasicFunctionality_CPU) { - std::vector sizes = {2, 3}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - // Store expected properties before move - int64_t expected_dim = src_tensor->dim(); - int64_t expected_size0 = src_tensor->size(0); - int64_t expected_size1 = src_tensor->size(1); - size_t expected_numel = src_tensor->numel(); - void* expected_data_ptr = src_tensor->data_ptr(); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - // Verify destination tensor has the moved properties - EXPECT_EQ(dst_tensor->dim(), expected_dim); - EXPECT_EQ(dst_tensor->size(0), expected_size0); - EXPECT_EQ(dst_tensor->size(1), expected_size1); - EXPECT_EQ(dst_tensor->numel(), expected_numel); - EXPECT_EQ(dst_tensor->data_ptr(), expected_data_ptr); - - // Source tensor is now in undefined state after move - just delete it - // (accessing src_tensor properties is undefined behavior after move) - delete src_tensor; // Direct delete since it's in undefined state - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -TEST_F(AOTITorchAssignTensorsOutSlimTest, NullSrc) { - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(nullptr, &dst_tensor); - - EXPECT_EQ(error, Error::InvalidArgument); -} - -TEST_F(AOTITorchAssignTensorsOutSlimTest, NullDst) { - std::vector sizes = {2, 3}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - 
static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, nullptr); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); -} - -// ============================================================================ -// Move Semantics Tests -// ============================================================================ - -TEST_F(AOTITorchAssignTensorsOutSlimTest, SourceBecamesUndefinedAfterMove_CPU) { - std::vector sizes = {3, 4}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - void* original_ptr = src_tensor->data_ptr(); - ASSERT_NE(original_ptr, nullptr); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - // Destination has the original pointer - EXPECT_EQ(dst_tensor->data_ptr(), original_ptr); - - // Source tensor is now in undefined state - verify it's no longer defined - EXPECT_FALSE(src_tensor->defined()); - - // Clean up - delete in this order since src is undefined - delete src_tensor; - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -// ============================================================================ -// Tensor Property Tests -// ============================================================================ - -TEST_F(AOTITorchAssignTensorsOutSlimTest, CustomStrides_CPU) { - std::vector sizes = {3, 4}; - std::vector strides = {4, 1}; - Tensor* src_tensor = createTestTensor( - sizes, - strides, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - // Store expected strides before move - int64_t expected_stride0 = src_tensor->stride(0); - 
int64_t expected_stride1 = src_tensor->stride(1); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - // Verify destination has the expected strides - EXPECT_EQ(dst_tensor->stride(0), expected_stride0); - EXPECT_EQ(dst_tensor->stride(1), expected_stride1); - - delete src_tensor; // Source is undefined after move - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -TEST_F(AOTITorchAssignTensorsOutSlimTest, ScalarTensor_CPU) { - std::vector sizes = {}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - EXPECT_EQ(src_tensor->dim(), 0); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - EXPECT_EQ(dst_tensor->dim(), 0); - EXPECT_EQ(dst_tensor->numel(), 1); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -TEST_F(AOTITorchAssignTensorsOutSlimTest, LargeMultiDimensionalTensor_CPU) { - std::vector sizes = {10, 20, 30}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - EXPECT_EQ(dst_tensor->dim(), 3); - EXPECT_EQ(dst_tensor->size(0), 10); - EXPECT_EQ(dst_tensor->size(1), 20); - EXPECT_EQ(dst_tensor->size(2), 30); - EXPECT_EQ(dst_tensor->numel(), 6000); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); - 
EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -// ============================================================================ -// Different Dtype Tests -// ============================================================================ - -TEST_F(AOTITorchAssignTensorsOutSlimTest, Int64Tensor_CPU) { - std::vector sizes = {2, 3}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - EXPECT_EQ(dst_tensor->itemsize(), 8); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -TEST_F(AOTITorchAssignTensorsOutSlimTest, BFloat16Tensor_CPU) { - std::vector sizes = {2, 3, 4}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::BFloat16), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - EXPECT_EQ(dst_tensor->itemsize(), 2); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -TEST_F(AOTITorchAssignTensorsOutSlimTest, BoolTensor_CPU) { - std::vector sizes = {4}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Bool), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src_tensor, nullptr); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, 
nullptr); - - EXPECT_EQ(dst_tensor->itemsize(), 1); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchAssignTensorsOutSlimTest, BasicFunctionality_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(src_tensor, nullptr); - EXPECT_TRUE(src_tensor->is_cuda()); - - // Store expected properties before move - void* expected_data_ptr = src_tensor->data_ptr(); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - EXPECT_TRUE(dst_tensor->is_cuda()); - EXPECT_EQ(dst_tensor->data_ptr(), expected_data_ptr); - - // Source is undefined after move - EXPECT_FALSE(src_tensor->defined()); - - delete src_tensor; - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -TEST_F( - AOTITorchAssignTensorsOutSlimTest, - SourceBecamesUndefinedAfterMove_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {3, 4}; - Tensor* src_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(src_tensor, nullptr); - - void* original_ptr = src_tensor->data_ptr(); - ASSERT_NE(original_ptr, nullptr); - - Tensor* dst_tensor = nullptr; - AOTITorchError error = aoti_torch_assign_tensors_out(src_tensor, &dst_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(dst_tensor, nullptr); - - // Destination has the original 
pointer - EXPECT_EQ(dst_tensor->data_ptr(), original_ptr); - - // Source tensor is now in undefined state - EXPECT_FALSE(src_tensor->defined()); - - delete src_tensor; - EXPECT_EQ(aoti_torch_delete_tensor_object(dst_tensor), Error::Ok); -} - -// ============================================================================ -// Mixed Device Tests -// ============================================================================ - -TEST_F(AOTITorchAssignTensorsOutSlimTest, MixedDeviceAssignments) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - - Tensor* cpu_src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(cpu_src, nullptr); - EXPECT_TRUE(cpu_src->is_cpu()); - - Tensor* cuda_src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(cuda_src, nullptr); - EXPECT_TRUE(cuda_src->is_cuda()); - - Tensor* cpu_dst = nullptr; - Tensor* cuda_dst = nullptr; - - EXPECT_EQ(aoti_torch_assign_tensors_out(cpu_src, &cpu_dst), Error::Ok); - EXPECT_EQ(aoti_torch_assign_tensors_out(cuda_src, &cuda_dst), Error::Ok); - - EXPECT_TRUE(cpu_dst->is_cpu()); - EXPECT_TRUE(cuda_dst->is_cuda()); - EXPECT_NE(cpu_dst->data_ptr(), cuda_dst->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_dst), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_dst), Error::Ok); -} diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_copy_.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_copy_.cpp index 9fca0f92cf8..c2e67732b41 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_copy_.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_copy_.cpp @@ -7,392 +7,481 @@ */ #include -#include -#include 
-#include -#include -#include -#include #include -#include #include +#include +#include +#include +#include +#include + using namespace executorch::backends::cuda; -using namespace executorch::backends::aoti; -using namespace executorch::runtime; +using executorch::runtime::Error; -// Test fixture for aoti_torch_copy_ tests -class AOTITorchCopyTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace { - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - // Clear any remaining tensors from previous tests - clear_all_tensors(); +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; + } + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; } + return strides; +} - void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); +} // namespace - // Clear the global tensor storage using the provided function - clear_all_tensors(); +class AOTITorchCopySlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); } - // Helper to create test tensors with specific data - Tensor* create_test_tensor_with_data( + Tensor* createTestTensor( const std::vector& sizes, - const std::vector& data, const std::vector& strides = {}, - int32_t dtype = static_cast(SupportedDTypes::FLOAT32), - 
int32_t device_type = static_cast(SupportedDevices::CUDA), + int32_t dtype = static_cast(slim_c10::ScalarType::Float), + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), int32_t device_index = 0) { - Tensor* tensor; + Tensor* tensor = nullptr; - const int64_t* strides_ptr = strides.empty() ? nullptr : strides.data(); + std::vector effective_strides = strides; + if (strides.empty()) { + effective_strides = calculateContiguousStrides(sizes); + } AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - strides_ptr, + effective_strides.data(), dtype, device_type, device_index, &tensor); - if (error != Error::Ok || tensor == nullptr) { - return nullptr; - } + return (error == Error::Ok) ? tensor : nullptr; + } +}; - // Fill tensor with data - size_t total_bytes = data.size() * sizeof(float); - if (device_type == static_cast(SupportedDevices::CUDA)) { - cudaError_t memcpy_err = cudaMemcpy( - tensor->mutable_data_ptr(), - data.data(), - total_bytes, - cudaMemcpyHostToDevice); - // Note: Error is checked but we don't fail the function - // This allows tests to proceed and handle errors as needed - (void)memcpy_err; // Suppress unused variable warning - } else { // CPU - std::memcpy(tensor->mutable_data_ptr(), data.data(), total_bytes); - } +// ============================================================================ +// Basic Functionality Tests +// ============================================================================ - return tensor; +TEST_F(AOTITorchCopySlimTest, BasicCopy_CPU) { + std::vector sizes = {3, 4}; + Tensor* src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + + float* src_data = static_cast(src->data_ptr()); + for (int64_t i = 0; i < src->numel(); i++) { + src_data[i] = static_cast(i + 1); } - // Helper to get data from tensor - std::vector get_tensor_data(Tensor* tensor) { - if (!tensor) { - return {}; - } + 
Tensor* dst = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - size_t num_elements = tensor->numel(); - std::vector data(num_elements); - - // Determine if this is a CUDA tensor - cudaPointerAttributes attributes{}; - cudaError_t err = cudaPointerGetAttributes(&attributes, tensor->data_ptr()); - bool is_device = - (err == cudaSuccess && attributes.type == cudaMemoryTypeDevice); - - if (is_device) { - cudaError_t memcpy_err = cudaMemcpy( - data.data(), - tensor->data_ptr(), - num_elements * sizeof(float), - cudaMemcpyDeviceToHost); - // Note: Error is checked but we don't fail the function - // This allows tests to proceed and handle errors as needed - (void)memcpy_err; // Suppress unused variable warning - } else { - std::memcpy( - data.data(), tensor->data_ptr(), num_elements * sizeof(float)); - } + AOTITorchError error = aoti_torch_copy_(dst, src, 0); + EXPECT_EQ(error, Error::Ok); - return data; + float* dst_data = static_cast(dst->data_ptr()); + for (int64_t i = 0; i < dst->numel(); i++) { + EXPECT_FLOAT_EQ(dst_data[i], static_cast(i + 1)); } - // Helper to verify two tensors have same data - bool tensors_equal(Tensor* a, Tensor* b, float tolerance = 1e-6f) { - if (!a || !b) { - return false; - } - if (a->numel() != b->numel()) { - return false; - } + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); +} - auto data_a = get_tensor_data(a); - auto data_b = get_tensor_data(b); +TEST_F(AOTITorchCopySlimTest, NullSelf) { + std::vector sizes = {2, 3}; + Tensor* src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); - for (size_t i = 0; i < data_a.size(); ++i) { - if (std::abs(data_a[i] - data_b[i]) > tolerance) { - return false; - } - } - return true; - } -}; + AOTITorchError error = 
aoti_torch_copy_(nullptr, src, 0); + EXPECT_EQ(error, Error::InvalidArgument); -// Test basic copy functionality - same schema (fast path) -TEST_F(AOTITorchCopyTest, BasicCopySameSchema) { - // Create source tensor with test data + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); +} + +TEST_F(AOTITorchCopySlimTest, NullSrc) { std::vector sizes = {2, 3}; - std::vector src_data = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; + Tensor* dst = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - Tensor* src = create_test_tensor_with_data(sizes, src_data); - EXPECT_NE(src, nullptr); + AOTITorchError error = aoti_torch_copy_(dst, nullptr, 0); + EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); +} + +// ============================================================================ +// Different Dtype Tests +// ============================================================================ + +TEST_F(AOTITorchCopySlimTest, Int64Copy_CPU) { + std::vector sizes = {2, 3}; + Tensor* src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + + int64_t* src_data = static_cast(src->data_ptr()); + for (int64_t i = 0; i < src->numel(); i++) { + src_data[i] = i * 100; + } - // Create destination tensor with same schema - Tensor* dst = - create_test_tensor_with_data(sizes, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}); - EXPECT_NE(dst, nullptr); + Tensor* dst = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - // Perform copy AOTITorchError error = aoti_torch_copy_(dst, src, 0); EXPECT_EQ(error, Error::Ok); - // Verify copy was successful - EXPECT_TRUE(tensors_equal(dst, src)); -} - -// Test copy with different strides (pointwise fallback) 
-TEST_F(AOTITorchCopyTest, CopyDifferentStrides) { - // Create source tensor (2x3) with contiguous layout - std::vector src_sizes = {2, 3}; - std::vector src_data = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; + int64_t* dst_data = static_cast(dst->data_ptr()); + for (int64_t i = 0; i < dst->numel(); i++) { + EXPECT_EQ(dst_data[i], i * 100); + } - Tensor* src = create_test_tensor_with_data(src_sizes, src_data); - EXPECT_NE(src, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); +} - // Create destination tensor with transposed strides - std::vector dst_strides = {1, 2}; // Column-major layout - Tensor* dst = create_test_tensor_with_data( - src_sizes, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, dst_strides); - EXPECT_NE(dst, nullptr); +TEST_F(AOTITorchCopySlimTest, BoolCopy_CPU) { + std::vector sizes = {4}; + Tensor* src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Bool), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + + bool* src_data = static_cast(src->data_ptr()); + src_data[0] = true; + src_data[1] = false; + src_data[2] = true; + src_data[3] = false; + + Tensor* dst = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Bool), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - // Perform copy - this should use pointwise fallback AOTITorchError error = aoti_torch_copy_(dst, src, 0); EXPECT_EQ(error, Error::Ok); - // Verify the copy worked correctly by checking specific elements - auto dst_data = get_tensor_data(dst); - auto src_data_check = get_tensor_data(src); + bool* dst_data = static_cast(dst->data_ptr()); + EXPECT_EQ(dst_data[0], true); + EXPECT_EQ(dst_data[1], false); + EXPECT_EQ(dst_data[2], true); + EXPECT_EQ(dst_data[3], false); - // For transposed layout, the data should be rearranged - EXPECT_EQ(dst_data.size(), 6); - EXPECT_EQ(src_data_check.size(), 6); + 
EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); } -// Test copy between CPU and CUDA tensors -TEST_F(AOTITorchCopyTest, CopyCPUToCUDA) { - std::vector sizes = {2, 2}; - std::vector data = {1.0f, 2.0f, 3.0f, 4.0f}; +// ============================================================================ +// Tensor Shape Tests +// ============================================================================ - // Create CPU tensor - Tensor* cpu_tensor = create_test_tensor_with_data( +TEST_F(AOTITorchCopySlimTest, ScalarTensorCopy_CPU) { + std::vector sizes = {}; + Tensor* src = createTestTensor( sizes, - data, {}, - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CPU)); // CPU - EXPECT_NE(cpu_tensor, nullptr); + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + EXPECT_EQ(src->dim(), 0); + EXPECT_EQ(src->numel(), 1); + + float* src_data = static_cast(src->data_ptr()); + *src_data = 42.0f; - // Create CUDA tensor - Tensor* cuda_tensor = create_test_tensor_with_data( + Tensor* dst = createTestTensor( sizes, - {0.0f, 0.0f, 0.0f, 0.0f}, {}, - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA)); // CUDA - EXPECT_NE(cuda_tensor, nullptr); + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - // Copy from CPU to CUDA - AOTITorchError error = aoti_torch_copy_(cuda_tensor, cpu_tensor, 0); + AOTITorchError error = aoti_torch_copy_(dst, src, 0); EXPECT_EQ(error, Error::Ok); - // Verify copy - EXPECT_TRUE(tensors_equal(cuda_tensor, cpu_tensor)); -} + float* dst_data = static_cast(dst->data_ptr()); + EXPECT_FLOAT_EQ(*dst_data, 42.0f); -// Test copy between CUDA and CPU tensors -TEST_F(AOTITorchCopyTest, CopyCUDAToCPU) { - std::vector sizes = {2, 2}; - std::vector data = {1.0f, 2.0f, 3.0f, 4.0f}; + 
EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); +} - // Create CUDA tensor - Tensor* cuda_tensor = create_test_tensor_with_data( +TEST_F(AOTITorchCopySlimTest, LargeTensorCopy_CPU) { + std::vector sizes = {100, 100}; + Tensor* src = createTestTensor( sizes, - data, {}, - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA)); // CUDA - EXPECT_NE(cuda_tensor, nullptr); + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + + float* src_data = static_cast(src->data_ptr()); + for (int64_t i = 0; i < src->numel(); i++) { + src_data[i] = static_cast(i); + } - // Create CPU tensor - Tensor* cpu_tensor = create_test_tensor_with_data( + Tensor* dst = createTestTensor( sizes, - {0.0f, 0.0f, 0.0f, 0.0f}, {}, - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CPU)); // CPU - EXPECT_NE(cpu_tensor, nullptr); + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - // Copy from CUDA to CPU - AOTITorchError error = aoti_torch_copy_(cpu_tensor, cuda_tensor, 0); + AOTITorchError error = aoti_torch_copy_(dst, src, 0); EXPECT_EQ(error, Error::Ok); - // Verify copy - EXPECT_TRUE(tensors_equal(cpu_tensor, cuda_tensor)); + float* dst_data = static_cast(dst->data_ptr()); + for (int64_t i = 0; i < dst->numel(); i++) { + EXPECT_FLOAT_EQ(dst_data[i], static_cast(i)); + } + + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); } -// Test copy with bf16 dtype support -TEST_F(AOTITorchCopyTest, CopyBf16Tensors) { - // Test that bf16 tensors can be created and copied - std::vector sizes = {2, 3}; - std::vector src_data = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; +// ============================================================================ +// CUDA Tests +// 
============================================================================ - // Note: We create float32 data but the tensor will be created with bf16 dtype - // This simulates creating bf16 tensors - Tensor* src = create_test_tensor_with_data( +TEST_F(AOTITorchCopySlimTest, CudaToCuda) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {3, 4}; + + std::vector host_src_data(12); + for (size_t i = 0; i < host_src_data.size(); i++) { + host_src_data[i] = static_cast(i + 1); + } + + Tensor* src = createTestTensor( sizes, - src_data, - {}, // default strides - static_cast(SupportedDTypes::BFLOAT16), // bf16 dtype - static_cast(SupportedDevices::CUDA), // CUDA device - 0 // device_index = 0 - ); - EXPECT_NE(src, nullptr); - - // Create destination tensor with bf16 dtype - std::vector dst_init(6, 0.0f); - Tensor* dst = create_test_tensor_with_data( + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(src, nullptr); + EXPECT_TRUE(src->is_cuda()); + + cudaMemcpy( + src->data_ptr(), + host_src_data.data(), + host_src_data.size() * sizeof(float), + cudaMemcpyHostToDevice); + + Tensor* dst = createTestTensor( sizes, - dst_init, - {}, // default strides - static_cast(SupportedDTypes::BFLOAT16), // bf16 dtype - static_cast(SupportedDevices::CUDA), // CUDA device - 0 // device_index = 0 - ); - EXPECT_NE(dst, nullptr); - - // Perform copy between bf16 tensors + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(dst, nullptr); + EXPECT_TRUE(dst->is_cuda()); + AOTITorchError error = aoti_torch_copy_(dst, src, 0); EXPECT_EQ(error, Error::Ok); - // Verify that both tensors have the expected dtype - int32_t src_dtype, dst_dtype; - aoti_torch_get_dtype(src, &src_dtype); - aoti_torch_get_dtype(dst, &dst_dtype); + std::vector host_dst_data(12); + cudaMemcpy( + host_dst_data.data(), + dst->data_ptr(), + host_dst_data.size() * 
sizeof(float), + cudaMemcpyDeviceToHost); - EXPECT_EQ(src_dtype, static_cast(SupportedDTypes::BFLOAT16)); - EXPECT_EQ(dst_dtype, static_cast(SupportedDTypes::BFLOAT16)); + for (size_t i = 0; i < host_dst_data.size(); i++) { + EXPECT_FLOAT_EQ(host_dst_data[i], static_cast(i + 1)); + } - // Verify copy was successful by checking numel matches - EXPECT_EQ(src->numel(), dst->numel()); - EXPECT_EQ(src->numel(), 6); + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); } -// Test copy between different dtypes should fail -TEST_F(AOTITorchCopyTest, CopyDTypeMismatchError) { - std::vector sizes = {2, 2}; - std::vector data = {1.0f, 2.0f, 3.0f, 4.0f}; +TEST_F(AOTITorchCopySlimTest, CpuToCuda) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - // Create float32 tensor - Tensor* float32_tensor = create_test_tensor_with_data( + std::vector sizes = {2, 3}; + Tensor* src = createTestTensor( sizes, - data, - {}, // default strides - static_cast(SupportedDTypes::FLOAT32), // float32 dtype - static_cast(SupportedDevices::CUDA), // CUDA device - 0 // device_index = 0 - ); - EXPECT_NE(float32_tensor, nullptr); - - // Create bf16 tensor - Tensor* bf16_tensor = create_test_tensor_with_data( + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + EXPECT_TRUE(src->is_cpu()); + + float* src_data = static_cast(src->data_ptr()); + for (int64_t i = 0; i < src->numel(); i++) { + src_data[i] = static_cast(i * 10); + } + + Tensor* dst = createTestTensor( sizes, - {0.0f, 0.0f, 0.0f, 0.0f}, - {}, // default strides - static_cast(SupportedDTypes::BFLOAT16), // bf16 dtype - static_cast(SupportedDevices::CUDA), // CUDA device - 0 // device_index = 0 - ); - EXPECT_NE(bf16_tensor, nullptr); - - // Attempting to copy between different dtypes should fail - AOTITorchError error = aoti_torch_copy_(bf16_tensor, float32_tensor, 0); - 
EXPECT_EQ(error, Error::InvalidArgument); + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(dst, nullptr); + EXPECT_TRUE(dst->is_cuda()); - // Reverse direction should also fail - error = aoti_torch_copy_(float32_tensor, bf16_tensor, 0); - EXPECT_EQ(error, Error::InvalidArgument); + AOTITorchError error = aoti_torch_copy_(dst, src, 0); + EXPECT_EQ(error, Error::Ok); + + std::vector host_dst_data(6); + cudaMemcpy( + host_dst_data.data(), + dst->data_ptr(), + host_dst_data.size() * sizeof(float), + cudaMemcpyDeviceToHost); + + for (size_t i = 0; i < host_dst_data.size(); i++) { + EXPECT_FLOAT_EQ(host_dst_data[i], static_cast(i * 10)); + } + + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); } -// Test error conditions -TEST_F(AOTITorchCopyTest, ErrorHandling) { +TEST_F(AOTITorchCopySlimTest, CudaToCpu) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + std::vector sizes = {2, 3}; - std::vector data = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; - Tensor* valid_tensor = create_test_tensor_with_data(sizes, data); - EXPECT_NE(valid_tensor, nullptr); + std::vector host_src_data(6); + for (size_t i = 0; i < host_src_data.size(); i++) { + host_src_data[i] = static_cast(i * 5); + } - // Test null pointers - AOTITorchError error = aoti_torch_copy_(nullptr, valid_tensor, 0); - EXPECT_NE(error, Error::Ok); + Tensor* src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(src, nullptr); + + cudaMemcpy( + src->data_ptr(), + host_src_data.data(), + host_src_data.size() * sizeof(float), + cudaMemcpyHostToDevice); + + Tensor* dst = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); + EXPECT_TRUE(dst->is_cpu()); - error = 
aoti_torch_copy_(valid_tensor, nullptr, 0); - EXPECT_NE(error, Error::Ok); + AOTITorchError error = aoti_torch_copy_(dst, src, 0); + EXPECT_EQ(error, Error::Ok); - // Test numel mismatch (different total number of elements) - std::vector different_numel_sizes = { - 2, 3, 4}; // 24 elements vs 6 elements - std::vector different_data(24, 1.0f); - Tensor* different_numel = - create_test_tensor_with_data(different_numel_sizes, different_data); - EXPECT_NE(different_numel, nullptr); + float* dst_data = static_cast(dst->data_ptr()); + for (int64_t i = 0; i < dst->numel(); i++) { + EXPECT_FLOAT_EQ(dst_data[i], static_cast(i * 5)); + } - error = aoti_torch_copy_(valid_tensor, different_numel, 0); - EXPECT_EQ(error, Error::InvalidArgument); + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); } -// Test copy from 1D to 3D with same total elements -TEST_F(AOTITorchCopyTest, Copy1DTo3DSameNumel) { - // Source tensor: 8 elements in 1D - std::vector src_sizes = {8}; - std::vector src_data = { - 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}; +// ============================================================================ +// Non-blocking Tests +// ============================================================================ - Tensor* src = create_test_tensor_with_data(src_sizes, src_data); - EXPECT_NE(src, nullptr); +TEST_F(AOTITorchCopySlimTest, NonBlockingFlag_CPU) { + std::vector sizes = {2, 3}; + Tensor* src = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(src, nullptr); + + float* src_data = static_cast(src->data_ptr()); + for (int64_t i = 0; i < src->numel(); i++) { + src_data[i] = static_cast(i); + } - // Destination tensor: 2x2x2 = 8 elements (different shape, same total) - std::vector dst_sizes = {2, 2, 2}; - std::vector dst_init(8, 0.0f); - Tensor* dst = create_test_tensor_with_data(dst_sizes, dst_init); - 
EXPECT_NE(dst, nullptr); + Tensor* dst = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(dst, nullptr); - // This should work - same total number of elements - AOTITorchError error = aoti_torch_copy_(dst, src, 0); + AOTITorchError error = aoti_torch_copy_(dst, src, 1); EXPECT_EQ(error, Error::Ok); - // Verify the data was copied correctly - auto dst_data = get_tensor_data(dst); - EXPECT_EQ(dst_data.size(), 8); + float* dst_data = static_cast(dst->data_ptr()); + for (int64_t i = 0; i < dst->numel(); i++) { + EXPECT_FLOAT_EQ(dst_data[i], static_cast(i)); + } - // Check some specific elements to verify correct copying - EXPECT_FLOAT_EQ(dst_data[0], 1.0f); - EXPECT_FLOAT_EQ(dst_data[7], 8.0f); + EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_copy__slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_copy__slim.cpp deleted file mode 100644 index c2e67732b41..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_copy__slim.cpp +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -class AOTITorchCopySlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - Tensor* createTestTensor( - const std::vector& sizes, - const std::vector& strides = {}, - int32_t dtype = static_cast(slim_c10::ScalarType::Float), - int32_t device_type = static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector effective_strides = strides; - if (strides.empty()) { - effective_strides = calculateContiguousStrides(sizes); - } - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - effective_strides.data(), - dtype, - device_type, - device_index, - &tensor); - - return (error == Error::Ok) ? 
tensor : nullptr; - } -}; - -// ============================================================================ -// Basic Functionality Tests -// ============================================================================ - -TEST_F(AOTITorchCopySlimTest, BasicCopy_CPU) { - std::vector sizes = {3, 4}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - - float* src_data = static_cast(src->data_ptr()); - for (int64_t i = 0; i < src->numel(); i++) { - src_data[i] = static_cast(i + 1); - } - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - float* dst_data = static_cast(dst->data_ptr()); - for (int64_t i = 0; i < dst->numel(); i++) { - EXPECT_FLOAT_EQ(dst_data[i], static_cast(i + 1)); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -TEST_F(AOTITorchCopySlimTest, NullSelf) { - std::vector sizes = {2, 3}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - - AOTITorchError error = aoti_torch_copy_(nullptr, src, 0); - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); -} - -TEST_F(AOTITorchCopySlimTest, NullSrc) { - std::vector sizes = {2, 3}; - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, nullptr, 0); - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -// 
============================================================================ -// Different Dtype Tests -// ============================================================================ - -TEST_F(AOTITorchCopySlimTest, Int64Copy_CPU) { - std::vector sizes = {2, 3}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - - int64_t* src_data = static_cast(src->data_ptr()); - for (int64_t i = 0; i < src->numel(); i++) { - src_data[i] = i * 100; - } - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - int64_t* dst_data = static_cast(dst->data_ptr()); - for (int64_t i = 0; i < dst->numel(); i++) { - EXPECT_EQ(dst_data[i], i * 100); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -TEST_F(AOTITorchCopySlimTest, BoolCopy_CPU) { - std::vector sizes = {4}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Bool), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - - bool* src_data = static_cast(src->data_ptr()); - src_data[0] = true; - src_data[1] = false; - src_data[2] = true; - src_data[3] = false; - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Bool), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - bool* dst_data = static_cast(dst->data_ptr()); - EXPECT_EQ(dst_data[0], true); - EXPECT_EQ(dst_data[1], false); - EXPECT_EQ(dst_data[2], true); - EXPECT_EQ(dst_data[3], false); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - 
EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -// ============================================================================ -// Tensor Shape Tests -// ============================================================================ - -TEST_F(AOTITorchCopySlimTest, ScalarTensorCopy_CPU) { - std::vector sizes = {}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - EXPECT_EQ(src->dim(), 0); - EXPECT_EQ(src->numel(), 1); - - float* src_data = static_cast(src->data_ptr()); - *src_data = 42.0f; - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - float* dst_data = static_cast(dst->data_ptr()); - EXPECT_FLOAT_EQ(*dst_data, 42.0f); - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -TEST_F(AOTITorchCopySlimTest, LargeTensorCopy_CPU) { - std::vector sizes = {100, 100}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - - float* src_data = static_cast(src->data_ptr()); - for (int64_t i = 0; i < src->numel(); i++) { - src_data[i] = static_cast(i); - } - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - float* dst_data = static_cast(dst->data_ptr()); - for (int64_t i = 0; i < dst->numel(); i++) { - EXPECT_FLOAT_EQ(dst_data[i], static_cast(i)); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - 
EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchCopySlimTest, CudaToCuda) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {3, 4}; - - std::vector host_src_data(12); - for (size_t i = 0; i < host_src_data.size(); i++) { - host_src_data[i] = static_cast(i + 1); - } - - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(src, nullptr); - EXPECT_TRUE(src->is_cuda()); - - cudaMemcpy( - src->data_ptr(), - host_src_data.data(), - host_src_data.size() * sizeof(float), - cudaMemcpyHostToDevice); - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(dst, nullptr); - EXPECT_TRUE(dst->is_cuda()); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - std::vector host_dst_data(12); - cudaMemcpy( - host_dst_data.data(), - dst->data_ptr(), - host_dst_data.size() * sizeof(float), - cudaMemcpyDeviceToHost); - - for (size_t i = 0; i < host_dst_data.size(); i++) { - EXPECT_FLOAT_EQ(host_dst_data[i], static_cast(i + 1)); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -TEST_F(AOTITorchCopySlimTest, CpuToCuda) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - EXPECT_TRUE(src->is_cpu()); - - float* src_data = static_cast(src->data_ptr()); - for (int64_t i = 0; i < src->numel(); i++) { - 
src_data[i] = static_cast(i * 10); - } - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(dst, nullptr); - EXPECT_TRUE(dst->is_cuda()); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - std::vector host_dst_data(6); - cudaMemcpy( - host_dst_data.data(), - dst->data_ptr(), - host_dst_data.size() * sizeof(float), - cudaMemcpyDeviceToHost); - - for (size_t i = 0; i < host_dst_data.size(); i++) { - EXPECT_FLOAT_EQ(host_dst_data[i], static_cast(i * 10)); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -TEST_F(AOTITorchCopySlimTest, CudaToCpu) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - - std::vector host_src_data(6); - for (size_t i = 0; i < host_src_data.size(); i++) { - host_src_data[i] = static_cast(i * 5); - } - - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(src, nullptr); - - cudaMemcpy( - src->data_ptr(), - host_src_data.data(), - host_src_data.size() * sizeof(float), - cudaMemcpyHostToDevice); - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - EXPECT_TRUE(dst->is_cpu()); - - AOTITorchError error = aoti_torch_copy_(dst, src, 0); - EXPECT_EQ(error, Error::Ok); - - float* dst_data = static_cast(dst->data_ptr()); - for (int64_t i = 0; i < dst->numel(); i++) { - EXPECT_FLOAT_EQ(dst_data[i], static_cast(i * 5)); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} - -// ============================================================================ -// Non-blocking Tests -// 
============================================================================ - -TEST_F(AOTITorchCopySlimTest, NonBlockingFlag_CPU) { - std::vector sizes = {2, 3}; - Tensor* src = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(src, nullptr); - - float* src_data = static_cast(src->data_ptr()); - for (int64_t i = 0; i < src->numel(); i++) { - src_data[i] = static_cast(i); - } - - Tensor* dst = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(dst, nullptr); - - AOTITorchError error = aoti_torch_copy_(dst, src, 1); - EXPECT_EQ(error, Error::Ok); - - float* dst_data = static_cast(dst->data_ptr()); - for (int64_t i = 0; i < dst->numel(); i++) { - EXPECT_FLOAT_EQ(dst_data[i], static_cast(i)); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(src), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(dst), Error::Ok); -} diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2.cpp index db0ab84970d..21f8c79cc46 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2.cpp @@ -7,380 +7,271 @@ */ #include -#include -#include -#include -#include -#include -#include #include #include -using namespace executorch::backends::aoti; +#include +#include +#include +#include +#include + using namespace executorch::backends::cuda; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +using executorch::runtime::Error; -// Test fixture for aoti_torch_create_tensor_from_blob_v2 tests -class AOTITorchCreateTensorFromBlobV2Test : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - 
et_pal_init(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace { + +// Helper to check if CUDA is available +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} + +// Helper to calculate contiguous strides from sizes +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; + } + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; + } + return strides; +} - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +// Helper to calculate numel from sizes +int64_t calculateNumel(const std::vector& sizes) { + int64_t numel = 1; + for (int64_t size : sizes) { + numel *= size; + } + return numel; +} - // Clear any remaining tensors from previous tests - clear_all_tensors(); +} // namespace + +// Test fixture for SlimTensor-based aoti_torch_create_tensor_from_blob_v2 tests +class AOTITorchCreateTensorFromBlobV2SlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); } void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); - - // Clear the global tensor storage using the provided function - clear_all_tensors(); - - // Clean up any allocated memory buffers - for (void* ptr : cuda_memory_buffers_) { - if (ptr) { - cudaError_t cuda_err = cudaFree(ptr); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Failed to free CUDA memory: " << cudaGetErrorString(cuda_err); + // Clean up tensors + for (Tensor* t : tensors_) { + delete t; + } + tensors_.clear(); + + // Clean up CUDA 
memory + for (void* ptr : cuda_memory_) { + if (ptr != nullptr) { + cudaFree(ptr); } } - cuda_memory_buffers_.clear(); + cuda_memory_.clear(); - for (void* ptr : cpu_memory_buffers_) { - if (ptr) { + // Clean up CPU memory + for (void* ptr : cpu_memory_) { + if (ptr != nullptr) { free(ptr); } } - cpu_memory_buffers_.clear(); + cpu_memory_.clear(); } - // Helper to allocate CUDA memory and track it for cleanup - void* allocate_cuda_memory(size_t bytes) { - void* ptr; - cudaError_t err = cudaMallocManaged(&ptr, bytes); - if (err == cudaSuccess) { - cuda_memory_buffers_.push_back(ptr); - return ptr; + void* allocateCudaMemory(size_t bytes) { + void* ptr = nullptr; + cudaError_t err = cudaMalloc(&ptr, bytes); + if (err == cudaSuccess && ptr != nullptr) { + cuda_memory_.push_back(ptr); } - return nullptr; + return ptr; } - // Helper to allocate CPU memory and track it for cleanup - void* allocate_cpu_memory(size_t bytes) { - void* ptr; - int result = posix_memalign(&ptr, 16, bytes); // 16-byte aligned + void* allocateCpuMemory(size_t bytes) { + void* ptr = nullptr; + int result = posix_memalign(&ptr, 16, bytes); if (result == 0 && ptr != nullptr) { - cpu_memory_buffers_.push_back(ptr); - return ptr; + cpu_memory_.push_back(ptr); } - return nullptr; + return ptr; } - // Helper to calculate number of elements from sizes - int64_t calculate_numel(const std::vector& sizes) { - int64_t numel = 1; - for (int64_t size : sizes) { - numel *= size; + void trackTensor(Tensor* t) { + if (t != nullptr) { + tensors_.push_back(t); } - return numel; - } - - // Helper to calculate contiguous strides from sizes - std::vector calculate_contiguous_strides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - - strides[sizes.size() - 1] = 1; - // Use int64_t and check for underflow to avoid unsigned integer wraparound - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } 
- return strides; } private: - std::vector cuda_memory_buffers_; - std::vector cpu_memory_buffers_; + std::vector tensors_; + std::vector cuda_memory_; + std::vector cpu_memory_; }; -// Test basic functionality with CUDA memory -TEST_F(AOTITorchCreateTensorFromBlobV2Test, BasicFunctionalityCUDA) { - // Test 1D tensor - std::vector sizes_1d = {5}; - std::vector strides_1d = calculate_contiguous_strides(sizes_1d); - - // Allocate CUDA memory - size_t bytes = calculate_numel(sizes_1d) * sizeof(float); - void* cuda_data = allocate_cuda_memory(bytes); - ASSERT_NE(cuda_data, nullptr); - - Tensor* tensor_1d; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - cuda_data, - sizes_1d.size(), - sizes_1d.data(), - strides_1d.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_1d, - 0, // layout (strided) - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_1d, nullptr); - - // Check tensor properties - EXPECT_EQ(tensor_1d->dim(), 1); - EXPECT_EQ(tensor_1d->size(0), 5); +// ============================================================================ +// Common test body - parameterized by device type +// ============================================================================ - // Verify the tensor uses the same data pointer - void* tensor_data = tensor_1d->mutable_data_ptr(); - EXPECT_EQ(tensor_data, cuda_data); - - // Delete the tensor - this should NOT free the original memory - error = aoti_torch_delete_tensor_object(tensor_1d); - EXPECT_EQ(error, Error::Ok); - - // Test that the original memory is still accessible (proves tensor didn't own - // it) For CUDA memory, check that we can still access it (synchronously) - // after tensor deletion - float pattern_value = 42.0f; - cudaError_t cuda_err = cudaMemcpy( - cuda_data, &pattern_value, sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess) 
- << "Should be able to write to original CUDA memory after tensor deletion"; - - float readback_value = 0.0f; - cuda_err = cudaMemcpy( - &readback_value, cuda_data, sizeof(float), cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to read from original CUDA memory after tensor deletion"; - EXPECT_EQ(readback_value, pattern_value) - << "Original CUDA memory should still contain our test pattern"; -} - -// Test basic functionality with CPU memory -TEST_F(AOTITorchCreateTensorFromBlobV2Test, BasicFunctionalityCPU) { - // Test 2D tensor - std::vector sizes_2d = {3, 4}; - std::vector strides_2d = calculate_contiguous_strides(sizes_2d); - - // Allocate CPU memory - size_t bytes = calculate_numel(sizes_2d) * sizeof(float); - void* cpu_data = allocate_cpu_memory(bytes); - ASSERT_NE(cpu_data, nullptr); +void runBasicFromBlobTest( + AOTITorchCreateTensorFromBlobV2SlimTest* fixture, + void* data, + int32_t device_type, + int32_t device_index) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor_2d; + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - cpu_data, - sizes_2d.size(), - sizes_2d.data(), - strides_2d.data(), + data, + sizes.size(), + sizes.data(), + strides.data(), 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CPU), - 0, // device index - &tensor_2d, - 0, // layout (strided) + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, + &tensor, + 0, // layout nullptr, // opaque_metadata 0); // opaque_metadata_size EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_2d, nullptr); + ASSERT_NE(tensor, nullptr); // Check tensor properties - EXPECT_EQ(tensor_2d->dim(), 2); - EXPECT_EQ(tensor_2d->size(0), 3); - EXPECT_EQ(tensor_2d->size(1), 4); - - // Verify the tensor uses the same data pointer - void* tensor_data = tensor_2d->mutable_data_ptr(); - EXPECT_EQ(tensor_data, cpu_data); + 
EXPECT_EQ(tensor->dim(), 2); + EXPECT_EQ(tensor->size(0), 2); + EXPECT_EQ(tensor->size(1), 3); + EXPECT_EQ(tensor->numel(), 6); + EXPECT_EQ( + static_cast(tensor->dtype()), + static_cast(slim_c10::ScalarType::Float)); - // Delete the tensor - this should NOT free the original memory - error = aoti_torch_delete_tensor_object(tensor_2d); - EXPECT_EQ(error, Error::Ok); + // Verify the tensor uses the same data pointer (non-owning) + EXPECT_EQ(tensor->data_ptr(), data); - // Test that the original memory is still accessible (proves tensor didn't own - // it) For CPU memory, directly write and read to verify accessibility - float* float_ptr = reinterpret_cast(cpu_data); - float pattern_value = 42.0f; - *float_ptr = pattern_value; - EXPECT_EQ(*float_ptr, pattern_value) - << "Original CPU memory should still be accessible after tensor deletion"; + // Cleanup - tensor should NOT free the original memory + delete tensor; } -// Test with invalid dtype -TEST_F(AOTITorchCreateTensorFromBlobV2Test, InvalidDtype) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); - - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); - ASSERT_NE(data, nullptr); +void runScalarFromBlobTest( + AOTITorchCreateTensorFromBlobV2SlimTest* fixture, + void* data, + int32_t device_type, + int32_t device_index) { + std::vector sizes = {}; // 0D tensor + std::vector strides = {}; - Tensor* tensor; + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( data, sizes.size(), sizes.data(), strides.data(), 0, // storage_offset - 999, // invalid dtype - static_cast(SupportedDevices::CUDA), - 0, // device index + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor, 0, // layout nullptr, // opaque_metadata 0); // opaque_metadata_size - EXPECT_EQ(error, Error::InvalidArgument); -} - -// Test with non-zero storage offset (should fail since from_blob cannot handle -// 
offsets) -TEST_F(AOTITorchCreateTensorFromBlobV2Test, NonZeroStorageOffset) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); - - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); - ASSERT_NE(data, nullptr); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); - Tensor* tensor; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 1, // non-zero storage_offset (should fail since from_blob cannot handle - // offsets) - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size + EXPECT_EQ(tensor->dim(), 0); + EXPECT_EQ(tensor->numel(), 1); + EXPECT_EQ(tensor->data_ptr(), data); - EXPECT_EQ(error, Error::InvalidArgument); + delete tensor; } -// Test with custom strides (using stride parameter but still contiguous) -TEST_F(AOTITorchCreateTensorFromBlobV2Test, CustomContiguousStrides) { - std::vector sizes = {2, 3}; - // Use the correct contiguous strides but pass them explicitly - std::vector contiguous_strides = {3, 1}; // Proper contiguous strides - - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); - ASSERT_NE(data, nullptr); +void runMultiDimensionalFromBlobTest( + AOTITorchCreateTensorFromBlobV2SlimTest* fixture, + void* data, + int32_t device_type, + int32_t device_index) { + std::vector sizes = {2, 3, 4}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor; + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( data, sizes.size(), sizes.data(), - contiguous_strides.data(), // Explicitly pass contiguous strides + strides.data(), 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + 
static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor, 0, // layout nullptr, // opaque_metadata 0); // opaque_metadata_size EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + ASSERT_NE(tensor, nullptr); - // Check tensor properties - EXPECT_EQ(tensor->dim(), 2); + EXPECT_EQ(tensor->dim(), 3); EXPECT_EQ(tensor->size(0), 2); EXPECT_EQ(tensor->size(1), 3); + EXPECT_EQ(tensor->size(2), 4); + EXPECT_EQ(tensor->numel(), 24); + EXPECT_EQ(tensor->data_ptr(), data); - // Verify the tensor uses the same data pointer - void* tensor_data = tensor->mutable_data_ptr(); - EXPECT_EQ(tensor_data, data); - - // Verify strides were properly set (we can check via aoti_torch_get_strides) - int64_t* tensor_strides; - error = aoti_torch_get_strides(tensor, &tensor_strides); - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(tensor_strides[0], 3); - EXPECT_EQ(tensor_strides[1], 1); - - // Delete the tensor - this should NOT free the original memory - error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); - - // Test that the original memory is still accessible (proves tensor didn't own - // it) - float pattern_value = 42.0f; - cudaError_t cuda_err = - cudaMemcpy(data, &pattern_value, sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to write to original CUDA memory after tensor deletion"; - - float readback_value = 0.0f; - cuda_err = - cudaMemcpy(&readback_value, data, sizeof(float), cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to read from original CUDA memory after tensor deletion"; - EXPECT_EQ(readback_value, pattern_value) - << "Original CUDA memory should still contain our test pattern"; + delete tensor; } -// Test with null data pointer -TEST_F(AOTITorchCreateTensorFromBlobV2Test, NullDataPointer) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); +void runCustomStridesFromBlobTest( + 
AOTITorchCreateTensorFromBlobV2SlimTest* fixture, + void* data, + int32_t device_type, + int32_t device_index) { + std::vector sizes = {3, 4}; + std::vector strides = {1, 3}; // Column-major - Tensor* tensor; + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - nullptr, // null data pointer + data, sizes.size(), sizes.data(), strides.data(), 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor, 0, // layout nullptr, // opaque_metadata 0); // opaque_metadata_size - EXPECT_EQ(error, Error::InvalidArgument); -} + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); -// Test scalar tensor (0D) -TEST_F(AOTITorchCreateTensorFromBlobV2Test, ScalarTensor) { - std::vector sizes = {}; // 0D tensor - std::vector strides = {}; // Empty strides for scalar + EXPECT_EQ(tensor->stride(0), 1); + EXPECT_EQ(tensor->stride(1), 3); + EXPECT_FALSE(tensor->is_contiguous()); + EXPECT_EQ(tensor->data_ptr(), data); - size_t bytes = sizeof(float); // Single element - void* data = allocate_cuda_memory(bytes); - ASSERT_NE(data, nullptr); + delete tensor; +} + +void runStorageOffsetFromBlobTest( + AOTITorchCreateTensorFromBlobV2SlimTest* fixture, + void* data, + int32_t device_type, + int32_t device_index) { + std::vector sizes = {2, 2}; + std::vector strides = calculateContiguousStrides(sizes); Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( @@ -388,10 +279,10 @@ TEST_F(AOTITorchCreateTensorFromBlobV2Test, ScalarTensor) { sizes.size(), sizes.data(), strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + 2, // storage_offset = 2 elements + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor, 0, // layout nullptr, // opaque_metadata @@ -400,420 
+291,343 @@ TEST_F(AOTITorchCreateTensorFromBlobV2Test, ScalarTensor) { EXPECT_EQ(error, Error::Ok); ASSERT_NE(tensor, nullptr); - // Check tensor properties - EXPECT_EQ(tensor->dim(), 0); - - // Verify the tensor uses the same data pointer - void* tensor_data = tensor->mutable_data_ptr(); - EXPECT_EQ(tensor_data, data); - - // Delete the tensor - this should NOT free the original memory - error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(tensor->storage_offset(), 2); + // data_ptr should point to base + offset * itemsize + char* expected_ptr = static_cast(data) + 2 * sizeof(float); + EXPECT_EQ(tensor->data_ptr(), expected_ptr); - // Test that the original memory is still accessible (proves tensor didn't own - // it) - float pattern_value = 42.0f; - cudaError_t cuda_err = - cudaMemcpy(data, &pattern_value, sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to write to original CUDA memory after tensor deletion"; - - float readback_value = 0.0f; - cuda_err = - cudaMemcpy(&readback_value, data, sizeof(float), cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to read from original CUDA memory after tensor deletion"; - EXPECT_EQ(readback_value, pattern_value) - << "Original CUDA memory should still contain our test pattern"; + delete tensor; } -// Test zero-sized tensor -TEST_F(AOTITorchCreateTensorFromBlobV2Test, ZeroSizedTensor) { - std::vector sizes = {0, 5}; // Zero elements - std::vector strides = calculate_contiguous_strides(sizes); +// ============================================================================ +// CPU Tests +// ============================================================================ - // Even for zero-sized tensor, we need some memory allocated - size_t bytes = sizeof(float); // Minimum allocation - void* data = allocate_cuda_memory(bytes); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, BasicFunctionality_CPU) { + size_t 
bytes = 6 * sizeof(float); + void* data = allocateCpuMemory(bytes); ASSERT_NE(data, nullptr); - Tensor* tensor; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size + runBasicFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CPU), 0); +} - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, ScalarTensor_CPU) { + size_t bytes = sizeof(float); + void* data = allocateCpuMemory(bytes); + ASSERT_NE(data, nullptr); - // Check tensor properties - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 0); - EXPECT_EQ(tensor->size(1), 5); + runScalarFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CPU), 0); +} - // Verify the tensor uses the same data pointer - void* tensor_data = tensor->mutable_data_ptr(); - EXPECT_EQ(tensor_data, data); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, MultiDimensional_CPU) { + size_t bytes = 24 * sizeof(float); + void* data = allocateCpuMemory(bytes); + ASSERT_NE(data, nullptr); - // Delete the tensor - this should NOT free the original memory - error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); + runMultiDimensionalFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CPU), 0); +} - // Test that the original memory is still accessible (proves tensor didn't own - // it) - float pattern_value = 42.0f; - cudaError_t cuda_err = - cudaMemcpy(data, &pattern_value, sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to write to original CUDA memory after tensor deletion"; - - float readback_value = 0.0f; - cuda_err = - cudaMemcpy(&readback_value, data, sizeof(float), cudaMemcpyDeviceToHost); - 
EXPECT_EQ(cuda_err, cudaSuccess) - << "Should be able to read from original CUDA memory after tensor deletion"; - EXPECT_EQ(readback_value, pattern_value) - << "Original CUDA memory should still contain our test pattern"; +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, CustomStrides_CPU) { + size_t bytes = 12 * sizeof(float); + void* data = allocateCpuMemory(bytes); + ASSERT_NE(data, nullptr); + + runCustomStridesFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CPU), 0); } -// Test multi-dimensional tensors -TEST_F(AOTITorchCreateTensorFromBlobV2Test, MultiDimensionalTensors) { - // Test 3D tensor - std::vector sizes_3d = {2, 3, 4}; - std::vector strides_3d = calculate_contiguous_strides(sizes_3d); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, StorageOffset_CPU) { + // Allocate extra space for offset + size_t bytes = 6 * sizeof(float); // 2 for offset + 4 for tensor + void* data = allocateCpuMemory(bytes); + ASSERT_NE(data, nullptr); - size_t bytes_3d = calculate_numel(sizes_3d) * sizeof(float); - void* data_3d = allocate_cuda_memory(bytes_3d); - ASSERT_NE(data_3d, nullptr); + runStorageOffsetFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CPU), 0); +} - Tensor* tensor_3d; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data_3d, - sizes_3d.size(), - sizes_3d.data(), - strides_3d.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_3d, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size +// ============================================================================ +// CUDA Tests +// ============================================================================ - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_3d, nullptr); - EXPECT_EQ(tensor_3d->dim(), 3); - EXPECT_EQ(tensor_3d->size(0), 2); - EXPECT_EQ(tensor_3d->size(1), 3); - EXPECT_EQ(tensor_3d->size(2), 4); - - // Test 4D tensor - std::vector sizes_4d = 
{2, 3, 4, 5}; - std::vector strides_4d = calculate_contiguous_strides(sizes_4d); - - size_t bytes_4d = calculate_numel(sizes_4d) * sizeof(float); - void* data_4d = allocate_cuda_memory(bytes_4d); - ASSERT_NE(data_4d, nullptr); - - Tensor* tensor_4d; - error = aoti_torch_create_tensor_from_blob_v2( - data_4d, - sizes_4d.size(), - sizes_4d.data(), - strides_4d.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_4d, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, BasicFunctionality_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_4d, nullptr); - EXPECT_EQ(tensor_4d->dim(), 4); - EXPECT_EQ(tensor_4d->size(0), 2); - EXPECT_EQ(tensor_4d->size(1), 3); - EXPECT_EQ(tensor_4d->size(2), 4); - EXPECT_EQ(tensor_4d->size(3), 5); + size_t bytes = 6 * sizeof(float); + void* data = allocateCudaMemory(bytes); + ASSERT_NE(data, nullptr); + + runBasicFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CUDA), 0); } -// Test tensor data pointer consistency -TEST_F(AOTITorchCreateTensorFromBlobV2Test, DataPointerConsistency) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, ScalarTensor_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* original_data = allocate_cuda_memory(bytes); - ASSERT_NE(original_data, nullptr); + size_t bytes = sizeof(float); + void* data = allocateCudaMemory(bytes); + ASSERT_NE(data, nullptr); - Tensor* tensor; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - original_data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - 
static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size + runScalarFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CUDA), 0); +} - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, MultiDimensional_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + size_t bytes = 24 * sizeof(float); + void* data = allocateCudaMemory(bytes); + ASSERT_NE(data, nullptr); - // Check that the tensor uses the same data pointer - void* tensor_data = tensor->mutable_data_ptr(); - EXPECT_EQ(tensor_data, original_data); + runMultiDimensionalFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CUDA), 0); } -// Test creating multiple tensors from different blobs -TEST_F(AOTITorchCreateTensorFromBlobV2Test, MultipleTensorsFromBlobs) { - const int num_tensors = 5; - std::vector tensors; - std::vector data_ptrs; - - for (int i = 0; i < num_tensors; i++) { - std::vector sizes = {i + 1, i + 2}; - std::vector strides = calculate_contiguous_strides(sizes); - - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); - ASSERT_NE(data, nullptr); - data_ptrs.push_back(data); - - Tensor* tensor; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); - tensors.push_back(tensor); - - // Verify dimensions - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), i + 1); - EXPECT_EQ(tensor->size(1), i + 2); - - // Verify the tensor uses the correct data pointer - EXPECT_EQ(tensor->mutable_data_ptr(), data); 
+TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, CustomStrides_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; } - // Verify all tensors have different data pointers - for (int i = 0; i < num_tensors; i++) { - EXPECT_EQ(tensors[i]->mutable_data_ptr(), data_ptrs[i]); - for (int j = i + 1; j < num_tensors; j++) { - EXPECT_NE(tensors[i]->mutable_data_ptr(), tensors[j]->mutable_data_ptr()); - } + size_t bytes = 12 * sizeof(float); + void* data = allocateCudaMemory(bytes); + ASSERT_NE(data, nullptr); + + runCustomStridesFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CUDA), 0); +} + +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, StorageOffset_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; } + + // Allocate extra space for offset + size_t bytes = 6 * sizeof(float); + void* data = allocateCudaMemory(bytes); + ASSERT_NE(data, nullptr); + + runStorageOffsetFromBlobTest( + this, data, static_cast(slim_c10::DeviceType::CUDA), 0); } -// Test deletion of tensor created from blob (should not free the original -// memory) -TEST_F(AOTITorchCreateTensorFromBlobV2Test, DeletionDoesNotFreeOriginalMemory) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); +// ============================================================================ +// Verify Non-Owning Behavior +// ============================================================================ - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NonOwningBehavior_CPU) { + size_t bytes = 6 * sizeof(float); + void* data = allocateCpuMemory(bytes); ASSERT_NE(data, nullptr); - Tensor* tensor; + // Write a pattern + float* float_data = static_cast(data); + float_data[0] = 42.0f; + + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + + Tensor* tensor = nullptr; AOTITorchError error = 
aoti_torch_create_tensor_from_blob_v2( data, sizes.size(), sizes.data(), strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + 0, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0, &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size + 0, + nullptr, + 0); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + ASSERT_NE(tensor, nullptr); - // Delete the tensor - this should NOT free the original memory - error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); + // Delete tensor - memory should NOT be freed + delete tensor; + tensor = nullptr; - // The original memory should still be valid (we'll free it in teardown) - // We can't easily test if the memory is still valid without risking crashes, - // but the test should pass without issues if memory management is correct + // Memory should still be accessible + EXPECT_FLOAT_EQ(float_data[0], 42.0f); } -// Test with opaque metadata -TEST_F(AOTITorchCreateTensorFromBlobV2Test, WithOpaqueMetadata) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NonOwningBehavior_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); + size_t bytes = 6 * sizeof(float); + void* data = allocateCudaMemory(bytes); ASSERT_NE(data, nullptr); - // Create some opaque metadata - std::vector metadata = {0x01, 0x02, 0x03, 0x04}; + // Write a pattern + float pattern = 42.0f; + cudaMemcpy(data, &pattern, sizeof(float), cudaMemcpyHostToDevice); - Tensor* tensor; + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( data, 
sizes.size(), sizes.data(), strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + 0, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0, &tensor, - 0, // layout - metadata.data(), // opaque_metadata - metadata.size()); // opaque_metadata_size + 0, + nullptr, + 0); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + ASSERT_NE(tensor, nullptr); - // Check tensor properties - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); -} + // Delete tensor - memory should NOT be freed + delete tensor; + tensor = nullptr; -// Test stress test with many small tensors from blobs -TEST_F(AOTITorchCreateTensorFromBlobV2Test, StressTestManySmallTensors) { - const int num_tensors = 50; // Reduced for reasonable test time - std::vector tensors; + // Memory should still be accessible + float readback = 0.0f; + cudaError_t cuda_err = + cudaMemcpy(&readback, data, sizeof(float), cudaMemcpyDeviceToHost); + EXPECT_EQ(cuda_err, cudaSuccess); + EXPECT_FLOAT_EQ(readback, 42.0f); +} - for (int i = 0; i < num_tensors; i++) { - std::vector sizes = {1, 1}; // Minimal size - std::vector strides = calculate_contiguous_strides(sizes); +// ============================================================================ +// Error Cases +// ============================================================================ - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* data = allocate_cuda_memory(bytes); - if (data == nullptr) { - // Skip if we run out of memory - continue; - } +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NullDataPointer) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - 
static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - if (error == Error::Ok && tensor != nullptr) { - tensors.push_back(tensor); - - // Verify the tensor uses the correct data pointer - EXPECT_EQ(tensor->mutable_data_ptr(), data); - } - } + Tensor* tensor = nullptr; + AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( + nullptr, // null data + sizes.size(), + sizes.data(), + strides.data(), + 0, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0, + &tensor, + 0, + nullptr, + 0); - // Delete all created tensors - for (Tensor* tensor : tensors) { - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); - } + EXPECT_EQ(error, Error::InvalidArgument); } -// Test device type mismatch: CPU data with CUDA device request should fail -TEST_F(AOTITorchCreateTensorFromBlobV2Test, DeviceMismatchCPUDataCUDADevice) { +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NullReturnPointer) { + size_t bytes = 6 * sizeof(float); + void* data = allocateCpuMemory(bytes); + ASSERT_NE(data, nullptr); + std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); + std::vector strides = calculateContiguousStrides(sizes); + + AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( + data, + sizes.size(), + sizes.data(), + strides.data(), + 0, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0, + nullptr, // null return pointer + 0, + nullptr, + 0); + + EXPECT_EQ(error, Error::InvalidArgument); +} - // Allocate CPU memory - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* cpu_data = allocate_cpu_memory(bytes); - ASSERT_NE(cpu_data, nullptr); +// ============================================================================ +// Verify Device Properties +// 
============================================================================ + +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, VerifyCPUDevice) { + size_t bytes = 6 * sizeof(float); + void* data = allocateCpuMemory(bytes); + ASSERT_NE(data, nullptr); + + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor; - // Request CUDA device but provide CPU memory - should fail + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - cpu_data, + data, sizes.size(), sizes.data(), strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), // Request CUDA - 0, // device index + 0, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0, &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size + 0, + nullptr, + 0); + + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); + + EXPECT_TRUE(tensor->is_cpu()); + EXPECT_FALSE(tensor->is_cuda()); + EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CPU); - EXPECT_EQ(error, Error::InvalidArgument) - << "Should fail when CPU data is provided but CUDA device is requested"; + delete tensor; } -// Test device type mismatch: CUDA data with CPU device request should fail -TEST_F(AOTITorchCreateTensorFromBlobV2Test, DeviceMismatchCUDADataCPUDevice) { - std::vector sizes = {2, 3}; - std::vector strides = calculate_contiguous_strides(sizes); +TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, VerifyCUDADevice) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - // Allocate CUDA memory (device memory, not managed) - size_t bytes = calculate_numel(sizes) * sizeof(float); - void* cuda_data = nullptr; - cudaError_t cuda_err = cudaMalloc(&cuda_data, bytes); - ASSERT_EQ(cuda_err, cudaSuccess); - ASSERT_NE(cuda_data, nullptr); + size_t bytes = 6 * sizeof(float); + void* data = allocateCudaMemory(bytes); + 
ASSERT_NE(data, nullptr); + + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor; - // Request CPU device but provide CUDA memory - should fail + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - cuda_data, + data, sizes.size(), sizes.data(), strides.data(), - 0, // storage_offset - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CPU), // Request CPU - 0, // device index + 0, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0, &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size + 0, + nullptr, + 0); + + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); - EXPECT_EQ(error, Error::InvalidArgument) - << "Should fail when CUDA data is provided but CPU device is requested"; + EXPECT_FALSE(tensor->is_cpu()); + EXPECT_TRUE(tensor->is_cuda()); + EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CUDA); - // Clean up the CUDA memory we allocated directly - cudaFree(cuda_data); + delete tensor; } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2_slim.cpp deleted file mode 100644 index 21f8c79cc46..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_create_tensor_from_blob_v2_slim.cpp +++ /dev/null @@ -1,633 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -// Helper to check if CUDA is available -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -// Helper to calculate contiguous strides from sizes -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -// Helper to calculate numel from sizes -int64_t calculateNumel(const std::vector& sizes) { - int64_t numel = 1; - for (int64_t size : sizes) { - numel *= size; - } - return numel; -} - -} // namespace - -// Test fixture for SlimTensor-based aoti_torch_create_tensor_from_blob_v2 tests -class AOTITorchCreateTensorFromBlobV2SlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - void TearDown() override { - // Clean up tensors - for (Tensor* t : tensors_) { - delete t; - } - tensors_.clear(); - - // Clean up CUDA memory - for (void* ptr : cuda_memory_) { - if (ptr != nullptr) { - cudaFree(ptr); - } - } - cuda_memory_.clear(); - - // Clean up CPU memory - for (void* ptr : cpu_memory_) { - if (ptr != nullptr) { - free(ptr); - } - } - cpu_memory_.clear(); - } - - void* allocateCudaMemory(size_t bytes) { - void* ptr = nullptr; - cudaError_t err = cudaMalloc(&ptr, bytes); - if (err == cudaSuccess && ptr != nullptr) { - cuda_memory_.push_back(ptr); - } - return ptr; - } - - void* allocateCpuMemory(size_t bytes) { - void* ptr = nullptr; - int result = posix_memalign(&ptr, 16, bytes); - if (result == 0 && ptr != 
nullptr) { - cpu_memory_.push_back(ptr); - } - return ptr; - } - - void trackTensor(Tensor* t) { - if (t != nullptr) { - tensors_.push_back(t); - } - } - - private: - std::vector tensors_; - std::vector cuda_memory_; - std::vector cpu_memory_; -}; - -// ============================================================================ -// Common test body - parameterized by device type -// ============================================================================ - -void runBasicFromBlobTest( - AOTITorchCreateTensorFromBlobV2SlimTest* fixture, - void* data, - int32_t device_type, - int32_t device_index) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - // Check tensor properties - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); - EXPECT_EQ(tensor->numel(), 6); - EXPECT_EQ( - static_cast(tensor->dtype()), - static_cast(slim_c10::ScalarType::Float)); - - // Verify the tensor uses the same data pointer (non-owning) - EXPECT_EQ(tensor->data_ptr(), data); - - // Cleanup - tensor should NOT free the original memory - delete tensor; -} - -void runScalarFromBlobTest( - AOTITorchCreateTensorFromBlobV2SlimTest* fixture, - void* data, - int32_t device_type, - int32_t device_index) { - std::vector sizes = {}; // 0D tensor - std::vector strides = {}; - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - 
&tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 0); - EXPECT_EQ(tensor->numel(), 1); - EXPECT_EQ(tensor->data_ptr(), data); - - delete tensor; -} - -void runMultiDimensionalFromBlobTest( - AOTITorchCreateTensorFromBlobV2SlimTest* fixture, - void* data, - int32_t device_type, - int32_t device_index) { - std::vector sizes = {2, 3, 4}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); - EXPECT_EQ(tensor->size(2), 4); - EXPECT_EQ(tensor->numel(), 24); - EXPECT_EQ(tensor->data_ptr(), data); - - delete tensor; -} - -void runCustomStridesFromBlobTest( - AOTITorchCreateTensorFromBlobV2SlimTest* fixture, - void* data, - int32_t device_type, - int32_t device_index) { - std::vector sizes = {3, 4}; - std::vector strides = {1, 3}; // Column-major - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, // storage_offset - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->stride(0), 1); - EXPECT_EQ(tensor->stride(1), 3); - EXPECT_FALSE(tensor->is_contiguous()); - EXPECT_EQ(tensor->data_ptr(), data); - - delete tensor; -} - -void runStorageOffsetFromBlobTest( - 
AOTITorchCreateTensorFromBlobV2SlimTest* fixture, - void* data, - int32_t device_type, - int32_t device_index) { - std::vector sizes = {2, 2}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 2, // storage_offset = 2 elements - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor, - 0, // layout - nullptr, // opaque_metadata - 0); // opaque_metadata_size - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->storage_offset(), 2); - // data_ptr should point to base + offset * itemsize - char* expected_ptr = static_cast(data) + 2 * sizeof(float); - EXPECT_EQ(tensor->data_ptr(), expected_ptr); - - delete tensor; -} - -// ============================================================================ -// CPU Tests -// ============================================================================ - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, BasicFunctionality_CPU) { - size_t bytes = 6 * sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - runBasicFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, ScalarTensor_CPU) { - size_t bytes = sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - runScalarFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, MultiDimensional_CPU) { - size_t bytes = 24 * sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - runMultiDimensionalFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, CustomStrides_CPU) { - size_t bytes = 12 * sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, 
nullptr); - - runCustomStridesFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, StorageOffset_CPU) { - // Allocate extra space for offset - size_t bytes = 6 * sizeof(float); // 2 for offset + 4 for tensor - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - runStorageOffsetFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CPU), 0); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, BasicFunctionality_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - size_t bytes = 6 * sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - runBasicFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, ScalarTensor_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - size_t bytes = sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - runScalarFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, MultiDimensional_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - size_t bytes = 24 * sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - runMultiDimensionalFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, CustomStrides_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - size_t bytes = 12 * sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - runCustomStridesFromBlobTest( - this, data, 
static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, StorageOffset_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - // Allocate extra space for offset - size_t bytes = 6 * sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - runStorageOffsetFromBlobTest( - this, data, static_cast(slim_c10::DeviceType::CUDA), 0); -} - -// ============================================================================ -// Verify Non-Owning Behavior -// ============================================================================ - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NonOwningBehavior_CPU) { - size_t bytes = 6 * sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - // Write a pattern - float* float_data = static_cast(data); - float_data[0] = 42.0f; - - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0, - &tensor, - 0, - nullptr, - 0); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - // Delete tensor - memory should NOT be freed - delete tensor; - tensor = nullptr; - - // Memory should still be accessible - EXPECT_FLOAT_EQ(float_data[0], 42.0f); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NonOwningBehavior_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - size_t bytes = 6 * sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - // Write a pattern - float pattern = 42.0f; - cudaMemcpy(data, &pattern, sizeof(float), cudaMemcpyHostToDevice); - - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = 
nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0, - &tensor, - 0, - nullptr, - 0); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - // Delete tensor - memory should NOT be freed - delete tensor; - tensor = nullptr; - - // Memory should still be accessible - float readback = 0.0f; - cudaError_t cuda_err = - cudaMemcpy(&readback, data, sizeof(float), cudaMemcpyDeviceToHost); - EXPECT_EQ(cuda_err, cudaSuccess); - EXPECT_FLOAT_EQ(readback, 42.0f); -} - -// ============================================================================ -// Error Cases -// ============================================================================ - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NullDataPointer) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - nullptr, // null data - sizes.size(), - sizes.data(), - strides.data(), - 0, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0, - &tensor, - 0, - nullptr, - 0); - - EXPECT_EQ(error, Error::InvalidArgument); -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, NullReturnPointer) { - size_t bytes = 6 * sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0, - nullptr, // null return pointer - 0, - nullptr, - 0); - - EXPECT_EQ(error, Error::InvalidArgument); -} - -// 
============================================================================ -// Verify Device Properties -// ============================================================================ - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, VerifyCPUDevice) { - size_t bytes = 6 * sizeof(float); - void* data = allocateCpuMemory(bytes); - ASSERT_NE(data, nullptr); - - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0, - &tensor, - 0, - nullptr, - 0); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_TRUE(tensor->is_cpu()); - EXPECT_FALSE(tensor->is_cuda()); - EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CPU); - - delete tensor; -} - -TEST_F(AOTITorchCreateTensorFromBlobV2SlimTest, VerifyCUDADevice) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - size_t bytes = 6 * sizeof(float); - void* data = allocateCudaMemory(bytes); - ASSERT_NE(data, nullptr); - - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_create_tensor_from_blob_v2( - data, - sizes.size(), - sizes.data(), - strides.data(), - 0, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0, - &tensor, - 0, - nullptr, - 0); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_FALSE(tensor->is_cpu()); - EXPECT_TRUE(tensor->is_cuda()); - EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CUDA); - - delete tensor; -} diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object.cpp index 10c8d8c1a31..e88ebb3185c 100644 
--- a/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object.cpp @@ -7,64 +7,70 @@ */ #include -#include -#include -#include -#include -#include -#include #include #include -using namespace executorch::backends::aoti; +#include +#include +#include +#include +#include + using namespace executorch::backends::cuda; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +using executorch::runtime::Error; -// Test fixture for aoti_torch_delete_tensor_object tests -class AOTITorchDeleteTensorObjectTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace { - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - // Clear any remaining tensors from previous tests - clear_all_tensors(); +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; + } + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; } + return strides; +} - void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); +} // namespace - // Clear the global tensor storage using the provided function - clear_all_tensors(); +class AOTITorchDeleteTensorObjectSlimTest : public ::testing::Test { + protected: + void SetUp() override 
{ + et_pal_init(); } - // Helper to create test tensors - Tensor* create_test_tensor( + void TearDown() override { + // SlimTensor uses automatic reference counting - no manual cleanup needed + } + + Tensor* createTestTensor( const std::vector& sizes, const std::vector& strides = {}, - int32_t dtype = 6, // float32 - int32_t device_type = 1, // CUDA + int32_t dtype = static_cast(slim_c10::ScalarType::Float), + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), int32_t device_index = 0) { - Tensor* tensor; + Tensor* tensor = nullptr; - const int64_t* strides_ptr = strides.empty() ? nullptr : strides.data(); + std::vector effective_strides = strides; + if (strides.empty()) { + effective_strides = calculateContiguousStrides(sizes); + } AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - strides_ptr, + effective_strides.data(), dtype, device_type, device_index, @@ -74,254 +80,241 @@ class AOTITorchDeleteTensorObjectTest : public ::testing::Test { } }; -// Test basic deletion of CUDA tensor -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteCudaTensorBasic) { - // Create a CUDA tensor +// ============================================================================ +// CPU Tests +// ============================================================================ + +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteCpuTensorBasic) { std::vector sizes = {2, 3}; - Tensor* tensor = create_test_tensor(sizes, {}, 6, 1, 0); // CUDA device + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(tensor, nullptr); - // Verify tensor properties before deletion EXPECT_EQ(tensor->dim(), 2); EXPECT_EQ(tensor->size(0), 2); EXPECT_EQ(tensor->size(1), 3); - // Delete the tensor AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } -// Test basic deletion of CPU tensor -TEST_F(AOTITorchDeleteTensorObjectTest, 
DeleteCpuTensorBasic) { - // Create a CPU tensor - std::vector sizes = {3, 4}; - Tensor* tensor = create_test_tensor(sizes, {}, 6, 0, 0); // CPU device - ASSERT_NE(tensor, nullptr); - - // Verify tensor properties before deletion - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 3); - EXPECT_EQ(tensor->size(1), 4); - - // Delete the tensor - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -// Test deletion of null tensor pointer -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteNullTensor) { +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteNullTensor) { AOTITorchError error = aoti_torch_delete_tensor_object(nullptr); EXPECT_EQ(error, Error::InvalidArgument); } -// Test deletion of tensor not in tracking system -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteUntrackedTensor) { - // Create a tensor and then clear the tracking system - std::vector sizes = {2, 3}; - Tensor* tensor = create_test_tensor(sizes); - ASSERT_NE(tensor, nullptr); - - // Clear the tracking system (simulating an untracked tensor) - clear_all_tensors(); - - // Try to delete the tensor - should fail - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::InvalidArgument); -} - -// Test deletion of multiple tensors -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteMultipleTensors) { - // Create multiple tensors +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteMultipleTensors_CPU) { std::vector tensors; for (int i = 1; i <= 5; i++) { std::vector sizes = {i, i + 1}; - Tensor* tensor = create_test_tensor(sizes); + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(tensor, nullptr); tensors.push_back(tensor); } - // Delete all tensors for (Tensor* tensor : tensors) { AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } } -// Test deletion of zero-sized tensors 
-TEST_F(AOTITorchDeleteTensorObjectTest, DeleteZeroSizedTensor) { - // Create a zero-sized tensor +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteZeroSizedTensor_CPU) { std::vector sizes = {0, 5}; - Tensor* tensor = create_test_tensor(sizes); + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(tensor, nullptr); - // Verify tensor properties EXPECT_EQ(tensor->dim(), 2); EXPECT_EQ(tensor->size(0), 0); EXPECT_EQ(tensor->size(1), 5); + EXPECT_EQ(tensor->numel(), 0); - // Delete the tensor AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } -// Test deletion of scalar (0D) tensors -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteScalarTensor) { - // Create a scalar tensor +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteScalarTensor_CPU) { std::vector sizes = {}; - Tensor* tensor = create_test_tensor(sizes); + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(tensor, nullptr); - // Verify tensor properties EXPECT_EQ(tensor->dim(), 0); + EXPECT_EQ(tensor->numel(), 1); - // Delete the tensor AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } -// Test deletion of large multi-dimensional tensors -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteLargeTensor) { - // Create a large multi-dimensional tensor +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteLargeTensor_CPU) { std::vector sizes = {10, 20, 30}; - Tensor* tensor = create_test_tensor(sizes); + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(tensor, nullptr); - // Verify tensor properties EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->size(0), 10); - EXPECT_EQ(tensor->size(1), 20); - EXPECT_EQ(tensor->size(2), 30); + 
EXPECT_EQ(tensor->numel(), 6000); - // Delete the tensor AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } -// Test deletion of tensors with custom strides -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteTensorWithCustomStrides) { - // Create tensor with custom strides +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteTensorWithCustomStrides_CPU) { std::vector sizes = {3, 4}; - std::vector strides = {4, 1}; // Row-major strides - Tensor* tensor = create_test_tensor(sizes, strides); + std::vector strides = {1, 3}; // Column-major + Tensor* tensor = createTestTensor( + sizes, + strides, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(tensor, nullptr); - // Verify tensor properties - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 3); - EXPECT_EQ(tensor->size(1), 4); + EXPECT_EQ(tensor->stride(0), 1); + EXPECT_EQ(tensor->stride(1), 3); - // Delete the tensor AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } -// Test deletion after accessing tensor data -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteAfterDataAccess) { - // Create a tensor +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteDifferentDtypes_CPU) { std::vector sizes = {2, 3}; - Tensor* tensor = create_test_tensor(sizes); - ASSERT_NE(tensor, nullptr); - // Access tensor data (this should not prevent deletion) - void* data_ptr = tensor->mutable_data_ptr(); - EXPECT_NE(data_ptr, nullptr); - - // Delete the tensor - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} + // Float + { + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); + } -// Test double deletion (should fail on second attempt) 
-TEST_F(AOTITorchDeleteTensorObjectTest, DoubleDeletion) { - // Create a tensor - std::vector sizes = {2, 3}; - Tensor* tensor = create_test_tensor(sizes); - ASSERT_NE(tensor, nullptr); + // BFloat16 + { + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::BFloat16), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); + } - // First deletion should succeed - AOTITorchError error1 = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error1, Error::Ok); + // Long + { + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); + } - // Second deletion should fail (tensor no longer tracked) - AOTITorchError error2 = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error2, Error::InvalidArgument); + // Bool + { + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Bool), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); + } } -// Test deletion of tensors on both CUDA and CPU devices -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteMixedDeviceTensors) { - // Create CUDA tensor - std::vector sizes = {2, 3}; - Tensor* cuda_tensor = create_test_tensor(sizes, {}, 6, 1, 0); - ASSERT_NE(cuda_tensor, nullptr); - - // Create CPU tensor - Tensor* cpu_tensor = create_test_tensor(sizes, {}, 6, 0, 0); - ASSERT_NE(cpu_tensor, nullptr); +// ============================================================================ +// CUDA Tests +// ============================================================================ - // Delete both tensors - AOTITorchError cuda_error = aoti_torch_delete_tensor_object(cuda_tensor); - EXPECT_EQ(cuda_error, Error::Ok); 
+TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteCudaTensorBasic) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - AOTITorchError cpu_error = aoti_torch_delete_tensor_object(cpu_tensor); - EXPECT_EQ(cpu_error, Error::Ok); -} + std::vector sizes = {2, 3}; + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(tensor, nullptr); -// Test memory consistency after deletion -TEST_F(AOTITorchDeleteTensorObjectTest, MemoryConsistencyAfterDeletion) { - // Create multiple tensors - std::vector tensors; - const int num_tensors = 10; + EXPECT_EQ(tensor->dim(), 2); + EXPECT_TRUE(tensor->is_cuda()); - for (int i = 0; i < num_tensors; i++) { - std::vector sizes = {i + 1, i + 2}; - Tensor* tensor = create_test_tensor(sizes); - ASSERT_NE(tensor, nullptr); - tensors.push_back(tensor); - } + AOTITorchError error = aoti_torch_delete_tensor_object(tensor); + EXPECT_EQ(error, Error::Ok); +} - // Delete every other tensor - for (int i = 0; i < num_tensors; i += 2) { - AOTITorchError error = aoti_torch_delete_tensor_object(tensors[i]); - EXPECT_EQ(error, Error::Ok); +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteMultipleTensors_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; } - // Delete remaining tensors - for (int i = 1; i < num_tensors; i += 2) { - AOTITorchError error = aoti_torch_delete_tensor_object(tensors[i]); - EXPECT_EQ(error, Error::Ok); - } -} - -// Test stress deletion with many small tensors -TEST_F(AOTITorchDeleteTensorObjectTest, StressDeletionManySmallTensors) { - const int num_tensors = 100; std::vector tensors; - // Create many small tensors - for (int i = 0; i < num_tensors; i++) { - std::vector sizes = {1, 1}; // Minimal size - Tensor* tensor = create_test_tensor(sizes); - if (tensor != nullptr) { - tensors.push_back(tensor); - } + for (int i = 1; i <= 5; i++) { + std::vector sizes = {i, i + 1}; + Tensor* 
tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(tensor, nullptr); + tensors.push_back(tensor); } - // Delete all created tensors for (Tensor* tensor : tensors) { AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); } } -// Test CUDA synchronization during deletion -TEST_F(AOTITorchDeleteTensorObjectTest, CudaSynchronizationDuringDeletion) { - // Create a larger CUDA tensor to ensure memory allocation +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteLargeTensor_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + std::vector sizes = {100, 100}; - Tensor* tensor = create_test_tensor(sizes, {}, 6, 1, 0); // CUDA device + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); ASSERT_NE(tensor, nullptr); - // Delete the tensor (should handle synchronization internally) AOTITorchError error = aoti_torch_delete_tensor_object(tensor); EXPECT_EQ(error, Error::Ok); @@ -330,125 +323,63 @@ TEST_F(AOTITorchDeleteTensorObjectTest, CudaSynchronizationDuringDeletion) { EXPECT_EQ(cuda_error, cudaSuccess); } -// Test specific deletion of bfloat16 tensors -TEST_F(AOTITorchDeleteTensorObjectTest, DeleteBFloat16Tensor) { - // Test 1D bfloat16 tensor deletion - std::vector sizes_1d = {10}; - Tensor* tensor_bf16_1d = create_test_tensor( - sizes_1d, - {}, - static_cast(SupportedDTypes::BFLOAT16), - 1, // CUDA device - 0); - ASSERT_NE(tensor_bf16_1d, nullptr); - - // Verify it's bfloat16 before deletion - int32_t actual_dtype; - EXPECT_EQ(aoti_torch_get_dtype(tensor_bf16_1d, &actual_dtype), Error::Ok); - EXPECT_EQ(actual_dtype, static_cast(SupportedDTypes::BFLOAT16)) - << "Expected bfloat16 dtype (" - << static_cast(SupportedDTypes::BFLOAT16) << "), got " - << actual_dtype; - - // Verify element size (bfloat16 should be 2 bytes per 
element) - EXPECT_EQ(tensor_bf16_1d->element_size(), 2); - - // Delete the bfloat16 tensor - AOTITorchError error = aoti_torch_delete_tensor_object(tensor_bf16_1d); - EXPECT_EQ(error, Error::Ok); - - // Test 2D bfloat16 tensor deletion with custom strides - std::vector sizes_2d = {4, 6}; - std::vector strides_2d = {6, 1}; // Row-major strides - Tensor* tensor_bf16_2d = create_test_tensor( - sizes_2d, - strides_2d, - static_cast(SupportedDTypes::BFLOAT16), - 1, // CUDA device - 0); - ASSERT_NE(tensor_bf16_2d, nullptr); - - // Verify tensor properties - EXPECT_EQ(tensor_bf16_2d->dim(), 2); - EXPECT_EQ(tensor_bf16_2d->size(0), 4); - EXPECT_EQ(tensor_bf16_2d->size(1), 6); - EXPECT_EQ(tensor_bf16_2d->element_size(), 2); - - // Verify it's bfloat16 - int32_t dtype_2d; - EXPECT_EQ(aoti_torch_get_dtype(tensor_bf16_2d, &dtype_2d), Error::Ok); - EXPECT_EQ(dtype_2d, static_cast(SupportedDTypes::BFLOAT16)); +TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteMixedDeviceTensors) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } - // Delete the 2D bfloat16 tensor - error = aoti_torch_delete_tensor_object(tensor_bf16_2d); - EXPECT_EQ(error, Error::Ok); + std::vector sizes = {2, 3}; - // Test 3D bfloat16 tensor deletion - std::vector sizes_3d = {2, 3, 4}; - Tensor* tensor_bf16_3d = create_test_tensor( - sizes_3d, + // Create CUDA tensor + Tensor* cuda_tensor = createTestTensor( + sizes, {}, - static_cast(SupportedDTypes::BFLOAT16), - 1, // CUDA device + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), 0); - ASSERT_NE(tensor_bf16_3d, nullptr); - - // Verify tensor properties - EXPECT_EQ(tensor_bf16_3d->dim(), 3); - EXPECT_EQ(tensor_bf16_3d->size(0), 2); - EXPECT_EQ(tensor_bf16_3d->size(1), 3); - EXPECT_EQ(tensor_bf16_3d->size(2), 4); - EXPECT_EQ(tensor_bf16_3d->element_size(), 2); - - // Verify memory size (2 * 3 * 4 * 2 bytes = 48 bytes) - size_t expected_memory = 2 * 3 * 4 * 2; - size_t actual_memory = - 
tensor_bf16_3d->numel() * tensor_bf16_3d->element_size(); - EXPECT_EQ(actual_memory, expected_memory); - - // Delete the 3D bfloat16 tensor - error = aoti_torch_delete_tensor_object(tensor_bf16_3d); - EXPECT_EQ(error, Error::Ok); + ASSERT_NE(cuda_tensor, nullptr); + EXPECT_TRUE(cuda_tensor->is_cuda()); - // Test bfloat16 scalar tensor (0D) deletion - std::vector scalar_sizes = {}; - Tensor* tensor_bf16_scalar = create_test_tensor( - scalar_sizes, + // Create CPU tensor + Tensor* cpu_tensor = createTestTensor( + sizes, {}, - static_cast(SupportedDTypes::BFLOAT16), - 1, // CUDA device + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), 0); - ASSERT_NE(tensor_bf16_scalar, nullptr); + ASSERT_NE(cpu_tensor, nullptr); + EXPECT_TRUE(cpu_tensor->is_cpu()); - // Verify scalar tensor properties - EXPECT_EQ(tensor_bf16_scalar->dim(), 0); - EXPECT_EQ(tensor_bf16_scalar->numel(), 1); - EXPECT_EQ(tensor_bf16_scalar->element_size(), 2); + // Delete both tensors + EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_tensor), Error::Ok); +} - // Delete the scalar bfloat16 tensor - error = aoti_torch_delete_tensor_object(tensor_bf16_scalar); - EXPECT_EQ(error, Error::Ok); +// ============================================================================ +// Stress Tests +// ============================================================================ - // Test zero-element bfloat16 tensor deletion - std::vector zero_sizes = {0, 5}; - Tensor* tensor_bf16_zero = create_test_tensor( - zero_sizes, - {}, - static_cast(SupportedDTypes::BFLOAT16), - 1, // CUDA device - 0); - ASSERT_NE(tensor_bf16_zero, nullptr); +TEST_F( + AOTITorchDeleteTensorObjectSlimTest, + StressDeletionManySmallTensors_CPU) { + const int num_tensors = 100; + std::vector tensors; - // Verify zero-element tensor properties - EXPECT_EQ(tensor_bf16_zero->dim(), 2); - EXPECT_EQ(tensor_bf16_zero->size(0), 0); - 
EXPECT_EQ(tensor_bf16_zero->size(1), 5); - EXPECT_EQ(tensor_bf16_zero->numel(), 0); - EXPECT_EQ(tensor_bf16_zero->element_size(), 2); + for (int i = 0; i < num_tensors; i++) { + std::vector sizes = {1, 1}; + Tensor* tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + if (tensor != nullptr) { + tensors.push_back(tensor); + } + } - // Delete the zero-element bfloat16 tensor - error = aoti_torch_delete_tensor_object(tensor_bf16_zero); - EXPECT_EQ(error, Error::Ok); + for (Tensor* tensor : tensors) { + AOTITorchError error = aoti_torch_delete_tensor_object(tensor); + EXPECT_EQ(error, Error::Ok); + } } - -// Test deletion of mixed dtype tensors (float32 and bfloat16) diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object_slim.cpp deleted file mode 100644 index e88ebb3185c..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_delete_tensor_object_slim.cpp +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -class AOTITorchDeleteTensorObjectSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - void TearDown() override { - // SlimTensor uses automatic reference counting - no manual cleanup needed - } - - Tensor* createTestTensor( - const std::vector& sizes, - const std::vector& strides = {}, - int32_t dtype = static_cast(slim_c10::ScalarType::Float), - int32_t device_type = static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector effective_strides = strides; - if (strides.empty()) { - effective_strides = calculateContiguousStrides(sizes); - } - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - effective_strides.data(), - dtype, - device_type, - device_index, - &tensor); - - return (error == Error::Ok) ? 
tensor : nullptr; - } -}; - -// ============================================================================ -// CPU Tests -// ============================================================================ - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteCpuTensorBasic) { - std::vector sizes = {2, 3}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteNullTensor) { - AOTITorchError error = aoti_torch_delete_tensor_object(nullptr); - EXPECT_EQ(error, Error::InvalidArgument); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteMultipleTensors_CPU) { - std::vector tensors; - - for (int i = 1; i <= 5; i++) { - std::vector sizes = {i, i + 1}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - tensors.push_back(tensor); - } - - for (Tensor* tensor : tensors) { - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); - } -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteZeroSizedTensor_CPU) { - std::vector sizes = {0, 5}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 0); - EXPECT_EQ(tensor->size(1), 5); - EXPECT_EQ(tensor->numel(), 0); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteScalarTensor_CPU) { - std::vector sizes = {}; - Tensor* 
tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 0); - EXPECT_EQ(tensor->numel(), 1); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteLargeTensor_CPU) { - std::vector sizes = {10, 20, 30}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->numel(), 6000); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteTensorWithCustomStrides_CPU) { - std::vector sizes = {3, 4}; - std::vector strides = {1, 3}; // Column-major - Tensor* tensor = createTestTensor( - sizes, - strides, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->stride(0), 1); - EXPECT_EQ(tensor->stride(1), 3); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteDifferentDtypes_CPU) { - std::vector sizes = {2, 3}; - - // Float - { - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); - } - - // BFloat16 - { - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::BFloat16), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); - } - - // Long - { - Tensor* tensor = createTestTensor( - sizes, - {}, 
- static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); - } - - // Bool - { - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Bool), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); - } -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteCudaTensorBasic) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 2); - EXPECT_TRUE(tensor->is_cuda()); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteMultipleTensors_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector tensors; - - for (int i = 1; i <= 5; i++) { - std::vector sizes = {i, i + 1}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(tensor, nullptr); - tensors.push_back(tensor); - } - - for (Tensor* tensor : tensors) { - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); - } -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteLargeTensor_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {100, 100}; - Tensor* tensor = createTestTensor( - sizes, - {}, - 
static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(tensor, nullptr); - - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); - - // Verify CUDA state is still good - cudaError_t cuda_error = cudaGetLastError(); - EXPECT_EQ(cuda_error, cudaSuccess); -} - -TEST_F(AOTITorchDeleteTensorObjectSlimTest, DeleteMixedDeviceTensors) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - - // Create CUDA tensor - Tensor* cuda_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(cuda_tensor, nullptr); - EXPECT_TRUE(cuda_tensor->is_cuda()); - - // Create CPU tensor - Tensor* cpu_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(cpu_tensor, nullptr); - EXPECT_TRUE(cpu_tensor->is_cpu()); - - // Delete both tensors - EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_tensor), Error::Ok); -} - -// ============================================================================ -// Stress Tests -// ============================================================================ - -TEST_F( - AOTITorchDeleteTensorObjectSlimTest, - StressDeletionManySmallTensors_CPU) { - const int num_tensors = 100; - std::vector tensors; - - for (int i = 0; i < num_tensors; i++) { - std::vector sizes = {1, 1}; - Tensor* tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - if (tensor != nullptr) { - tensors.push_back(tensor); - } - } - - for (Tensor* tensor : tensors) { - AOTITorchError error = aoti_torch_delete_tensor_object(tensor); - EXPECT_EQ(error, Error::Ok); - } -} diff --git 
a/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided.cpp index 799a8d1221b..d563eea98bc 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided.cpp @@ -7,661 +7,461 @@ */ #include -#include -#include -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include + using namespace executorch::backends::cuda; -using namespace executorch::backends::aoti; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +using executorch::runtime::Error; -// Test fixture for aoti_torch_empty_strided tests -class AOTITorchEmptyStridedTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace { - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +// Helper to check if CUDA is available +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - // Clear any remaining tensors from previous tests - clear_all_tensors(); +// Helper to calculate contiguous strides from sizes +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; } - - void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); - - // Clear the global tensor storage using the provided function - clear_all_tensors(); + strides[sizes.size() - 1] = 1; + for 
(int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; } + return strides; +} - // Helper to create test tensors - Tensor* create_tracked_tensor( - const std::vector& sizes, - const std::vector& strides = {}, - int32_t dtype = static_cast(SupportedDTypes::FLOAT32), - int32_t device_type = static_cast(SupportedDevices::CUDA), - int32_t device_index = 0) { - Tensor* tensor; +} // namespace - const int64_t* strides_ptr = strides.empty() ? nullptr : strides.data(); +// Test fixture for SlimTensor-based aoti_torch_empty_strided tests +class AOTITorchEmptyStridedSlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); + } - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides_ptr, - dtype, - device_type, - device_index, - &tensor); + void TearDown() override { + // Tensors are cleaned up via their destructors + for (Tensor* t : tensors_) { + delete t; + } + tensors_.clear(); + } - return (error == Error::Ok) ? 
tensor : nullptr; + // Track tensors for cleanup + void trackTensor(Tensor* t) { + if (t != nullptr) { + tensors_.push_back(t); + } } + + private: + std::vector tensors_; }; -// Test aoti_torch_empty_strided basic functionality -TEST_F(AOTITorchEmptyStridedTest, BasicFunctionality) { +// ============================================================================ +// Common test body - parameterized by device type +// ============================================================================ + +void runBasicEmptyStridedTest(int32_t device_type, int32_t device_index) { // Test 1D tensor std::vector sizes_1d = {5}; - Tensor* tensor_1d; + std::vector strides_1d = calculateContiguousStrides(sizes_1d); + + Tensor* tensor_1d = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes_1d.size(), sizes_1d.data(), - nullptr, // Let function compute strides - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + strides_1d.data(), + static_cast(slim_c10::ScalarType::Float), // dtype = 6 + device_type, + device_index, &tensor_1d); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_1d, nullptr); - - // CRITICAL: Verify the tensor is actually float32 - int32_t actual_dtype; - EXPECT_EQ(aoti_torch_get_dtype(tensor_1d, &actual_dtype), Error::Ok); - EXPECT_EQ(actual_dtype, static_cast(SupportedDTypes::FLOAT32)) - << "Expected float32 dtype (" - << static_cast(SupportedDTypes::FLOAT32) << "), got " - << actual_dtype; - - // Verify element size (float32 should be 4 bytes per element) - size_t element_size = tensor_1d->element_size(); - EXPECT_EQ(element_size, 4) - << "Expected float32 element size to be 4 bytes, got " << element_size; - - // Verify total number of elements and memory usage - int64_t expected_numel = 5; // 5 elements - EXPECT_EQ(tensor_1d->numel(), expected_numel) - << "Expected " << expected_numel << " elements, got " - << tensor_1d->numel(); - - // Verify total memory size (numel * element_size) - size_t 
expected_memory_size = expected_numel * 4; // 5 * 4 = 20 bytes - size_t actual_memory_size = tensor_1d->numel() * tensor_1d->element_size(); - EXPECT_EQ(actual_memory_size, expected_memory_size) - << "Expected " << expected_memory_size << " bytes, got " - << actual_memory_size; + ASSERT_NE(tensor_1d, nullptr); // Check tensor properties EXPECT_EQ(tensor_1d->dim(), 1); EXPECT_EQ(tensor_1d->size(0), 5); + EXPECT_EQ(tensor_1d->numel(), 5); + EXPECT_EQ( + static_cast(tensor_1d->dtype()), + static_cast(slim_c10::ScalarType::Float)); + EXPECT_NE(tensor_1d->data_ptr(), nullptr); - // Test 2D tensor with explicit strides - std::vector sizes_2d = {3, 4}; - std::vector strides_2d = {4, 1}; - Tensor* tensor_2d; - error = aoti_torch_empty_strided( - sizes_2d.size(), - sizes_2d.data(), - strides_2d.data(), - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_2d); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_2d, nullptr); - - // Verify 2D tensor is also float32 - int32_t dtype_2d; - EXPECT_EQ(aoti_torch_get_dtype(tensor_2d, &dtype_2d), Error::Ok); - EXPECT_EQ(dtype_2d, static_cast(SupportedDTypes::FLOAT32)) - << "Expected float32 dtype (" - << static_cast(SupportedDTypes::FLOAT32) << "), got " - << dtype_2d; - - // Verify element size for 2D tensor - EXPECT_EQ(tensor_2d->element_size(), 4); - - // Check tensor properties - EXPECT_EQ(tensor_2d->dim(), 2); - EXPECT_EQ(tensor_2d->size(0), 3); - EXPECT_EQ(tensor_2d->size(1), 4); - - // Verify memory size for 2D tensor - int64_t expected_numel_2d = 3 * 4; // 12 elements - size_t expected_memory_2d = expected_numel_2d * 4; // 12 * 4 = 48 bytes - EXPECT_EQ(tensor_2d->numel() * tensor_2d->element_size(), expected_memory_2d); + // Cleanup + delete tensor_1d; } -// Test aoti_torch_empty_strided with CPU device -TEST_F(AOTITorchEmptyStridedTest, CPUDevice) { - std::vector sizes = {2, 3}; - Tensor* tensor; +void runMultiDimensionalEmptyStridedTest( + int32_t device_type, + 
int32_t device_index) { + // Test 3D tensor + std::vector sizes = {2, 3, 4}; + std::vector strides = calculateContiguousStrides(sizes); + + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - nullptr, // Let function compute strides - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CPU), - 0, // device index + strides.data(), + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + ASSERT_NE(tensor, nullptr); // Check tensor properties - EXPECT_EQ(tensor->dim(), 2); + EXPECT_EQ(tensor->dim(), 3); EXPECT_EQ(tensor->size(0), 2); EXPECT_EQ(tensor->size(1), 3); -} + EXPECT_EQ(tensor->size(2), 4); + EXPECT_EQ(tensor->numel(), 24); -// Test aoti_torch_empty_strided with invalid dtype -TEST_F(AOTITorchEmptyStridedTest, InvalidDtype) { - std::vector sizes = {2, 3}; - Tensor* tensor; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - nullptr, - 999, // invalid dtype - 1, // CUDA device - 0, // device index - &tensor); + // Check strides + EXPECT_EQ(tensor->stride(0), 12); + EXPECT_EQ(tensor->stride(1), 4); + EXPECT_EQ(tensor->stride(2), 1); - EXPECT_EQ(error, Error::InvalidArgument); + delete tensor; } -// Test aoti_torch_empty_strided with unsupported device -TEST_F(AOTITorchEmptyStridedTest, UnsupportedDevice) { - std::vector sizes = {2, 3}; - Tensor* tensor; +void runScalarTensorEmptyStridedTest( + int32_t device_type, + int32_t device_index) { + std::vector sizes = {}; // 0D tensor + std::vector strides = {}; + + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - nullptr, - 6, // float32 - 2, // unsupported device type - 0, // device index + strides.data(), + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor); - EXPECT_EQ(error, Error::NotImplemented); + EXPECT_EQ(error, Error::Ok); + 
ASSERT_NE(tensor, nullptr); + + EXPECT_EQ(tensor->dim(), 0); + EXPECT_EQ(tensor->numel(), 1); + EXPECT_NE(tensor->data_ptr(), nullptr); + + delete tensor; } -// Test aoti_torch_empty_strided with zero-sized tensor -TEST_F(AOTITorchEmptyStridedTest, ZeroSized) { +void runZeroSizedTensorEmptyStridedTest( + int32_t device_type, + int32_t device_index) { std::vector sizes = {0, 5}; - Tensor* tensor; + std::vector strides = calculateContiguousStrides(sizes); + + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - nullptr, - 6, // float32 - 1, // CUDA device - 0, // device index + strides.data(), + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + ASSERT_NE(tensor, nullptr); - // Check tensor properties EXPECT_EQ(tensor->dim(), 2); EXPECT_EQ(tensor->size(0), 0); EXPECT_EQ(tensor->size(1), 5); + EXPECT_EQ(tensor->numel(), 0); + + delete tensor; } -// Test aoti_torch_empty_strided scalar tensor (0D) -TEST_F(AOTITorchEmptyStridedTest, Scalar) { - std::vector sizes = {}; - Tensor* tensor; +void runCustomStridesEmptyStridedTest( + int32_t device_type, + int32_t device_index) { + // Create a transposed (column-major) tensor + std::vector sizes = {3, 4}; + std::vector strides = {1, 3}; // Column-major + + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - nullptr, - 6, // float32 - 1, // CUDA device - 0, // device index + strides.data(), + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, &tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); - - // Check tensor properties - EXPECT_EQ(tensor->dim(), 0); -} + ASSERT_NE(tensor, nullptr); -// Test aoti_torch_empty_strided with large tensor -TEST_F(AOTITorchEmptyStridedTest, LargeTensor) { - std::vector sizes = {100, 200, 50}; - Tensor* tensor; - AOTITorchError error = aoti_torch_empty_strided( - 
sizes.size(), - sizes.data(), - nullptr, - 6, // float32 - 1, // CUDA device - 0, // device index - &tensor); + EXPECT_EQ(tensor->dim(), 2); + EXPECT_EQ(tensor->size(0), 3); + EXPECT_EQ(tensor->size(1), 4); + EXPECT_EQ(tensor->stride(0), 1); + EXPECT_EQ(tensor->stride(1), 3); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + // Non-contiguous due to custom strides + EXPECT_FALSE(tensor->is_contiguous()); - // Check tensor properties - EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->size(0), 100); - EXPECT_EQ(tensor->size(1), 200); - EXPECT_EQ(tensor->size(2), 50); + delete tensor; } -// Test aoti_torch_empty_strided with bfloat16 dtype -TEST_F(AOTITorchEmptyStridedTest, BFloat16Tensor) { - // Test creating bfloat16 tensor on CUDA - std::vector sizes = {2, 3, 4}; - Tensor* tensor_bf16; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - nullptr, // Let function compute strides - static_cast(SupportedDTypes::BFLOAT16), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_bf16); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_bf16, nullptr); - - // CRITICAL: Verify the tensor is actually bfloat16 - int32_t actual_dtype; - EXPECT_EQ(aoti_torch_get_dtype(tensor_bf16, &actual_dtype), Error::Ok); - EXPECT_EQ(actual_dtype, static_cast(SupportedDTypes::BFLOAT16)) - << "Expected bfloat16 dtype (" - << static_cast(SupportedDTypes::BFLOAT16) << "), got " - << actual_dtype; - - // Verify element size (bfloat16 should be 2 bytes per element) - size_t element_size = tensor_bf16->element_size(); - EXPECT_EQ(element_size, 2) - << "Expected bfloat16 element size to be 2 bytes, got " << element_size; - - // Verify total number of elements and memory usage - int64_t expected_numel = 2 * 3 * 4; // 24 elements - EXPECT_EQ(tensor_bf16->numel(), expected_numel) - << "Expected " << expected_numel << " elements, got " - << tensor_bf16->numel(); - - // Verify total memory size (numel * element_size) - size_t 
expected_memory_size = expected_numel * 2; // 24 * 2 = 48 bytes - size_t actual_memory_size = - tensor_bf16->numel() * tensor_bf16->element_size(); - EXPECT_EQ(actual_memory_size, expected_memory_size) - << "Expected " << expected_memory_size << " bytes, got " - << actual_memory_size; +void runDifferentDtypesEmptyStridedTest( + int32_t device_type, + int32_t device_index) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - // Check tensor properties - EXPECT_EQ(tensor_bf16->dim(), 3); - EXPECT_EQ(tensor_bf16->size(0), 2); - EXPECT_EQ(tensor_bf16->size(1), 3); - EXPECT_EQ(tensor_bf16->size(2), 4); - - // Verify we can get tensor metadata - int64_t* sizes_ptr; - int64_t* strides_ptr; - EXPECT_EQ(aoti_torch_get_sizes(tensor_bf16, &sizes_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor_bf16, &strides_ptr), Error::Ok); - - // Check sizes match - EXPECT_EQ(sizes_ptr[0], 2); - EXPECT_EQ(sizes_ptr[1], 3); - EXPECT_EQ(sizes_ptr[2], 4); - - // Check that strides are computed correctly (row-major order) - EXPECT_EQ(strides_ptr[0], 12); // 3 * 4 - EXPECT_EQ(strides_ptr[1], 4); // 4 - EXPECT_EQ(strides_ptr[2], 1); // 1 - - // Test bfloat16 tensor with custom strides - std::vector sizes_2d = {3, 2}; - std::vector strides_2d = {2, 1}; // Row-major strides - Tensor* tensor_bf16_custom; - error = aoti_torch_empty_strided( - sizes_2d.size(), - sizes_2d.data(), - strides_2d.data(), - static_cast(SupportedDTypes::BFLOAT16), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_bf16_custom); + // Test Float32 + { + Tensor* tensor = nullptr; + AOTITorchError error = aoti_torch_empty_strided( + sizes.size(), + sizes.data(), + strides.data(), + static_cast(slim_c10::ScalarType::Float), + device_type, + device_index, + &tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::Float); + EXPECT_EQ(tensor->itemsize(), 4); + delete tensor; + } - EXPECT_EQ(error, 
Error::Ok); - EXPECT_NE(tensor_bf16_custom, nullptr); + // Test BFloat16 + { + Tensor* tensor = nullptr; + AOTITorchError error = aoti_torch_empty_strided( + sizes.size(), + sizes.data(), + strides.data(), + static_cast(slim_c10::ScalarType::BFloat16), + device_type, + device_index, + &tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::BFloat16); + EXPECT_EQ(tensor->itemsize(), 2); + delete tensor; + } - // Verify custom stride tensor is also bfloat16 - int32_t custom_dtype; - EXPECT_EQ(aoti_torch_get_dtype(tensor_bf16_custom, &custom_dtype), Error::Ok); - EXPECT_EQ(custom_dtype, static_cast(SupportedDTypes::BFLOAT16)) - << "Expected bfloat16 dtype (" - << static_cast(SupportedDTypes::BFLOAT16) << "), got " - << custom_dtype; + // Test Int64 + { + Tensor* tensor = nullptr; + AOTITorchError error = aoti_torch_empty_strided( + sizes.size(), + sizes.data(), + strides.data(), + static_cast(slim_c10::ScalarType::Long), + device_type, + device_index, + &tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::Long); + EXPECT_EQ(tensor->itemsize(), 8); + delete tensor; + } - // Verify element size for custom stride tensor - EXPECT_EQ(tensor_bf16_custom->element_size(), 2); + // Test Bool + { + Tensor* tensor = nullptr; + AOTITorchError error = aoti_torch_empty_strided( + sizes.size(), + sizes.data(), + strides.data(), + static_cast(slim_c10::ScalarType::Bool), + device_type, + device_index, + &tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(tensor, nullptr); + EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::Bool); + EXPECT_EQ(tensor->itemsize(), 1); + delete tensor; + } +} - // Check tensor properties - EXPECT_EQ(tensor_bf16_custom->dim(), 2); - EXPECT_EQ(tensor_bf16_custom->size(0), 3); - EXPECT_EQ(tensor_bf16_custom->size(1), 2); +// ============================================================================ +// CPU Tests +// 
============================================================================ - // Verify memory size for custom stride tensor - int64_t custom_expected_numel = 3 * 2; // 6 elements - size_t custom_expected_memory = custom_expected_numel * 2; // 6 * 2 = 12 bytes - EXPECT_EQ( - tensor_bf16_custom->numel() * tensor_bf16_custom->element_size(), - custom_expected_memory); +TEST_F(AOTITorchEmptyStridedSlimTest, BasicFunctionality_CPU) { + runBasicEmptyStridedTest(static_cast(slim_c10::DeviceType::CPU), 0); +} - // Check custom strides - int64_t* custom_strides_ptr; - EXPECT_EQ( - aoti_torch_get_strides(tensor_bf16_custom, &custom_strides_ptr), - Error::Ok); - EXPECT_EQ(custom_strides_ptr[0], 2); - EXPECT_EQ(custom_strides_ptr[1], 1); - - // Test bfloat16 scalar tensor (0D) - std::vector scalar_sizes = {}; - Tensor* tensor_bf16_scalar; - error = aoti_torch_empty_strided( - scalar_sizes.size(), - scalar_sizes.data(), - nullptr, - static_cast(SupportedDTypes::BFLOAT16), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor_bf16_scalar); +TEST_F(AOTITorchEmptyStridedSlimTest, MultiDimensional_CPU) { + runMultiDimensionalEmptyStridedTest( + static_cast(slim_c10::DeviceType::CPU), 0); +} - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_bf16_scalar, nullptr); - EXPECT_EQ(tensor_bf16_scalar->dim(), 0); - - // Verify scalar tensor is also bfloat16 - int32_t scalar_dtype; - EXPECT_EQ(aoti_torch_get_dtype(tensor_bf16_scalar, &scalar_dtype), Error::Ok); - EXPECT_EQ(scalar_dtype, static_cast(SupportedDTypes::BFLOAT16)) - << "Expected bfloat16 dtype (" - << static_cast(SupportedDTypes::BFLOAT16) << "), got " - << scalar_dtype; - - // Verify scalar tensor properties - EXPECT_EQ(tensor_bf16_scalar->element_size(), 2); - EXPECT_EQ(tensor_bf16_scalar->numel(), 1); // Scalar tensor has 1 element - EXPECT_EQ( - tensor_bf16_scalar->numel() * tensor_bf16_scalar->element_size(), - 2); // 1 * 2 = 2 bytes +TEST_F(AOTITorchEmptyStridedSlimTest, ScalarTensor_CPU) { + 
runScalarTensorEmptyStridedTest( + static_cast(slim_c10::DeviceType::CPU), 0); } -// Test custom strides functionality -TEST_F(AOTITorchEmptyStridedTest, CustomStrides) { - // Create tensor with valid custom strides (contiguous layout) - std::vector sizes = {2, 3}; - std::vector strides = {3, 1}; // Standard row-major strides +TEST_F(AOTITorchEmptyStridedSlimTest, ZeroSizedTensor_CPU) { + runZeroSizedTensorEmptyStridedTest( + static_cast(slim_c10::DeviceType::CPU), 0); +} - Tensor* tensor = create_tracked_tensor(sizes, strides); - EXPECT_NE(tensor, nullptr); +TEST_F(AOTITorchEmptyStridedSlimTest, CustomStrides_CPU) { + runCustomStridesEmptyStridedTest( + static_cast(slim_c10::DeviceType::CPU), 0); +} - // Verify the tensor was created correctly - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); +TEST_F(AOTITorchEmptyStridedSlimTest, DifferentDtypes_CPU) { + runDifferentDtypesEmptyStridedTest( + static_cast(slim_c10::DeviceType::CPU), 0); +} - // Check strides through AOTI interface - int64_t* strides_ptr; - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr), Error::Ok); - EXPECT_EQ(strides_ptr[0], 3); - EXPECT_EQ(strides_ptr[1], 1); +// ============================================================================ +// CUDA Tests +// ============================================================================ - // Test another valid stride pattern - transpose-like - std::vector sizes_2 = {3, 2}; - std::vector strides_2 = {1, 3}; // Column-major strides +TEST_F(AOTITorchEmptyStridedSlimTest, BasicFunctionality_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runBasicEmptyStridedTest(static_cast(slim_c10::DeviceType::CUDA), 0); +} - Tensor* tensor_2 = create_tracked_tensor(sizes_2, strides_2); - EXPECT_NE(tensor_2, nullptr); +TEST_F(AOTITorchEmptyStridedSlimTest, MultiDimensional_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + 
runMultiDimensionalEmptyStridedTest( + static_cast(slim_c10::DeviceType::CUDA), 0); +} - // Verify the tensor properties - EXPECT_EQ(tensor_2->dim(), 2); - EXPECT_EQ(tensor_2->size(0), 3); - EXPECT_EQ(tensor_2->size(1), 2); +TEST_F(AOTITorchEmptyStridedSlimTest, ScalarTensor_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runScalarTensorEmptyStridedTest( + static_cast(slim_c10::DeviceType::CUDA), 0); +} - // Check strides - int64_t* strides_ptr_2; - EXPECT_EQ(aoti_torch_get_strides(tensor_2, &strides_ptr_2), Error::Ok); - EXPECT_EQ(strides_ptr_2[0], 1); - EXPECT_EQ(strides_ptr_2[1], 3); +TEST_F(AOTITorchEmptyStridedSlimTest, ZeroSizedTensor_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runZeroSizedTensorEmptyStridedTest( + static_cast(slim_c10::DeviceType::CUDA), 0); } -// Test edge case: zero-element tensor with non-zero dimensions -TEST_F(AOTITorchEmptyStridedTest, ZeroElementTensor) { - std::vector sizes = {2, 0, 3}; // Total elements = 0 - Tensor* tensor = create_tracked_tensor(sizes); - EXPECT_NE(tensor, nullptr); +TEST_F(AOTITorchEmptyStridedSlimTest, CustomStrides_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runCustomStridesEmptyStridedTest( + static_cast(slim_c10::DeviceType::CUDA), 0); +} - // Verify the tensor properties - EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 0); - EXPECT_EQ(tensor->size(2), 3); - - // Should be able to get metadata - int64_t* sizes_ptr; - int64_t* strides_ptr; - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr), Error::Ok); - - EXPECT_EQ(sizes_ptr[0], 2); - EXPECT_EQ(sizes_ptr[1], 0); - EXPECT_EQ(sizes_ptr[2], 3); +TEST_F(AOTITorchEmptyStridedSlimTest, DifferentDtypes_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runDifferentDtypesEmptyStridedTest( + 
static_cast(slim_c10::DeviceType::CUDA), 0); } -// Test different data types (currently we support bf16, fp32 and int32) -TEST_F(AOTITorchEmptyStridedTest, DifferentDataTypes) { +// ============================================================================ +// Verify Device Properties +// ============================================================================ + +TEST_F(AOTITorchEmptyStridedSlimTest, VerifyCPUDevice) { std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - // Test float32 (dtype 6) - one of the supported types - Tensor* tensor_float32; + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - nullptr, - 6, // float32 - 1, // CUDA device - 0, // device index - &tensor_float32); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_float32, nullptr); - - // Test int32 (dtype 3) - one of the supported types - Tensor* tensor_int32; - error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - nullptr, - 3, // int32 - unsupported - 1, // CUDA device - 0, // device index - &tensor_int32); + strides.data(), + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0, + &tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor_int32, nullptr); + ASSERT_NE(tensor, nullptr); - // Test another unsupported data type - Tensor* tensor_float64; - error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - nullptr, - 7, // float64 - unsupported - 1, // CUDA device - 0, // device index - &tensor_float64); + EXPECT_TRUE(tensor->is_cpu()); + EXPECT_FALSE(tensor->is_cuda()); + EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CPU); - EXPECT_EQ(error, Error::InvalidArgument); // Should fail for unsupported dtype + delete tensor; } -// Test multi-dimensional tensors with various shapes -TEST_F(AOTITorchEmptyStridedTest, MultiDimensionalTensors) { - // Test 3D tensor - std::vector sizes_3d = {2, 3, 4}; - Tensor* tensor_3d = 
create_tracked_tensor(sizes_3d); - EXPECT_NE(tensor_3d, nullptr); - EXPECT_EQ(tensor_3d->dim(), 3); - EXPECT_EQ(tensor_3d->size(0), 2); - EXPECT_EQ(tensor_3d->size(1), 3); - EXPECT_EQ(tensor_3d->size(2), 4); - - // Test 4D tensor - std::vector sizes_4d = {2, 3, 4, 5}; - Tensor* tensor_4d = create_tracked_tensor(sizes_4d); - EXPECT_NE(tensor_4d, nullptr); - EXPECT_EQ(tensor_4d->dim(), 4); - EXPECT_EQ(tensor_4d->size(0), 2); - EXPECT_EQ(tensor_4d->size(1), 3); - EXPECT_EQ(tensor_4d->size(2), 4); - EXPECT_EQ(tensor_4d->size(3), 5); - - // Test 5D tensor - std::vector sizes_5d = {1, 2, 3, 4, 5}; - Tensor* tensor_5d = create_tracked_tensor(sizes_5d); - EXPECT_NE(tensor_5d, nullptr); - EXPECT_EQ(tensor_5d->dim(), 5); - EXPECT_EQ(tensor_5d->size(0), 1); - EXPECT_EQ(tensor_5d->size(1), 2); - EXPECT_EQ(tensor_5d->size(2), 3); - EXPECT_EQ(tensor_5d->size(3), 4); - EXPECT_EQ(tensor_5d->size(4), 5); -} +TEST_F(AOTITorchEmptyStridedSlimTest, VerifyCUDADevice) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } -// Test incontiguous tensor creation - transpose-like layout -TEST_F(AOTITorchEmptyStridedTest, IncontiguousTransposeLayout) { - // Create a tensor with transpose-like strides (column-major) - // For a 3x4 tensor in column-major order, strides should be [1, 3] - // This means each row step is 1, and each column step is 3 - std::vector sizes = {3, 4}; - std::vector strides = {1, 3}; // Column-major (incontiguous) + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor; + Tensor* tensor = nullptr; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), strides.data(), - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0, &tensor); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); + ASSERT_NE(tensor, nullptr); - // Verify tensor 
properties - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 3); - EXPECT_EQ(tensor->size(1), 4); + EXPECT_FALSE(tensor->is_cpu()); + EXPECT_TRUE(tensor->is_cuda()); + EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CUDA); - // Verify the strides are what we specified - int64_t* strides_ptr; - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr), Error::Ok); - EXPECT_EQ(strides_ptr[0], 1); // Column-major stride for dimension 0 - EXPECT_EQ(strides_ptr[1], 3); // Column-major stride for dimension 1 - - // Verify that memory was allocated correctly for incontiguous layout - // Storage size should be: stride[0] * (size[0] - 1) + stride[1] * (size[1] - - // 1) + 1 = 1 * (3 - 1) + 3 * (4 - 1) + 1 = 1 * 2 + 3 * 3 + 1 = 2 + 9 + 1 = 12 - // elements Total bytes = 12 * 4 = 48 bytes (for float32) - EXPECT_EQ(tensor->numel(), 12); // numel is still 3*4=12 for logical shape - - // The tensor should be accessible and writable - void* data_ptr = tensor->mutable_data_ptr(); - EXPECT_NE(data_ptr, nullptr); - - // Verify we can use CUDA to write to the memory - std::vector test_data(12, 1.0f); - cudaError_t cuda_err = cudaMemcpy( - data_ptr, test_data.data(), 12 * sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess); + delete tensor; } -// Test incontiguous tensor creation - expanded/broadcasted stride pattern -TEST_F(AOTITorchEmptyStridedTest, IncontiguousExpandedStrides) { - // Create a tensor with expanded strides (simulating broadcasting) - // A 2x3x4 tensor where the first dimension has stride 0 (expanded) - // This creates a tensor where the first dimension is "broadcasted" - std::vector sizes = {2, 3, 4}; - std::vector strides = {0, 4, 1}; // First dimension has stride 0 +// ============================================================================ +// Error Cases +// ============================================================================ + +TEST_F(AOTITorchEmptyStridedSlimTest, NullReturnPointer) { + std::vector sizes = {2, 
3}; + std::vector strides = calculateContiguousStrides(sizes); - Tensor* tensor; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), strides.data(), - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA), - 0, // device index - &tensor); + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0, + nullptr); // null return pointer - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(tensor, nullptr); - - // Verify tensor properties - EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); - EXPECT_EQ(tensor->size(2), 4); - - // Verify the strides are what we specified - int64_t* strides_ptr; - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr), Error::Ok); - EXPECT_EQ(strides_ptr[0], 0); // Expanded dimension stride - EXPECT_EQ(strides_ptr[1], 4); - EXPECT_EQ(strides_ptr[2], 1); - - // Verify that memory was allocated correctly for this incontiguous layout - // Storage size should be: stride[0] * (size[0] - 1) + stride[1] * (size[1] - - // 1) + stride[2] * (size[2] - 1) + 1 = 0 * (2 - 1) + 4 * (3 - 1) + 1 * (4 - - // 1) + 1 = 0 + 8 + 3 + 1 = 12 elements Note: numel() returns logical number - // of elements (2*3*4=24), not storage size - EXPECT_EQ(tensor->numel(), 24); // Logical numel is 2*3*4=24 - - // The tensor should be accessible and writable - void* data_ptr = tensor->mutable_data_ptr(); - EXPECT_NE(data_ptr, nullptr); - - // Verify we can use CUDA to write to the allocated memory - // We only need to allocate 12 elements (storage size), not 24 - std::vector test_data(12, 2.0f); - cudaError_t cuda_err = cudaMemcpy( - data_ptr, test_data.data(), 12 * sizeof(float), cudaMemcpyHostToDevice); - EXPECT_EQ(cuda_err, cudaSuccess); + EXPECT_EQ(error, Error::InvalidArgument); } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided_slim.cpp deleted file mode 
100644 index d563eea98bc..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_empty_strided_slim.cpp +++ /dev/null @@ -1,467 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -// Helper to check if CUDA is available -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -// Helper to calculate contiguous strides from sizes -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -// Test fixture for SlimTensor-based aoti_torch_empty_strided tests -class AOTITorchEmptyStridedSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - void TearDown() override { - // Tensors are cleaned up via their destructors - for (Tensor* t : tensors_) { - delete t; - } - tensors_.clear(); - } - - // Track tensors for cleanup - void trackTensor(Tensor* t) { - if (t != nullptr) { - tensors_.push_back(t); - } - } - - private: - std::vector tensors_; -}; - -// ============================================================================ -// Common test body - parameterized by device type -// ============================================================================ - -void runBasicEmptyStridedTest(int32_t device_type, 
int32_t device_index) { - // Test 1D tensor - std::vector sizes_1d = {5}; - std::vector strides_1d = calculateContiguousStrides(sizes_1d); - - Tensor* tensor_1d = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes_1d.size(), - sizes_1d.data(), - strides_1d.data(), - static_cast(slim_c10::ScalarType::Float), // dtype = 6 - device_type, - device_index, - &tensor_1d); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor_1d, nullptr); - - // Check tensor properties - EXPECT_EQ(tensor_1d->dim(), 1); - EXPECT_EQ(tensor_1d->size(0), 5); - EXPECT_EQ(tensor_1d->numel(), 5); - EXPECT_EQ( - static_cast(tensor_1d->dtype()), - static_cast(slim_c10::ScalarType::Float)); - EXPECT_NE(tensor_1d->data_ptr(), nullptr); - - // Cleanup - delete tensor_1d; -} - -void runMultiDimensionalEmptyStridedTest( - int32_t device_type, - int32_t device_index) { - // Test 3D tensor - std::vector sizes = {2, 3, 4}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - // Check tensor properties - EXPECT_EQ(tensor->dim(), 3); - EXPECT_EQ(tensor->size(0), 2); - EXPECT_EQ(tensor->size(1), 3); - EXPECT_EQ(tensor->size(2), 4); - EXPECT_EQ(tensor->numel(), 24); - - // Check strides - EXPECT_EQ(tensor->stride(0), 12); - EXPECT_EQ(tensor->stride(1), 4); - EXPECT_EQ(tensor->stride(2), 1); - - delete tensor; -} - -void runScalarTensorEmptyStridedTest( - int32_t device_type, - int32_t device_index) { - std::vector sizes = {}; // 0D tensor - std::vector strides = {}; - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor); - - EXPECT_EQ(error, 
Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 0); - EXPECT_EQ(tensor->numel(), 1); - EXPECT_NE(tensor->data_ptr(), nullptr); - - delete tensor; -} - -void runZeroSizedTensorEmptyStridedTest( - int32_t device_type, - int32_t device_index) { - std::vector sizes = {0, 5}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 0); - EXPECT_EQ(tensor->size(1), 5); - EXPECT_EQ(tensor->numel(), 0); - - delete tensor; -} - -void runCustomStridesEmptyStridedTest( - int32_t device_type, - int32_t device_index) { - // Create a transposed (column-major) tensor - std::vector sizes = {3, 4}; - std::vector strides = {1, 3}; // Column-major - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_EQ(tensor->dim(), 2); - EXPECT_EQ(tensor->size(0), 3); - EXPECT_EQ(tensor->size(1), 4); - EXPECT_EQ(tensor->stride(0), 1); - EXPECT_EQ(tensor->stride(1), 3); - - // Non-contiguous due to custom strides - EXPECT_FALSE(tensor->is_contiguous()); - - delete tensor; -} - -void runDifferentDtypesEmptyStridedTest( - int32_t device_type, - int32_t device_index) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - // Test Float32 - { - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - device_type, - device_index, - &tensor); - 
EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::Float); - EXPECT_EQ(tensor->itemsize(), 4); - delete tensor; - } - - // Test BFloat16 - { - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::BFloat16), - device_type, - device_index, - &tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::BFloat16); - EXPECT_EQ(tensor->itemsize(), 2); - delete tensor; - } - - // Test Int64 - { - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Long), - device_type, - device_index, - &tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::Long); - EXPECT_EQ(tensor->itemsize(), 8); - delete tensor; - } - - // Test Bool - { - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Bool), - device_type, - device_index, - &tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - EXPECT_EQ(tensor->dtype(), slim_c10::ScalarType::Bool); - EXPECT_EQ(tensor->itemsize(), 1); - delete tensor; - } -} - -// ============================================================================ -// CPU Tests -// ============================================================================ - -TEST_F(AOTITorchEmptyStridedSlimTest, BasicFunctionality_CPU) { - runBasicEmptyStridedTest(static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, MultiDimensional_CPU) { - runMultiDimensionalEmptyStridedTest( - static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, ScalarTensor_CPU) { - runScalarTensorEmptyStridedTest( - 
static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, ZeroSizedTensor_CPU) { - runZeroSizedTensorEmptyStridedTest( - static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, CustomStrides_CPU) { - runCustomStridesEmptyStridedTest( - static_cast(slim_c10::DeviceType::CPU), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, DifferentDtypes_CPU) { - runDifferentDtypesEmptyStridedTest( - static_cast(slim_c10::DeviceType::CPU), 0); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchEmptyStridedSlimTest, BasicFunctionality_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runBasicEmptyStridedTest(static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, MultiDimensional_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runMultiDimensionalEmptyStridedTest( - static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, ScalarTensor_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runScalarTensorEmptyStridedTest( - static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, ZeroSizedTensor_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runZeroSizedTensorEmptyStridedTest( - static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, CustomStrides_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runCustomStridesEmptyStridedTest( - static_cast(slim_c10::DeviceType::CUDA), 0); -} - -TEST_F(AOTITorchEmptyStridedSlimTest, DifferentDtypes_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runDifferentDtypesEmptyStridedTest( - static_cast(slim_c10::DeviceType::CUDA), 
0); -} - -// ============================================================================ -// Verify Device Properties -// ============================================================================ - -TEST_F(AOTITorchEmptyStridedSlimTest, VerifyCPUDevice) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0, - &tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_TRUE(tensor->is_cpu()); - EXPECT_FALSE(tensor->is_cuda()); - EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CPU); - - delete tensor; -} - -TEST_F(AOTITorchEmptyStridedSlimTest, VerifyCUDADevice) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = nullptr; - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0, - &tensor); - - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(tensor, nullptr); - - EXPECT_FALSE(tensor->is_cpu()); - EXPECT_TRUE(tensor->is_cuda()); - EXPECT_EQ(tensor->device_type(), slim_c10::DeviceType::CUDA); - - delete tensor; -} - -// ============================================================================ -// Error Cases -// ============================================================================ - -TEST_F(AOTITorchEmptyStridedSlimTest, NullReturnPointer) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), 
- 0, - nullptr); // null return pointer - - EXPECT_EQ(error, Error::InvalidArgument); -} diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool.cpp index 8e6bcbbfad6..dee95cbafe2 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool.cpp @@ -7,197 +7,285 @@ */ #include -#include -#include -#include -#include -#include -#include #include #include -using namespace executorch::backends::aoti; -using namespace executorch::backends::cuda; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +#include +#include +#include +#include +#include -// Test fixture for aoti_torch_item_bool tests -class AOTITorchItemBoolTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +using namespace executorch::backends::cuda; +using executorch::runtime::Error; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +namespace { - // Clear any remaining tensors from previous tests - clear_all_tensors(); - } +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); +} // namespace - // Clear the global tensor storage using the provided function - clear_all_tensors(); +class AOTITorchItemBoolSlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); } - // Helper to create a bool 
tensor on CUDA with a specific value - Tensor* create_cuda_bool_tensor(bool value) { - // Create a 0D (scalar) bool tensor - std::vector sizes = {}; // 0D tensor - std::vector strides = {}; // Empty strides for scalar - Tensor* tensor; + Tensor* createScalarBoolTensor( + bool value, + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), + int32_t device_index = 0) { + Tensor* tensor = nullptr; + + std::vector sizes = {1}; + std::vector strides = {1}; AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), strides.data(), - static_cast(SupportedDTypes::BOOL), - static_cast(SupportedDevices::CUDA), - 0, + static_cast(slim_c10::ScalarType::Bool), + device_type, + device_index, &tensor); if (error != Error::Ok || tensor == nullptr) { return nullptr; } - // Set the value - bool host_value = value; - cudaError_t cuda_err = cudaMemcpy( - tensor->mutable_data_ptr(), - &host_value, - sizeof(bool), - cudaMemcpyHostToDevice); - - if (cuda_err != cudaSuccess) { - aoti_torch_delete_tensor_object(tensor); - return nullptr; + if (device_type == static_cast(slim_c10::DeviceType::CPU)) { + bool* data = static_cast(tensor->data_ptr()); + *data = value; + } else { + cudaMemcpy( + tensor->data_ptr(), &value, sizeof(bool), cudaMemcpyHostToDevice); } return tensor; } - // Helper to create a bool tensor on CPU with a specific value - Tensor* create_cpu_bool_tensor(bool value) { - // Create a 0D (scalar) bool tensor - std::vector sizes = {}; // 0D tensor - std::vector strides = {}; // Empty strides for scalar - Tensor* tensor; + Tensor* createTestTensor( + const std::vector& sizes, + int32_t dtype = static_cast(slim_c10::ScalarType::Float), + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), + int32_t device_index = 0) { + Tensor* tensor = nullptr; + + std::vector strides(sizes.size()); + if (!sizes.empty()) { + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 
1]; + } + } AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), strides.data(), - static_cast(SupportedDTypes::BOOL), - static_cast(SupportedDevices::CPU), - 0, + dtype, + device_type, + device_index, &tensor); - if (error != Error::Ok || tensor == nullptr) { - return nullptr; - } - - // Set the value directly - bool* data_ptr = static_cast(tensor->mutable_data_ptr()); - *data_ptr = value; - - return tensor; + return (error == Error::Ok) ? tensor : nullptr; } }; -// Test extracting true value from CUDA bool tensor -TEST_F(AOTITorchItemBoolTest, CUDATensorTrueValue) { - Tensor* tensor = create_cuda_bool_tensor(true); +// ============================================================================ +// Basic Functionality Tests +// ============================================================================ + +TEST_F(AOTITorchItemBoolSlimTest, TrueValue_CPU) { + Tensor* tensor = createScalarBoolTensor( + true, static_cast(slim_c10::DeviceType::CPU), 0); ASSERT_NE(tensor, nullptr); bool result = false; AOTITorchError error = aoti_torch_item_bool(tensor, &result); EXPECT_EQ(error, Error::Ok); - EXPECT_TRUE(result); + EXPECT_EQ(result, true); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); } -// Test extracting false value from CUDA bool tensor -TEST_F(AOTITorchItemBoolTest, CUDATensorFalseValue) { - Tensor* tensor = create_cuda_bool_tensor(false); +TEST_F(AOTITorchItemBoolSlimTest, FalseValue_CPU) { + Tensor* tensor = createScalarBoolTensor( + false, static_cast(slim_c10::DeviceType::CPU), 0); ASSERT_NE(tensor, nullptr); bool result = true; AOTITorchError error = aoti_torch_item_bool(tensor, &result); EXPECT_EQ(error, Error::Ok); - EXPECT_FALSE(result); + EXPECT_EQ(result, false); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); +} + +// ============================================================================ +// Error Handling Tests +// 
============================================================================ + +TEST_F(AOTITorchItemBoolSlimTest, NullTensor) { + bool result = false; + AOTITorchError error = aoti_torch_item_bool(nullptr, &result); + + EXPECT_EQ(error, Error::InvalidArgument); +} + +TEST_F(AOTITorchItemBoolSlimTest, NullReturnValue) { + Tensor* tensor = createScalarBoolTensor( + true, static_cast(slim_c10::DeviceType::CPU), 0); + ASSERT_NE(tensor, nullptr); + + AOTITorchError error = aoti_torch_item_bool(tensor, nullptr); + + EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); +} + +TEST_F(AOTITorchItemBoolSlimTest, MultiElementTensor) { + std::vector sizes = {2, 3}; + Tensor* tensor = createTestTensor( + sizes, + static_cast(slim_c10::ScalarType::Bool), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + EXPECT_GT(tensor->numel(), 1); + + bool result = false; + AOTITorchError error = aoti_torch_item_bool(tensor, &result); + + EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); +} + +TEST_F(AOTITorchItemBoolSlimTest, WrongDtype_Float) { + std::vector sizes = {1}; + Tensor* tensor = createTestTensor( + sizes, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + + bool result = false; + AOTITorchError error = aoti_torch_item_bool(tensor, &result); + + EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); +} + +TEST_F(AOTITorchItemBoolSlimTest, WrongDtype_Long) { + std::vector sizes = {1}; + Tensor* tensor = createTestTensor( + sizes, + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(tensor, nullptr); + + bool result = false; + AOTITorchError error = aoti_torch_item_bool(tensor, &result); + + EXPECT_EQ(error, Error::InvalidArgument); + + 
EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); } -// Test extracting true value from CPU bool tensor -TEST_F(AOTITorchItemBoolTest, CPUTensorTrueValue) { - Tensor* tensor = create_cpu_bool_tensor(true); +// ============================================================================ +// CUDA Tests +// ============================================================================ + +TEST_F(AOTITorchItemBoolSlimTest, TrueValue_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + Tensor* tensor = createScalarBoolTensor( + true, static_cast(slim_c10::DeviceType::CUDA), 0); ASSERT_NE(tensor, nullptr); + EXPECT_TRUE(tensor->is_cuda()); bool result = false; AOTITorchError error = aoti_torch_item_bool(tensor, &result); EXPECT_EQ(error, Error::Ok); - EXPECT_TRUE(result); + EXPECT_EQ(result, true); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); } -// Test extracting false value from CPU bool tensor -TEST_F(AOTITorchItemBoolTest, CPUTensorFalseValue) { - Tensor* tensor = create_cpu_bool_tensor(false); +TEST_F(AOTITorchItemBoolSlimTest, FalseValue_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + Tensor* tensor = createScalarBoolTensor( + false, static_cast(slim_c10::DeviceType::CUDA), 0); ASSERT_NE(tensor, nullptr); + EXPECT_TRUE(tensor->is_cuda()); bool result = true; AOTITorchError error = aoti_torch_item_bool(tensor, &result); EXPECT_EQ(error, Error::Ok); - EXPECT_FALSE(result); -} + EXPECT_EQ(result, false); -// Test with null tensor pointer -TEST_F(AOTITorchItemBoolTest, NullTensorPointer) { - bool result; - AOTITorchError error = aoti_torch_item_bool(nullptr, &result); - EXPECT_EQ(error, Error::InvalidArgument); + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); } -// Test with null result pointer -TEST_F(AOTITorchItemBoolTest, NullResultPointer) { - Tensor* tensor = create_cuda_bool_tensor(true); +TEST_F(AOTITorchItemBoolSlimTest, MultiElementTensor_CUDA) { 
+ if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {2, 3}; + Tensor* tensor = createTestTensor( + sizes, + static_cast(slim_c10::ScalarType::Bool), + static_cast(slim_c10::DeviceType::CUDA), + 0); ASSERT_NE(tensor, nullptr); + EXPECT_TRUE(tensor->is_cuda()); + + bool result = false; + AOTITorchError error = aoti_torch_item_bool(tensor, &result); - AOTITorchError error = aoti_torch_item_bool(tensor, nullptr); EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); } -// Test with non-bool dtype (should fail) -TEST_F(AOTITorchItemBoolTest, NonBoolDtype) { - // Create a float tensor - std::vector sizes = {}; - std::vector strides = {}; - Tensor* tensor; - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(SupportedDTypes::FLOAT32), // Not bool - static_cast(SupportedDevices::CUDA), - 0, - &tensor); - - ASSERT_EQ(error, Error::Ok); +TEST_F(AOTITorchItemBoolSlimTest, WrongDtype_Float_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {1}; + Tensor* tensor = createTestTensor( + sizes, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); ASSERT_NE(tensor, nullptr); - bool result; - error = aoti_torch_item_bool(tensor, &result); + bool result = false; + AOTITorchError error = aoti_torch_item_bool(tensor, &result); + EXPECT_EQ(error, Error::InvalidArgument); + + EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool_slim.cpp deleted file mode 100644 index dee95cbafe2..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_item_bool_slim.cpp +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. 
- * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -} // namespace - -class AOTITorchItemBoolSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - Tensor* createScalarBoolTensor( - bool value, - int32_t device_type = static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector sizes = {1}; - std::vector strides = {1}; - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - static_cast(slim_c10::ScalarType::Bool), - device_type, - device_index, - &tensor); - - if (error != Error::Ok || tensor == nullptr) { - return nullptr; - } - - if (device_type == static_cast(slim_c10::DeviceType::CPU)) { - bool* data = static_cast(tensor->data_ptr()); - *data = value; - } else { - cudaMemcpy( - tensor->data_ptr(), &value, sizeof(bool), cudaMemcpyHostToDevice); - } - - return tensor; - } - - Tensor* createTestTensor( - const std::vector& sizes, - int32_t dtype = static_cast(slim_c10::ScalarType::Float), - int32_t device_type = static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector strides(sizes.size()); - if (!sizes.empty()) { - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - } - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - strides.data(), - dtype, - 
device_type, - device_index, - &tensor); - - return (error == Error::Ok) ? tensor : nullptr; - } -}; - -// ============================================================================ -// Basic Functionality Tests -// ============================================================================ - -TEST_F(AOTITorchItemBoolSlimTest, TrueValue_CPU) { - Tensor* tensor = createScalarBoolTensor( - true, static_cast(slim_c10::DeviceType::CPU), 0); - ASSERT_NE(tensor, nullptr); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(result, true); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, FalseValue_CPU) { - Tensor* tensor = createScalarBoolTensor( - false, static_cast(slim_c10::DeviceType::CPU), 0); - ASSERT_NE(tensor, nullptr); - - bool result = true; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(result, false); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -// ============================================================================ -// Error Handling Tests -// ============================================================================ - -TEST_F(AOTITorchItemBoolSlimTest, NullTensor) { - bool result = false; - AOTITorchError error = aoti_torch_item_bool(nullptr, &result); - - EXPECT_EQ(error, Error::InvalidArgument); -} - -TEST_F(AOTITorchItemBoolSlimTest, NullReturnValue) { - Tensor* tensor = createScalarBoolTensor( - true, static_cast(slim_c10::DeviceType::CPU), 0); - ASSERT_NE(tensor, nullptr); - - AOTITorchError error = aoti_torch_item_bool(tensor, nullptr); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, MultiElementTensor) { - std::vector sizes = {2, 3}; - Tensor* tensor = createTestTensor( - sizes, - 
static_cast(slim_c10::ScalarType::Bool), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - EXPECT_GT(tensor->numel(), 1); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, WrongDtype_Float) { - std::vector sizes = {1}; - Tensor* tensor = createTestTensor( - sizes, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, WrongDtype_Long) { - std::vector sizes = {1}; - Tensor* tensor = createTestTensor( - sizes, - static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(tensor, nullptr); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchItemBoolSlimTest, TrueValue_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - Tensor* tensor = createScalarBoolTensor( - true, static_cast(slim_c10::DeviceType::CUDA), 0); - ASSERT_NE(tensor, nullptr); - EXPECT_TRUE(tensor->is_cuda()); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(result, true); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, 
FalseValue_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - Tensor* tensor = createScalarBoolTensor( - false, static_cast(slim_c10::DeviceType::CUDA), 0); - ASSERT_NE(tensor, nullptr); - EXPECT_TRUE(tensor->is_cuda()); - - bool result = true; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(result, false); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, MultiElementTensor_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* tensor = createTestTensor( - sizes, - static_cast(slim_c10::ScalarType::Bool), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(tensor, nullptr); - EXPECT_TRUE(tensor->is_cuda()); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} - -TEST_F(AOTITorchItemBoolSlimTest, WrongDtype_Float_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {1}; - Tensor* tensor = createTestTensor( - sizes, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(tensor, nullptr); - - bool result = false; - AOTITorchError error = aoti_torch_item_bool(tensor, &result); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(tensor), Error::Ok); -} diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle.cpp index d123443cbfa..3a1de152f0b 100644 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle.cpp +++ b/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle.cpp @@ -7,64 +7,70 @@ */ #include -#include -#include -#include 
-#include -#include -#include #include #include -using namespace executorch::backends::aoti; +#include +#include +#include +#include +#include + using namespace executorch::backends::cuda; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +using executorch::runtime::Error; -// Test fixture for aoti_torch_new_tensor_handle tests -class AOTITorchNewTensorHandleTest : public ::testing::Test { - protected: - void SetUp() override { - // Initialize ExecuTorch Platform Abstraction Layer - et_pal_init(); +namespace slim_c10 = executorch::backends::aoti::slim::c10; - // Check if CUDA is available - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - if (err != cudaSuccess || device_count == 0) { - GTEST_SKIP() << "CUDA not available, skipping CUDA tests"; - } +namespace { + +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; + } + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; + } + return strides; +} - // Clear any remaining tensors from previous tests - clear_all_tensors(); +} // namespace + +class AOTITorchNewTensorHandleSlimTest : public ::testing::Test { + protected: + void SetUp() override { + et_pal_init(); } void TearDown() override { - // Clean up metadata - cleanup_tensor_metadata(); - - // Clear the global tensor storage using the provided function - clear_all_tensors(); + // SlimTensor uses automatic reference counting - no manual cleanup needed } - // Helper to create test tensors - Tensor* create_test_tensor( + Tensor* createTestTensor( const std::vector& sizes, const 
std::vector& strides = {}, - int32_t dtype = static_cast(SupportedDTypes::FLOAT32), - int32_t device_type = static_cast(SupportedDevices::CUDA), + int32_t dtype = static_cast(slim_c10::ScalarType::Float), + int32_t device_type = static_cast(slim_c10::DeviceType::CPU), int32_t device_index = 0) { - Tensor* tensor; + Tensor* tensor = nullptr; - const int64_t* strides_ptr = strides.empty() ? nullptr : strides.data(); + std::vector effective_strides = strides; + if (strides.empty()) { + effective_strides = calculateContiguousStrides(sizes); + } AOTITorchError error = aoti_torch_empty_strided( sizes.size(), sizes.data(), - strides_ptr, + effective_strides.data(), dtype, device_type, device_index, @@ -74,97 +80,106 @@ class AOTITorchNewTensorHandleTest : public ::testing::Test { } }; -// Test basic functionality of creating a new tensor handle -TEST_F(AOTITorchNewTensorHandleTest, BasicFunctionality) { - // Create an original tensor +// ============================================================================ +// Basic Functionality Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, BasicFunctionality_CPU) { std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Create a new handle from the original tensor Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); EXPECT_NE(new_tensor, nullptr); - // Verify the new tensor has the same properties EXPECT_EQ(new_tensor->dim(), orig_tensor->dim()); EXPECT_EQ(new_tensor->size(0), orig_tensor->size(0)); EXPECT_EQ(new_tensor->size(1), orig_tensor->size(1)); EXPECT_EQ(new_tensor->numel(), orig_tensor->numel()); - // Verify they share the same memory - 
EXPECT_EQ(new_tensor->mutable_data_ptr(), orig_tensor->mutable_data_ptr()); + EXPECT_EQ(new_tensor->data_ptr(), orig_tensor->data_ptr()); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating new handle from null tensor -TEST_F(AOTITorchNewTensorHandleTest, NullOriginalTensor) { +TEST_F(AOTITorchNewTensorHandleSlimTest, NullOriginalTensor) { Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(nullptr, &new_tensor); EXPECT_EQ(error, Error::InvalidArgument); } -// Test passing null pointer for new handle -TEST_F(AOTITorchNewTensorHandleTest, NullNewHandle) { +TEST_F(AOTITorchNewTensorHandleSlimTest, NullNewHandle) { std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, nullptr); EXPECT_EQ(error, Error::InvalidArgument); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); } -// Test memory sharing between original and new tensor handle -TEST_F(AOTITorchNewTensorHandleTest, MemorySharing) { - // Create an original tensor +// ============================================================================ +// Memory Sharing Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, MemorySharing_CPU) { std::vector sizes = {3, 4}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Get original memory pointer - void* orig_ptr = orig_tensor->mutable_data_ptr(); + void* orig_ptr = 
orig_tensor->data_ptr(); ASSERT_NE(orig_ptr, nullptr); - // Create a new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify both tensors point to the same memory - void* new_ptr = new_tensor->mutable_data_ptr(); + void* new_ptr = new_tensor->data_ptr(); EXPECT_EQ(orig_ptr, new_ptr); - // Clean up - deleting one should not affect the other's validity EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - // New tensor should still be valid and accessible - void* still_valid_ptr = new_tensor->mutable_data_ptr(); + void* still_valid_ptr = new_tensor->data_ptr(); EXPECT_EQ(still_valid_ptr, new_ptr); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating multiple handles from the same tensor -TEST_F(AOTITorchNewTensorHandleTest, MultipleHandles) { - // Create an original tensor +TEST_F(AOTITorchNewTensorHandleSlimTest, MultipleHandles_CPU) { std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - void* orig_ptr = orig_tensor->mutable_data_ptr(); + void* orig_ptr = orig_tensor->data_ptr(); - // Create multiple handles std::vector handles; const int num_handles = 5; @@ -174,246 +189,165 @@ TEST_F(AOTITorchNewTensorHandleTest, MultipleHandles) { aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - EXPECT_EQ(new_tensor->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(new_tensor->data_ptr(), orig_ptr); handles.push_back(new_tensor); } - // Delete original tensor EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - // All handles should still be valid for (Tensor* handle : handles) { - EXPECT_EQ(handle->mutable_data_ptr(), 
orig_ptr); + EXPECT_EQ(handle->data_ptr(), orig_ptr); EXPECT_EQ(handle->dim(), 2); EXPECT_EQ(handle->size(0), 2); EXPECT_EQ(handle->size(1), 3); } - // Delete all handles for (Tensor* handle : handles) { EXPECT_EQ(aoti_torch_delete_tensor_object(handle), Error::Ok); } } -// Test creating handle from tensor with custom strides -TEST_F(AOTITorchNewTensorHandleTest, CustomStrides) { +// ============================================================================ +// Tensor Property Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, CustomStrides_CPU) { std::vector sizes = {3, 4}; std::vector strides = {4, 1}; // Row-major strides - Tensor* orig_tensor = create_test_tensor(sizes, strides); + Tensor* orig_tensor = createTestTensor( + sizes, + strides, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Create new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify strides are preserved - int64_t* orig_strides_ptr; - int64_t* new_strides_ptr; - EXPECT_EQ(aoti_torch_get_strides(orig_tensor, &orig_strides_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(new_tensor, &new_strides_ptr), Error::Ok); + EXPECT_EQ(orig_tensor->stride(0), new_tensor->stride(0)); + EXPECT_EQ(orig_tensor->stride(1), new_tensor->stride(1)); - EXPECT_EQ(orig_strides_ptr[0], new_strides_ptr[0]); - EXPECT_EQ(orig_strides_ptr[1], new_strides_ptr[1]); - - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating handle from bfloat16 tensor -TEST_F(AOTITorchNewTensorHandleTest, BFloat16Tensor) { +TEST_F(AOTITorchNewTensorHandleSlimTest, BFloat16Tensor_CPU) { std::vector sizes = {2, 3, 4}; - Tensor* orig_tensor = 
create_test_tensor( + Tensor* orig_tensor = createTestTensor( sizes, {}, - static_cast(SupportedDTypes::BFLOAT16), - static_cast(SupportedDevices::CUDA)); + static_cast(slim_c10::ScalarType::BFloat16), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Verify original is bfloat16 - int32_t orig_dtype; - EXPECT_EQ(aoti_torch_get_dtype(orig_tensor, &orig_dtype), Error::Ok); - EXPECT_EQ(orig_dtype, static_cast(SupportedDTypes::BFLOAT16)); - - // Create new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify new tensor is also bfloat16 - int32_t new_dtype; - EXPECT_EQ(aoti_torch_get_dtype(new_tensor, &new_dtype), Error::Ok); - EXPECT_EQ(new_dtype, static_cast(SupportedDTypes::BFLOAT16)); - - // Verify element size (bfloat16 should be 2 bytes) - EXPECT_EQ(new_tensor->element_size(), 2); + EXPECT_EQ(new_tensor->itemsize(), 2); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating handle from scalar (0D) tensor -TEST_F(AOTITorchNewTensorHandleTest, ScalarTensor) { +TEST_F(AOTITorchNewTensorHandleSlimTest, ScalarTensor_CPU) { std::vector sizes = {}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); EXPECT_EQ(orig_tensor->dim(), 0); - // Create new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify scalar properties EXPECT_EQ(new_tensor->dim(), 0); EXPECT_EQ(new_tensor->numel(), 1); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); 
EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating handle from zero-sized tensor -TEST_F(AOTITorchNewTensorHandleTest, ZeroSizedTensor) { - std::vector sizes = {0, 5}; - Tensor* orig_tensor = create_test_tensor(sizes); - ASSERT_NE(orig_tensor, nullptr); - EXPECT_EQ(orig_tensor->numel(), 0); - - // Attempt to create new handle - should fail because zero-sized tensors have - // null data pointers - Tensor* new_tensor = nullptr; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - - // Zero-sized tensors are not currently supported - EXPECT_EQ(error, Error::InvalidArgument); - EXPECT_EQ(new_tensor, nullptr); - - // Clean up original tensor - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); -} - -// Test creating handle from large multi-dimensional tensor -TEST_F(AOTITorchNewTensorHandleTest, LargeMultiDimensionalTensor) { +TEST_F(AOTITorchNewTensorHandleSlimTest, LargeMultiDimensionalTensor_CPU) { std::vector sizes = {10, 20, 30}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Create new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify dimensions EXPECT_EQ(new_tensor->dim(), 3); EXPECT_EQ(new_tensor->size(0), 10); EXPECT_EQ(new_tensor->size(1), 20); EXPECT_EQ(new_tensor->size(2), 30); EXPECT_EQ(new_tensor->numel(), 6000); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating handle preserves tensor metadata -TEST_F(AOTITorchNewTensorHandleTest, MetadataPreservation) { - std::vector sizes = {2, 3, 4}; - std::vector strides = {12, 4, 1}; - Tensor* orig_tensor 
= create_test_tensor( - sizes, - strides, - static_cast(SupportedDTypes::FLOAT32), - static_cast(SupportedDevices::CUDA)); - ASSERT_NE(orig_tensor, nullptr); - - // Create new handle - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - // Get and compare all metadata - int64_t* orig_sizes_ptr; - int64_t* new_sizes_ptr; - int64_t* orig_strides_ptr; - int64_t* new_strides_ptr; - int32_t orig_dtype, new_dtype; - int32_t orig_device_type, new_device_type; - int32_t orig_device_index, new_device_index; - - EXPECT_EQ(aoti_torch_get_sizes(orig_tensor, &orig_sizes_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_sizes(new_tensor, &new_sizes_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(orig_tensor, &orig_strides_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(new_tensor, &new_strides_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_dtype(orig_tensor, &orig_dtype), Error::Ok); - EXPECT_EQ(aoti_torch_get_dtype(new_tensor, &new_dtype), Error::Ok); - EXPECT_EQ( - aoti_torch_get_device_type(orig_tensor, &orig_device_type), Error::Ok); - EXPECT_EQ( - aoti_torch_get_device_type(new_tensor, &new_device_type), Error::Ok); - EXPECT_EQ( - aoti_torch_get_device_index(orig_tensor, &orig_device_index), Error::Ok); - EXPECT_EQ( - aoti_torch_get_device_index(new_tensor, &new_device_index), Error::Ok); - - // Verify all metadata matches - for (int i = 0; i < 3; i++) { - EXPECT_EQ(orig_sizes_ptr[i], new_sizes_ptr[i]); - EXPECT_EQ(orig_strides_ptr[i], new_strides_ptr[i]); - } - EXPECT_EQ(orig_dtype, new_dtype); - EXPECT_EQ(orig_device_type, new_device_type); - EXPECT_EQ(orig_device_index, new_device_index); +// ============================================================================ +// Handle Chain Tests +// ============================================================================ - // Clean up - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); 
- EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -// Test creating handle chain: orig -> handle1 -> handle2 -TEST_F(AOTITorchNewTensorHandleTest, HandleChain) { +TEST_F(AOTITorchNewTensorHandleSlimTest, HandleChain_CPU) { std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - void* orig_ptr = orig_tensor->mutable_data_ptr(); + void* orig_ptr = orig_tensor->data_ptr(); - // Create first handle Tensor* handle1; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &handle1); EXPECT_EQ(error, Error::Ok); ASSERT_NE(handle1, nullptr); - EXPECT_EQ(handle1->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(handle1->data_ptr(), orig_ptr); - // Create second handle from the first handle Tensor* handle2; error = aoti_torch_new_tensor_handle(handle1, &handle2); EXPECT_EQ(error, Error::Ok); ASSERT_NE(handle2, nullptr); - EXPECT_EQ(handle2->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(handle2->data_ptr(), orig_ptr); - // Delete in reverse order EXPECT_EQ(aoti_torch_delete_tensor_object(handle2), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(handle1), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); } -// Test creating handle and verifying reference counting -TEST_F(AOTITorchNewTensorHandleTest, ReferenceCountingTest) { +TEST_F(AOTITorchNewTensorHandleSlimTest, ReferenceCountingTest_CPU) { std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - void* orig_ptr = orig_tensor->mutable_data_ptr(); + void* orig_ptr = orig_tensor->data_ptr(); - // Create multiple handles Tensor* handle1; Tensor* handle2; Tensor* 
handle3; @@ -422,116 +356,276 @@ TEST_F(AOTITorchNewTensorHandleTest, ReferenceCountingTest) { EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle2), Error::Ok); EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle3), Error::Ok); - // Delete original EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - // All handles should still be valid - EXPECT_EQ(handle1->mutable_data_ptr(), orig_ptr); - EXPECT_EQ(handle2->mutable_data_ptr(), orig_ptr); - EXPECT_EQ(handle3->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(handle1->data_ptr(), orig_ptr); + EXPECT_EQ(handle2->data_ptr(), orig_ptr); + EXPECT_EQ(handle3->data_ptr(), orig_ptr); - // Delete handles one by one EXPECT_EQ(aoti_torch_delete_tensor_object(handle1), Error::Ok); - // Remaining handles should still be valid - EXPECT_EQ(handle2->mutable_data_ptr(), orig_ptr); - EXPECT_EQ(handle3->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(handle2->data_ptr(), orig_ptr); + EXPECT_EQ(handle3->data_ptr(), orig_ptr); EXPECT_EQ(aoti_torch_delete_tensor_object(handle2), Error::Ok); - // Last handle should still be valid - EXPECT_EQ(handle3->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(handle3->data_ptr(), orig_ptr); EXPECT_EQ(aoti_torch_delete_tensor_object(handle3), Error::Ok); } -// Test creating handle from int32 tensor -TEST_F(AOTITorchNewTensorHandleTest, Int32Tensor) { +// ============================================================================ +// Different Dtype Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, Int64Tensor_CPU) { std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor( + Tensor* orig_tensor = createTestTensor( sizes, {}, - 3, // int32 - static_cast(SupportedDevices::CUDA)); + static_cast(slim_c10::ScalarType::Long), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Create new handle Tensor* new_tensor; AOTITorchError error = 
aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify dtype - int32_t new_dtype; - EXPECT_EQ(aoti_torch_get_dtype(new_tensor, &new_dtype), Error::Ok); - EXPECT_EQ(new_dtype, 3); // int32 + EXPECT_EQ(new_tensor->itemsize(), 8); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating handle with incontiguous tensor (transpose-like layout) -TEST_F(AOTITorchNewTensorHandleTest, IncontiguousTransposeLayout) { +TEST_F(AOTITorchNewTensorHandleSlimTest, IncontiguousLayout_CPU) { std::vector sizes = {3, 4}; std::vector strides = {1, 3}; // Column-major (incontiguous) - Tensor* orig_tensor = create_test_tensor(sizes, strides); + Tensor* orig_tensor = createTestTensor( + sizes, + strides, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - // Create new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify strides are preserved - int64_t* new_strides_ptr; - EXPECT_EQ(aoti_torch_get_strides(new_tensor, &new_strides_ptr), Error::Ok); - EXPECT_EQ(new_strides_ptr[0], 1); - EXPECT_EQ(new_strides_ptr[1], 3); + EXPECT_EQ(new_tensor->stride(0), 1); + EXPECT_EQ(new_tensor->stride(1), 3); - // Verify both tensors share the same memory - EXPECT_EQ(new_tensor->mutable_data_ptr(), orig_tensor->mutable_data_ptr()); + EXPECT_EQ(new_tensor->data_ptr(), orig_tensor->data_ptr()); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Test creating handle with expanded strides (broadcasted dimension) -TEST_F(AOTITorchNewTensorHandleTest, ExpandedStrides) { - std::vector sizes = {2, 3, 4}; - std::vector strides = {0, 
4, 1}; // First dimension has stride 0 - Tensor* orig_tensor = create_test_tensor(sizes, strides); +// ============================================================================ +// CUDA Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, BasicFunctionality_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {2, 3}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); ASSERT_NE(orig_tensor, nullptr); + EXPECT_TRUE(orig_tensor->is_cuda()); + + Tensor* new_tensor; + AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); + + EXPECT_EQ(error, Error::Ok); + EXPECT_NE(new_tensor, nullptr); + EXPECT_TRUE(new_tensor->is_cuda()); + + EXPECT_EQ(new_tensor->data_ptr(), orig_tensor->data_ptr()); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); +} + +TEST_F(AOTITorchNewTensorHandleSlimTest, MemorySharing_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {3, 4}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); + + void* orig_ptr = orig_tensor->data_ptr(); + ASSERT_NE(orig_ptr, nullptr); - // Create new handle Tensor* new_tensor; AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - // Verify expanded strides are preserved - int64_t* new_strides_ptr; - EXPECT_EQ(aoti_torch_get_strides(new_tensor, &new_strides_ptr), Error::Ok); - EXPECT_EQ(new_strides_ptr[0], 0); - EXPECT_EQ(new_strides_ptr[1], 4); - EXPECT_EQ(new_strides_ptr[2], 1); + void* new_ptr = 
new_tensor->data_ptr(); + EXPECT_EQ(orig_ptr, new_ptr); - // Clean up EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + + void* still_valid_ptr = new_tensor->data_ptr(); + EXPECT_EQ(still_valid_ptr, new_ptr); + EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); } -// Stress test: create many handles -TEST_F(AOTITorchNewTensorHandleTest, StressTestManyHandles) { +TEST_F(AOTITorchNewTensorHandleSlimTest, MultipleHandles_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + std::vector sizes = {2, 3}; - Tensor* orig_tensor = create_test_tensor(sizes); + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); + + void* orig_ptr = orig_tensor->data_ptr(); + + std::vector handles; + const int num_handles = 5; + + for (int i = 0; i < num_handles; i++) { + Tensor* new_tensor; + AOTITorchError error = + aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); + EXPECT_EQ(error, Error::Ok); + ASSERT_NE(new_tensor, nullptr); + EXPECT_EQ(new_tensor->data_ptr(), orig_ptr); + handles.push_back(new_tensor); + } + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + + for (Tensor* handle : handles) { + EXPECT_EQ(handle->data_ptr(), orig_ptr); + EXPECT_TRUE(handle->is_cuda()); + } + + for (Tensor* handle : handles) { + EXPECT_EQ(aoti_torch_delete_tensor_object(handle), Error::Ok); + } +} + +TEST_F(AOTITorchNewTensorHandleSlimTest, ReferenceCountingTest_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {2, 3}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(orig_tensor, nullptr); + + void* orig_ptr = orig_tensor->data_ptr(); + + Tensor* handle1; + Tensor* handle2; + Tensor* handle3; + + 
EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle1), Error::Ok); + EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle2), Error::Ok); + EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle3), Error::Ok); + + EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); + + EXPECT_EQ(handle1->data_ptr(), orig_ptr); + EXPECT_EQ(handle2->data_ptr(), orig_ptr); + EXPECT_EQ(handle3->data_ptr(), orig_ptr); + + EXPECT_EQ(aoti_torch_delete_tensor_object(handle1), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(handle2), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(handle3), Error::Ok); +} + +// ============================================================================ +// Mixed Device Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, MixedDeviceHandles) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + + std::vector sizes = {2, 3}; + + Tensor* cpu_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); + ASSERT_NE(cpu_tensor, nullptr); + EXPECT_TRUE(cpu_tensor->is_cpu()); + + Tensor* cuda_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CUDA), + 0); + ASSERT_NE(cuda_tensor, nullptr); + EXPECT_TRUE(cuda_tensor->is_cuda()); + + Tensor* cpu_handle; + Tensor* cuda_handle; + + EXPECT_EQ(aoti_torch_new_tensor_handle(cpu_tensor, &cpu_handle), Error::Ok); + EXPECT_EQ(aoti_torch_new_tensor_handle(cuda_tensor, &cuda_handle), Error::Ok); + + EXPECT_TRUE(cpu_handle->is_cpu()); + EXPECT_TRUE(cuda_handle->is_cuda()); + EXPECT_NE(cpu_handle->data_ptr(), cuda_handle->data_ptr()); + + EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_tensor), Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_handle), 
Error::Ok); + EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_handle), Error::Ok); +} + +// ============================================================================ +// Stress Tests +// ============================================================================ + +TEST_F(AOTITorchNewTensorHandleSlimTest, StressTestManyHandles_CPU) { + std::vector sizes = {2, 3}; + Tensor* orig_tensor = createTestTensor( + sizes, + {}, + static_cast(slim_c10::ScalarType::Float), + static_cast(slim_c10::DeviceType::CPU), + 0); ASSERT_NE(orig_tensor, nullptr); - void* orig_ptr = orig_tensor->mutable_data_ptr(); + void* orig_ptr = orig_tensor->data_ptr(); - // Create many handles const int num_handles = 100; std::vector handles; @@ -541,19 +635,16 @@ TEST_F(AOTITorchNewTensorHandleTest, StressTestManyHandles) { aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); EXPECT_EQ(error, Error::Ok); ASSERT_NE(new_tensor, nullptr); - EXPECT_EQ(new_tensor->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(new_tensor->data_ptr(), orig_ptr); handles.push_back(new_tensor); } - // Delete original EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - // All handles should still be valid for (Tensor* handle : handles) { - EXPECT_EQ(handle->mutable_data_ptr(), orig_ptr); + EXPECT_EQ(handle->data_ptr(), orig_ptr); } - // Delete all handles for (Tensor* handle : handles) { EXPECT_EQ(aoti_torch_delete_tensor_object(handle), Error::Ok); } diff --git a/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle_slim.cpp b/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle_slim.cpp deleted file mode 100644 index 3a1de152f0b..00000000000 --- a/backends/cuda/runtime/shims/tests/test_aoti_torch_new_tensor_handle_slim.cpp +++ /dev/null @@ -1,651 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include - -using namespace executorch::backends::cuda; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; - -namespace { - -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} - -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -class AOTITorchNewTensorHandleSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - void TearDown() override { - // SlimTensor uses automatic reference counting - no manual cleanup needed - } - - Tensor* createTestTensor( - const std::vector& sizes, - const std::vector& strides = {}, - int32_t dtype = static_cast(slim_c10::ScalarType::Float), - int32_t device_type = static_cast(slim_c10::DeviceType::CPU), - int32_t device_index = 0) { - Tensor* tensor = nullptr; - - std::vector effective_strides = strides; - if (strides.empty()) { - effective_strides = calculateContiguousStrides(sizes); - } - - AOTITorchError error = aoti_torch_empty_strided( - sizes.size(), - sizes.data(), - effective_strides.data(), - dtype, - device_type, - device_index, - &tensor); - - return (error == Error::Ok) ? 
tensor : nullptr; - } -}; - -// ============================================================================ -// Basic Functionality Tests -// ============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, BasicFunctionality_CPU) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(new_tensor, nullptr); - - EXPECT_EQ(new_tensor->dim(), orig_tensor->dim()); - EXPECT_EQ(new_tensor->size(0), orig_tensor->size(0)); - EXPECT_EQ(new_tensor->size(1), orig_tensor->size(1)); - EXPECT_EQ(new_tensor->numel(), orig_tensor->numel()); - - EXPECT_EQ(new_tensor->data_ptr(), orig_tensor->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, NullOriginalTensor) { - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(nullptr, &new_tensor); - - EXPECT_EQ(error, Error::InvalidArgument); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, NullNewHandle) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, nullptr); - - EXPECT_EQ(error, Error::InvalidArgument); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); -} - -// ============================================================================ -// Memory Sharing Tests -// ============================================================================ - 
-TEST_F(AOTITorchNewTensorHandleSlimTest, MemorySharing_CPU) { - std::vector sizes = {3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - ASSERT_NE(orig_ptr, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - void* new_ptr = new_tensor->data_ptr(); - EXPECT_EQ(orig_ptr, new_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - void* still_valid_ptr = new_tensor->data_ptr(); - EXPECT_EQ(still_valid_ptr, new_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, MultipleHandles_CPU) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - std::vector handles; - const int num_handles = 5; - - for (int i = 0; i < num_handles; i++) { - Tensor* new_tensor; - AOTITorchError error = - aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - EXPECT_EQ(new_tensor->data_ptr(), orig_ptr); - handles.push_back(new_tensor); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - for (Tensor* handle : handles) { - EXPECT_EQ(handle->data_ptr(), orig_ptr); - EXPECT_EQ(handle->dim(), 2); - EXPECT_EQ(handle->size(0), 2); - EXPECT_EQ(handle->size(1), 3); - } - - for (Tensor* handle : handles) { - EXPECT_EQ(aoti_torch_delete_tensor_object(handle), Error::Ok); - } -} - -// ============================================================================ -// Tensor Property Tests -// 
============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, CustomStrides_CPU) { - std::vector sizes = {3, 4}; - std::vector strides = {4, 1}; // Row-major strides - Tensor* orig_tensor = createTestTensor( - sizes, - strides, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - EXPECT_EQ(orig_tensor->stride(0), new_tensor->stride(0)); - EXPECT_EQ(orig_tensor->stride(1), new_tensor->stride(1)); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, BFloat16Tensor_CPU) { - std::vector sizes = {2, 3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::BFloat16), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - EXPECT_EQ(new_tensor->itemsize(), 2); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, ScalarTensor_CPU) { - std::vector sizes = {}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - EXPECT_EQ(orig_tensor->dim(), 0); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - 
EXPECT_EQ(new_tensor->dim(), 0); - EXPECT_EQ(new_tensor->numel(), 1); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, LargeMultiDimensionalTensor_CPU) { - std::vector sizes = {10, 20, 30}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - EXPECT_EQ(new_tensor->dim(), 3); - EXPECT_EQ(new_tensor->size(0), 10); - EXPECT_EQ(new_tensor->size(1), 20); - EXPECT_EQ(new_tensor->size(2), 30); - EXPECT_EQ(new_tensor->numel(), 6000); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -// ============================================================================ -// Handle Chain Tests -// ============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, HandleChain_CPU) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - Tensor* handle1; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &handle1); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(handle1, nullptr); - EXPECT_EQ(handle1->data_ptr(), orig_ptr); - - Tensor* handle2; - error = aoti_torch_new_tensor_handle(handle1, &handle2); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(handle2, nullptr); - EXPECT_EQ(handle2->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(handle2), Error::Ok); - 
EXPECT_EQ(aoti_torch_delete_tensor_object(handle1), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, ReferenceCountingTest_CPU) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - Tensor* handle1; - Tensor* handle2; - Tensor* handle3; - - EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle1), Error::Ok); - EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle2), Error::Ok); - EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle3), Error::Ok); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - EXPECT_EQ(handle1->data_ptr(), orig_ptr); - EXPECT_EQ(handle2->data_ptr(), orig_ptr); - EXPECT_EQ(handle3->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(handle1), Error::Ok); - - EXPECT_EQ(handle2->data_ptr(), orig_ptr); - EXPECT_EQ(handle3->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(handle2), Error::Ok); - - EXPECT_EQ(handle3->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(handle3), Error::Ok); -} - -// ============================================================================ -// Different Dtype Tests -// ============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, Int64Tensor_CPU) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Long), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - EXPECT_EQ(new_tensor->itemsize(), 8); - - 
EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, IncontiguousLayout_CPU) { - std::vector sizes = {3, 4}; - std::vector strides = {1, 3}; // Column-major (incontiguous) - Tensor* orig_tensor = createTestTensor( - sizes, - strides, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - EXPECT_EQ(new_tensor->stride(0), 1); - EXPECT_EQ(new_tensor->stride(1), 3); - - EXPECT_EQ(new_tensor->data_ptr(), orig_tensor->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, BasicFunctionality_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - EXPECT_TRUE(orig_tensor->is_cuda()); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(new_tensor, nullptr); - EXPECT_TRUE(new_tensor->is_cuda()); - - EXPECT_EQ(new_tensor->data_ptr(), orig_tensor->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, 
MemorySharing_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {3, 4}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - ASSERT_NE(orig_ptr, nullptr); - - Tensor* new_tensor; - AOTITorchError error = aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - - void* new_ptr = new_tensor->data_ptr(); - EXPECT_EQ(orig_ptr, new_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - void* still_valid_ptr = new_tensor->data_ptr(); - EXPECT_EQ(still_valid_ptr, new_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(new_tensor), Error::Ok); -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, MultipleHandles_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - std::vector handles; - const int num_handles = 5; - - for (int i = 0; i < num_handles; i++) { - Tensor* new_tensor; - AOTITorchError error = - aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - EXPECT_EQ(new_tensor->data_ptr(), orig_ptr); - handles.push_back(new_tensor); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - for (Tensor* handle : handles) { - EXPECT_EQ(handle->data_ptr(), orig_ptr); - EXPECT_TRUE(handle->is_cuda()); - } - - for (Tensor* handle : handles) { - EXPECT_EQ(aoti_torch_delete_tensor_object(handle), Error::Ok); - } -} - -TEST_F(AOTITorchNewTensorHandleSlimTest, ReferenceCountingTest_CUDA) { 
- if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - Tensor* handle1; - Tensor* handle2; - Tensor* handle3; - - EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle1), Error::Ok); - EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle2), Error::Ok); - EXPECT_EQ(aoti_torch_new_tensor_handle(orig_tensor, &handle3), Error::Ok); - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - EXPECT_EQ(handle1->data_ptr(), orig_ptr); - EXPECT_EQ(handle2->data_ptr(), orig_ptr); - EXPECT_EQ(handle3->data_ptr(), orig_ptr); - - EXPECT_EQ(aoti_torch_delete_tensor_object(handle1), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(handle2), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(handle3), Error::Ok); -} - -// ============================================================================ -// Mixed Device Tests -// ============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, MixedDeviceHandles) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - - std::vector sizes = {2, 3}; - - Tensor* cpu_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(cpu_tensor, nullptr); - EXPECT_TRUE(cpu_tensor->is_cpu()); - - Tensor* cuda_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CUDA), - 0); - ASSERT_NE(cuda_tensor, nullptr); - EXPECT_TRUE(cuda_tensor->is_cuda()); - - Tensor* cpu_handle; - Tensor* cuda_handle; - - EXPECT_EQ(aoti_torch_new_tensor_handle(cpu_tensor, &cpu_handle), Error::Ok); - 
EXPECT_EQ(aoti_torch_new_tensor_handle(cuda_tensor, &cuda_handle), Error::Ok); - - EXPECT_TRUE(cpu_handle->is_cpu()); - EXPECT_TRUE(cuda_handle->is_cuda()); - EXPECT_NE(cpu_handle->data_ptr(), cuda_handle->data_ptr()); - - EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_tensor), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cpu_handle), Error::Ok); - EXPECT_EQ(aoti_torch_delete_tensor_object(cuda_handle), Error::Ok); -} - -// ============================================================================ -// Stress Tests -// ============================================================================ - -TEST_F(AOTITorchNewTensorHandleSlimTest, StressTestManyHandles_CPU) { - std::vector sizes = {2, 3}; - Tensor* orig_tensor = createTestTensor( - sizes, - {}, - static_cast(slim_c10::ScalarType::Float), - static_cast(slim_c10::DeviceType::CPU), - 0); - ASSERT_NE(orig_tensor, nullptr); - - void* orig_ptr = orig_tensor->data_ptr(); - - const int num_handles = 100; - std::vector handles; - - for (int i = 0; i < num_handles; i++) { - Tensor* new_tensor; - AOTITorchError error = - aoti_torch_new_tensor_handle(orig_tensor, &new_tensor); - EXPECT_EQ(error, Error::Ok); - ASSERT_NE(new_tensor, nullptr); - EXPECT_EQ(new_tensor->data_ptr(), orig_ptr); - handles.push_back(new_tensor); - } - - EXPECT_EQ(aoti_torch_delete_tensor_object(orig_tensor), Error::Ok); - - for (Tensor* handle : handles) { - EXPECT_EQ(handle->data_ptr(), orig_ptr); - } - - for (Tensor* handle : handles) { - EXPECT_EQ(aoti_torch_delete_tensor_object(handle), Error::Ok); - } -} diff --git a/backends/cuda/runtime/utils.h b/backends/cuda/runtime/utils.h index 4474f8cf57e..8517ec21af6 100644 --- a/backends/cuda/runtime/utils.h +++ b/backends/cuda/runtime/utils.h @@ -31,6 +31,7 @@ } while (0) // CUDA error checking macro (without return, for use in void functions) +#ifndef ET_CUDA_CHECK #define ET_CUDA_CHECK(EXPR) \ do { \ const 
cudaError_t err = EXPR; \ @@ -45,6 +46,7 @@ cudaGetErrorString(err)); \ ET_CHECK_MSG(false, "CUDA error: %s", cudaGetErrorString(err)); \ } while (0) +#endif // Kernel launch check macro (with return) #define ET_CUDA_KERNEL_LAUNCH_CHECK_OR_RETURN_ERROR() \ From 58b70ce082bbcdf328252838cf20586c24f6edd8 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Tue, 13 Jan 2026 11:37:40 -0800 Subject: [PATCH 2/9] Update on "[slimtensor] integration into backend" Differential Revision: [D90606409](https://our.internmc.facebook.com/intern/diff/D90606409/) [ghstack-poisoned] --- backends/aoti/CMakeLists.txt | 27 +- backends/aoti/common_shims.h | 367 +++++++-- backends/aoti/common_shims_slim.h | 352 -------- backends/aoti/tests/test_common_shims.cpp | 763 ++++++++++++------ .../aoti/tests/test_common_shims_slim.cpp | 632 --------------- 5 files changed, 842 insertions(+), 1299 deletions(-) delete mode 100644 backends/aoti/common_shims_slim.h delete mode 100644 backends/aoti/tests/test_common_shims_slim.cpp diff --git a/backends/aoti/CMakeLists.txt b/backends/aoti/CMakeLists.txt index d5582dfe7c7..121f4b60418 100644 --- a/backends/aoti/CMakeLists.txt +++ b/backends/aoti/CMakeLists.txt @@ -25,34 +25,25 @@ endif() include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake) find_package_torch() -# Common AOTI functionality - combines all AOTI common components -set(_aoti_common_sources common_shims.cpp) -add_library(aoti_common STATIC ${_aoti_common_sources}) +# Common AOTI functionality - header-only library for common shims +add_library(aoti_common INTERFACE) target_include_directories( aoti_common - PUBLIC $ $ - $ + INTERFACE $ + $ + $ ) target_compile_options( aoti_common - PUBLIC $<$:/EHsc /GR> - $<$>:-fexceptions -frtti -fPIC> + INTERFACE $<$:/EHsc /GR> + $<$>:-fexceptions -frtti -fPIC> ) target_compile_definitions( - aoti_common PRIVATE $<$:EXPORT_AOTI_FUNCTIONS> + aoti_common INTERFACE $<$:EXPORT_AOTI_FUNCTIONS> ) -# Ensure symbols are exported properly -if(APPLE) - 
target_link_options(aoti_common PUBLIC -Wl,-export_dynamic) -else() - target_link_options( - aoti_common PUBLIC $<$>:-Wl,--export-dynamic> - ) -endif() # Link against ExecuTorch libraries and standard libraries -target_link_libraries(aoti_common PUBLIC extension_tensor ${CMAKE_DL_LIBS}) -executorch_target_link_options_shared_lib(aoti_common) +target_link_libraries(aoti_common INTERFACE extension_tensor ${CMAKE_DL_LIBS}) install( TARGETS aoti_common diff --git a/backends/aoti/common_shims.h b/backends/aoti/common_shims.h index 3fc414fb669..dfcdecd2bc2 100644 --- a/backends/aoti/common_shims.h +++ b/backends/aoti/common_shims.h @@ -9,104 +9,343 @@ #pragma once #include -#include #include -#include #include #include #include +// Uses conditional compilation to separate the implementation between +// CUDA backend (SlimTensor) and other backends like MPS (ETensor). +// The caller determines which path is used by defining CUDA_AVAILABLE. +#ifdef CUDA_AVAILABLE +#include +#else +#include +#endif + namespace executorch { namespace backends { namespace aoti { // Common using declarations for ExecuTorch types using executorch::runtime::Error; -using executorch::runtime::etensor::Tensor; - -// Global storage for tensor metadata -extern std::unordered_map> tensor_to_sizes; -extern std::unordered_map> tensor_to_strides; -extern "C" { +// ============================================================ +// Tensor Type Definition - branched based on CUDA_AVAILABLE +// ============================================================ +#ifdef CUDA_AVAILABLE +using Tensor = executorch::backends::aoti::slim::SlimTensor; +#else +using Tensor = executorch::runtime::etensor::Tensor; +#endif // Common AOTI type aliases using AOTIRuntimeError = Error; using AOTITorchError = Error; -// Attribute-related operations (memory-irrelevant) -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_data_ptr(Tensor* tensor, void** ret_data_ptr); +#ifndef CUDA_AVAILABLE +namespace internal { +// Global storage for 
tensor metadata (ETensor path only) +// SlimTensor stores sizes/strides directly in int64_t[] - no caching needed +inline std::unordered_map>& tensor_to_sizes() { + static std::unordered_map> instance; + return instance; +} +inline std::unordered_map>& tensor_to_strides() { + static std::unordered_map> instance; + return instance; +} +} // namespace internal +#endif + +// ============================================================ +// Basic Property Getters - Inline implementations +// ============================================================ + +inline AOTITorchError aoti_torch_get_data_ptr( + Tensor* tensor, + void** ret_data_ptr) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_data_ptr == nullptr) { + return Error::InvalidArgument; + } + +#ifdef CUDA_AVAILABLE + *ret_data_ptr = tensor->data_ptr(); +#else + *ret_data_ptr = tensor->mutable_data_ptr(); +#endif + return Error::Ok; +} + +inline AOTITorchError aoti_torch_get_sizes( + Tensor* tensor, + int64_t** ret_sizes) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_sizes == nullptr) { + return Error::InvalidArgument; + } + +#ifdef CUDA_AVAILABLE + // SlimTensor stores sizes directly in int64_t[] - no caching needed + *ret_sizes = const_cast(tensor->sizes().data()); +#else + auto it = internal::tensor_to_sizes().find(tensor); + bool needs_update = false; + + if (it == internal::tensor_to_sizes().end()) { + needs_update = true; + } else { + // Validate cached metadata matches current tensor state + auto tensor_sizes = tensor->sizes(); + needs_update = !std::equal( + it->second.begin(), + it->second.end(), + tensor_sizes.begin(), + tensor_sizes.end()); + } + + if (needs_update) { + std::vector sizes(tensor->dim()); + auto tensor_sizes = tensor->sizes(); + for (int i = 0; i < tensor->dim(); i++) { + sizes[i] = tensor_sizes[i]; + } + it = internal::tensor_to_sizes() + .insert_or_assign(tensor, std::move(sizes)) + .first; + } + + // For 0D tensors, data() 
returns nullptr on empty vectors + if (it->second.empty()) { + static int64_t empty_sizes_placeholder = 0; + *ret_sizes = &empty_sizes_placeholder; + } else { + *ret_sizes = it->second.data(); + } +#endif + return Error::Ok; +} + +inline AOTITorchError aoti_torch_get_strides( + Tensor* tensor, + int64_t** ret_strides) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_strides == nullptr) { + return Error::InvalidArgument; + } + +#ifdef CUDA_AVAILABLE + // SlimTensor stores strides directly in int64_t[] - no caching needed + *ret_strides = const_cast(tensor->strides().data()); +#else + auto it = internal::tensor_to_strides().find(tensor); + bool needs_update = false; + + if (it == internal::tensor_to_strides().end()) { + needs_update = true; + } else { + // Validate cached metadata matches current tensor state + auto tensor_strides = tensor->strides(); + needs_update = !std::equal( + it->second.begin(), + it->second.end(), + tensor_strides.begin(), + tensor_strides.end()); + } + + if (needs_update) { + std::vector strides(tensor->dim()); + auto tensor_strides = tensor->strides(); + for (int i = 0; i < tensor->dim(); i++) { + strides[i] = tensor_strides[i]; + } + it = internal::tensor_to_strides() + .insert_or_assign(tensor, std::move(strides)) + .first; + } + + // For 0D tensors, data() returns nullptr on empty vectors + if (it->second.empty()) { + static int64_t empty_strides_placeholder = 0; + *ret_strides = &empty_strides_placeholder; + } else { + *ret_strides = it->second.data(); + } +#endif + return Error::Ok; +} + +inline AOTITorchError aoti_torch_get_dtype(Tensor* tensor, int32_t* ret_dtype) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_dtype == nullptr) { + return Error::InvalidArgument; + } + +#ifdef CUDA_AVAILABLE + *ret_dtype = static_cast(tensor->dtype()); +#else + *ret_dtype = static_cast(tensor->scalar_type()); +#endif + return Error::Ok; +} + +inline AOTITorchError aoti_torch_get_dim(Tensor* 
tensor, int64_t* ret_dim) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_dim == nullptr) { + return Error::InvalidArgument; + } + + *ret_dim = static_cast(tensor->dim()); + return Error::Ok; +} + +// ============================================================ +// Storage & Device Property Getters - Inline implementations +// ============================================================ + +inline AOTITorchError aoti_torch_get_storage_offset( + Tensor* tensor, + int64_t* ret_storage_offset) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_storage_offset == nullptr) { + return Error::InvalidArgument; + } + +#ifdef CUDA_AVAILABLE + // SlimTensor supports real storage offset + *ret_storage_offset = tensor->storage_offset(); +#else + // ETensor doesn't support storage_offset, return 0 + *ret_storage_offset = 0; +#endif + return Error::Ok; +} + +inline AOTITorchError aoti_torch_get_storage_size( + Tensor* tensor, + int64_t* ret_size) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_size == nullptr) { + return Error::InvalidArgument; + } + + *ret_size = static_cast(tensor->nbytes()); + return Error::Ok; +} + +inline AOTITorchError aoti_torch_get_device_type( + Tensor* tensor, + int32_t* ret_device_type) { + if (tensor == nullptr) { + return Error::InvalidArgument; + } + if (ret_device_type == nullptr) { + return Error::InvalidArgument; + } -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_storage_offset(Tensor* tensor, int64_t* ret_storage_offset); +#ifdef CUDA_AVAILABLE + // SlimTensor supports real device type + *ret_device_type = static_cast(tensor->device_type()); +#else + // ETensor is always CPU in default mode + *ret_device_type = 0; // CPU +#endif + return Error::Ok; +} -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_strides(Tensor* tensor, int64_t** ret_strides); +inline AOTITorchError aoti_torch_get_device_index( + Tensor* tensor, + int32_t* ret_device_index) { + if (tensor == 
nullptr) { + return Error::InvalidArgument; + } + if (ret_device_index == nullptr) { + return Error::InvalidArgument; + } -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_dtype(Tensor* tensor, int32_t* ret_dtype); +#ifdef CUDA_AVAILABLE + // SlimTensor supports real device index + *ret_device_index = static_cast(tensor->device_index()); +#else + // ETensor doesn't support multi-device, return 0 + *ret_device_index = 0; +#endif + return Error::Ok; +} -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_sizes(Tensor* tensor, int64_t** ret_sizes); +// ============================================================ +// DType Constants - These return PyTorch ScalarType enum values +// ============================================================ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_storage_size(Tensor* tensor, int64_t* ret_size); +inline int32_t aoti_torch_dtype_float32() { + return 6; // ScalarType::Float +} -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_device_index(Tensor* tensor, int32_t* ret_device_index); +inline int32_t aoti_torch_dtype_bfloat16() { + return 15; // ScalarType::BFloat16 +} -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim); +inline int32_t aoti_torch_dtype_int64() { + return 4; // ScalarType::Long +} -// Utility functions for device and layout information -AOTI_SHIM_EXPORT int32_t aoti_torch_device_type_cpu(); -AOTI_SHIM_EXPORT int32_t aoti_torch_layout_strided(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_float32(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_bfloat16(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_bool(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int8(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int16(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int32(); -AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int64(); +inline int32_t aoti_torch_dtype_int32() { + return 3; // ScalarType::Int +} -// Dtype utility function needed by Metal backend -AOTI_SHIM_EXPORT size_t 
aoti_torch_dtype_element_size(int32_t dtype); +inline int32_t aoti_torch_dtype_int16() { + return 2; // ScalarType::Short +} -// Autograd mode functions -AOTI_SHIM_EXPORT int32_t aoti_torch_grad_mode_is_enabled(); -AOTI_SHIM_EXPORT void aoti_torch_grad_mode_set_enabled(bool enabled); +inline int32_t aoti_torch_dtype_int8() { + return 1; // ScalarType::Char +} -// Cleanup functions for clearing global state -AOTI_SHIM_EXPORT void cleanup_tensor_metadata(); +inline int32_t aoti_torch_dtype_bool() { + return 11; // ScalarType::Bool +} -AOTI_SHIM_EXPORT void aoti_torch_warn( - const char* func, - const char* file, - uint32_t line, - const char* msg); +// ============================================================ +// Device Type Constants +// ============================================================ -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_get_storage_size(Tensor* tensor, int64_t* ret_size); +inline int32_t aoti_torch_device_type_cpu() { + return 0; // DeviceType::CPU +} -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_clone_preserve_strides(Tensor* self, Tensor** ret_new_tensor); +inline int32_t aoti_torch_device_type_cuda() { + return 1; // DeviceType::CUDA +} -AOTI_SHIM_EXPORT AOTITorchError -aoti_torch_clone(Tensor* self, Tensor** ret_new_tensor); +// ============================================================ +// Grad Mode Functions (not supported in ExecuTorch) +// ============================================================ -AOTI_SHIM_EXPORT AOTITorchError aoti_torch_create_tensor_from_blob( - void* data_ptr, - int64_t ndim, - const int64_t* sizes, - const int64_t* strides, - int64_t storage_offset, - int32_t dtype, - int32_t device_type, - int32_t device_index, - Tensor** ret_new_tensor); +inline bool aoti_torch_grad_mode_is_enabled() { + return false; // ExecuTorch doesn't support autograd +} -} // extern "C" +inline AOTITorchError aoti_torch_grad_mode_set_enabled(bool enabled) { + if (enabled) { + return Error::NotSupported; // Grad mode not supported in 
ExecuTorch + } + return Error::Ok; +} } // namespace aoti } // namespace backends diff --git a/backends/aoti/common_shims_slim.h b/backends/aoti/common_shims_slim.h deleted file mode 100644 index dfcdecd2bc2..00000000000 --- a/backends/aoti/common_shims_slim.h +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. - */ - -#pragma once - -#include -#include -#include -#include -#include - -// Uses conditional compilation to separate the implementation between -// CUDA backend (SlimTensor) and other backends like MPS (ETensor). -// The caller determines which path is used by defining CUDA_AVAILABLE. -#ifdef CUDA_AVAILABLE -#include -#else -#include -#endif - -namespace executorch { -namespace backends { -namespace aoti { - -// Common using declarations for ExecuTorch types -using executorch::runtime::Error; - -// ============================================================ -// Tensor Type Definition - branched based on CUDA_AVAILABLE -// ============================================================ -#ifdef CUDA_AVAILABLE -using Tensor = executorch::backends::aoti::slim::SlimTensor; -#else -using Tensor = executorch::runtime::etensor::Tensor; -#endif - -// Common AOTI type aliases -using AOTIRuntimeError = Error; -using AOTITorchError = Error; - -#ifndef CUDA_AVAILABLE -namespace internal { -// Global storage for tensor metadata (ETensor path only) -// SlimTensor stores sizes/strides directly in int64_t[] - no caching needed -inline std::unordered_map>& tensor_to_sizes() { - static std::unordered_map> instance; - return instance; -} -inline std::unordered_map>& tensor_to_strides() { - static std::unordered_map> instance; - return instance; -} -} // namespace internal -#endif - -// ============================================================ -// Basic Property Getters - 
Inline implementations -// ============================================================ - -inline AOTITorchError aoti_torch_get_data_ptr( - Tensor* tensor, - void** ret_data_ptr) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_data_ptr == nullptr) { - return Error::InvalidArgument; - } - -#ifdef CUDA_AVAILABLE - *ret_data_ptr = tensor->data_ptr(); -#else - *ret_data_ptr = tensor->mutable_data_ptr(); -#endif - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_sizes( - Tensor* tensor, - int64_t** ret_sizes) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_sizes == nullptr) { - return Error::InvalidArgument; - } - -#ifdef CUDA_AVAILABLE - // SlimTensor stores sizes directly in int64_t[] - no caching needed - *ret_sizes = const_cast(tensor->sizes().data()); -#else - auto it = internal::tensor_to_sizes().find(tensor); - bool needs_update = false; - - if (it == internal::tensor_to_sizes().end()) { - needs_update = true; - } else { - // Validate cached metadata matches current tensor state - auto tensor_sizes = tensor->sizes(); - needs_update = !std::equal( - it->second.begin(), - it->second.end(), - tensor_sizes.begin(), - tensor_sizes.end()); - } - - if (needs_update) { - std::vector sizes(tensor->dim()); - auto tensor_sizes = tensor->sizes(); - for (int i = 0; i < tensor->dim(); i++) { - sizes[i] = tensor_sizes[i]; - } - it = internal::tensor_to_sizes() - .insert_or_assign(tensor, std::move(sizes)) - .first; - } - - // For 0D tensors, data() returns nullptr on empty vectors - if (it->second.empty()) { - static int64_t empty_sizes_placeholder = 0; - *ret_sizes = &empty_sizes_placeholder; - } else { - *ret_sizes = it->second.data(); - } -#endif - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_strides( - Tensor* tensor, - int64_t** ret_strides) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_strides == nullptr) { - return Error::InvalidArgument; - } - -#ifdef 
CUDA_AVAILABLE - // SlimTensor stores strides directly in int64_t[] - no caching needed - *ret_strides = const_cast(tensor->strides().data()); -#else - auto it = internal::tensor_to_strides().find(tensor); - bool needs_update = false; - - if (it == internal::tensor_to_strides().end()) { - needs_update = true; - } else { - // Validate cached metadata matches current tensor state - auto tensor_strides = tensor->strides(); - needs_update = !std::equal( - it->second.begin(), - it->second.end(), - tensor_strides.begin(), - tensor_strides.end()); - } - - if (needs_update) { - std::vector strides(tensor->dim()); - auto tensor_strides = tensor->strides(); - for (int i = 0; i < tensor->dim(); i++) { - strides[i] = tensor_strides[i]; - } - it = internal::tensor_to_strides() - .insert_or_assign(tensor, std::move(strides)) - .first; - } - - // For 0D tensors, data() returns nullptr on empty vectors - if (it->second.empty()) { - static int64_t empty_strides_placeholder = 0; - *ret_strides = &empty_strides_placeholder; - } else { - *ret_strides = it->second.data(); - } -#endif - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_dtype(Tensor* tensor, int32_t* ret_dtype) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_dtype == nullptr) { - return Error::InvalidArgument; - } - -#ifdef CUDA_AVAILABLE - *ret_dtype = static_cast(tensor->dtype()); -#else - *ret_dtype = static_cast(tensor->scalar_type()); -#endif - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_dim == nullptr) { - return Error::InvalidArgument; - } - - *ret_dim = static_cast(tensor->dim()); - return Error::Ok; -} - -// ============================================================ -// Storage & Device Property Getters - Inline implementations -// ============================================================ - -inline AOTITorchError 
aoti_torch_get_storage_offset( - Tensor* tensor, - int64_t* ret_storage_offset) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_storage_offset == nullptr) { - return Error::InvalidArgument; - } - -#ifdef CUDA_AVAILABLE - // SlimTensor supports real storage offset - *ret_storage_offset = tensor->storage_offset(); -#else - // ETensor doesn't support storage_offset, return 0 - *ret_storage_offset = 0; -#endif - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_storage_size( - Tensor* tensor, - int64_t* ret_size) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_size == nullptr) { - return Error::InvalidArgument; - } - - *ret_size = static_cast(tensor->nbytes()); - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_device_type( - Tensor* tensor, - int32_t* ret_device_type) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_device_type == nullptr) { - return Error::InvalidArgument; - } - -#ifdef CUDA_AVAILABLE - // SlimTensor supports real device type - *ret_device_type = static_cast(tensor->device_type()); -#else - // ETensor is always CPU in default mode - *ret_device_type = 0; // CPU -#endif - return Error::Ok; -} - -inline AOTITorchError aoti_torch_get_device_index( - Tensor* tensor, - int32_t* ret_device_index) { - if (tensor == nullptr) { - return Error::InvalidArgument; - } - if (ret_device_index == nullptr) { - return Error::InvalidArgument; - } - -#ifdef CUDA_AVAILABLE - // SlimTensor supports real device index - *ret_device_index = static_cast(tensor->device_index()); -#else - // ETensor doesn't support multi-device, return 0 - *ret_device_index = 0; -#endif - return Error::Ok; -} - -// ============================================================ -// DType Constants - These return PyTorch ScalarType enum values -// ============================================================ - -inline int32_t aoti_torch_dtype_float32() { - return 6; // ScalarType::Float -} - 
-inline int32_t aoti_torch_dtype_bfloat16() { - return 15; // ScalarType::BFloat16 -} - -inline int32_t aoti_torch_dtype_int64() { - return 4; // ScalarType::Long -} - -inline int32_t aoti_torch_dtype_int32() { - return 3; // ScalarType::Int -} - -inline int32_t aoti_torch_dtype_int16() { - return 2; // ScalarType::Short -} - -inline int32_t aoti_torch_dtype_int8() { - return 1; // ScalarType::Char -} - -inline int32_t aoti_torch_dtype_bool() { - return 11; // ScalarType::Bool -} - -// ============================================================ -// Device Type Constants -// ============================================================ - -inline int32_t aoti_torch_device_type_cpu() { - return 0; // DeviceType::CPU -} - -inline int32_t aoti_torch_device_type_cuda() { - return 1; // DeviceType::CUDA -} - -// ============================================================ -// Grad Mode Functions (not supported in ExecuTorch) -// ============================================================ - -inline bool aoti_torch_grad_mode_is_enabled() { - return false; // ExecuTorch doesn't support autograd -} - -inline AOTITorchError aoti_torch_grad_mode_set_enabled(bool enabled) { - if (enabled) { - return Error::NotSupported; // Grad mode not supported in ExecuTorch - } - return Error::Ok; -} - -} // namespace aoti -} // namespace backends -} // namespace executorch diff --git a/backends/aoti/tests/test_common_shims.cpp b/backends/aoti/tests/test_common_shims.cpp index 0fd1b057f99..94319c6f94d 100644 --- a/backends/aoti/tests/test_common_shims.cpp +++ b/backends/aoti/tests/test_common_shims.cpp @@ -6,330 +6,627 @@ * LICENSE file in the root directory of this source tree. 
*/ -#include -#include -#include #include -#include #include +#include +#include +#include +#include +#include +#include + +#ifdef CUDA_AVAILABLE +#include +#endif + using namespace executorch::backends::aoti; -using namespace executorch::backends::aoti::test; -using namespace executorch::runtime; -using executorch::runtime::etensor::Tensor; +using executorch::runtime::Error; + +namespace slim_c10 = executorch::backends::aoti::slim::c10; +namespace slim = executorch::backends::aoti::slim; + +namespace { + +#ifdef CUDA_AVAILABLE +bool isCudaAvailable() { + int device_count = 0; + cudaError_t err = cudaGetDeviceCount(&device_count); + return (err == cudaSuccess && device_count > 0); +} +#endif + +// Helper to calculate contiguous strides from sizes +std::vector calculateContiguousStrides( + const std::vector& sizes) { + std::vector strides(sizes.size()); + if (sizes.empty()) { + return strides; + } + strides[sizes.size() - 1] = 1; + for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { + strides[i] = strides[i + 1] * sizes[i + 1]; + } + return strides; +} + +} // namespace -// Test fixture for common shims tests -class CommonShimsTest : public ::testing::Test { +// Test fixture for common_shims_slim tests +class CommonShimsSlimTest : public ::testing::Test { protected: void SetUp() override { - // Clean up any existing cached metadata before each test - cleanup_tensor_metadata(); + et_pal_init(); } void TearDown() override { - // Clean up metadata and free any tensor data - cleanup_tensor_metadata(); - for (auto& tensor : test_tensors_) { - free_tensor_data(tensor.get()); + // Cleanup tracked tensors + for (Tensor* t : tensors_) { + delete t; } - test_tensors_.clear(); + tensors_.clear(); } - // Helper to create and track test tensors for cleanup - Tensor* create_tracked_tensor(const std::vector& sizes) { - auto tensor = create_test_tensor(sizes); - Tensor* ptr = tensor.get(); - test_tensors_.push_back(tensor); - return ptr; + void trackTensor(Tensor* t) { + 
if (t != nullptr) { + tensors_.push_back(t); + } + } + + Tensor* createTestTensor( + const std::vector& sizes, + slim_c10::DeviceType device_type) { + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + trackTensor(tensor); + return tensor; } private: - std::vector> test_tensors_; + std::vector tensors_; }; -// Test aoti_torch_get_sizes basic functionality -TEST_F(CommonShimsTest, GetSizesBasicFunctionality) { - // Test 1D tensor - auto tensor_1d = create_tracked_tensor({5}); - int64_t* sizes_ptr; - AOTITorchError error = aoti_torch_get_sizes(tensor_1d, &sizes_ptr); +// ============================================================================ +// Common test body implementations - parameterized by device type +// ============================================================================ + +void runGetDataPtrTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + void* data_ptr = nullptr; + AOTITorchError error = aoti_torch_get_data_ptr(tensor, &data_ptr); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(sizes_ptr, nullptr); - EXPECT_EQ(sizes_ptr[0], 5); + EXPECT_NE(data_ptr, nullptr); + + // Verify the returned pointer matches tensor's data_ptr + EXPECT_EQ(data_ptr, tensor->data_ptr()); + + delete tensor; +} + +void runGetSizesTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3, 4}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); - // Test 2D tensor - auto tensor_2d = create_tracked_tensor({3, 4}); - error = 
aoti_torch_get_sizes(tensor_2d, &sizes_ptr); + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int64_t* ret_sizes = nullptr; + AOTITorchError error = aoti_torch_get_sizes(tensor, &ret_sizes); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(sizes_ptr, nullptr); - EXPECT_EQ(sizes_ptr[0], 3); - EXPECT_EQ(sizes_ptr[1], 4); + EXPECT_NE(ret_sizes, nullptr); - // Test 3D tensor - auto tensor_3d = create_tracked_tensor({2, 3, 4}); - error = aoti_torch_get_sizes(tensor_3d, &sizes_ptr); + // Verify sizes match + EXPECT_EQ(ret_sizes[0], 2); + EXPECT_EQ(ret_sizes[1], 3); + EXPECT_EQ(ret_sizes[2], 4); + + delete tensor; +} + +void runGetStridesTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3, 4}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int64_t* ret_strides = nullptr; + AOTITorchError error = aoti_torch_get_strides(tensor, &ret_strides); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(sizes_ptr, nullptr); - EXPECT_EQ(sizes_ptr[0], 2); - EXPECT_EQ(sizes_ptr[1], 3); - EXPECT_EQ(sizes_ptr[2], 4); + EXPECT_NE(ret_strides, nullptr); + + // Verify strides match: [12, 4, 1] for contiguous [2, 3, 4] + EXPECT_EQ(ret_strides[0], 12); + EXPECT_EQ(ret_strides[1], 4); + EXPECT_EQ(ret_strides[2], 1); + + delete tensor; } -// Test aoti_torch_get_strides basic functionality -TEST_F(CommonShimsTest, GetStridesBasicFunctionality) { +void runGetDtypeTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + // Test Float32 + { + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + 
slim_c10::ScalarType::Float, + device)); + + int32_t ret_dtype = -1; + AOTITorchError error = aoti_torch_get_dtype(tensor, &ret_dtype); + + EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(ret_dtype, static_cast(slim_c10::ScalarType::Float)); + + delete tensor; + } + + // Test Int64 + { + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Long, + device)); + + int32_t ret_dtype = -1; + AOTITorchError error = aoti_torch_get_dtype(tensor, &ret_dtype); + + EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(ret_dtype, static_cast(slim_c10::ScalarType::Long)); + + delete tensor; + } + + // Test BFloat16 + { + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::BFloat16, + device)); + + int32_t ret_dtype = -1; + AOTITorchError error = aoti_torch_get_dtype(tensor, &ret_dtype); + + EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(ret_dtype, static_cast(slim_c10::ScalarType::BFloat16)); + + delete tensor; + } +} + +void runGetDimTest(slim_c10::DeviceType device_type) { + slim_c10::Device device(device_type, 0); + + // Test 0D tensor (scalar) + { + std::vector sizes = {}; + std::vector strides = {}; + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int64_t ret_dim = -1; + AOTITorchError error = aoti_torch_get_dim(tensor, &ret_dim); + + EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(ret_dim, 0); + + delete tensor; + } + // Test 1D tensor - auto tensor_1d = create_tracked_tensor({5}); - int64_t* strides_ptr; - AOTITorchError error = aoti_torch_get_strides(tensor_1d, &strides_ptr); + { + std::vector sizes = {5}; + std::vector strides = calculateContiguousStrides(sizes); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(strides_ptr, nullptr); - EXPECT_EQ(strides_ptr[0], 1); + Tensor* tensor = new Tensor(slim::empty_strided( + 
slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); - // Test 2D tensor - row major: [3, 4] should have strides [4, 1] - auto tensor_2d = create_tracked_tensor({3, 4}); - error = aoti_torch_get_strides(tensor_2d, &strides_ptr); + int64_t ret_dim = -1; + AOTITorchError error = aoti_torch_get_dim(tensor, &ret_dim); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(strides_ptr, nullptr); - EXPECT_EQ(strides_ptr[0], 4); - EXPECT_EQ(strides_ptr[1], 1); + EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(ret_dim, 1); - // Test 3D tensor - row major: [2, 3, 4] should have strides [12, 4, 1] - auto tensor_3d = create_tracked_tensor({2, 3, 4}); - error = aoti_torch_get_strides(tensor_3d, &strides_ptr); + delete tensor; + } - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(strides_ptr, nullptr); - EXPECT_EQ(strides_ptr[0], 12); - EXPECT_EQ(strides_ptr[1], 4); - EXPECT_EQ(strides_ptr[2], 1); + // Test 3D tensor + { + std::vector sizes = {2, 3, 4}; + std::vector strides = calculateContiguousStrides(sizes); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int64_t ret_dim = -1; + AOTITorchError error = aoti_torch_get_dim(tensor, &ret_dim); + + EXPECT_EQ(error, Error::Ok); + EXPECT_EQ(ret_dim, 3); + + delete tensor; + } } -// Test caching logic for sizes -TEST_F(CommonShimsTest, SizesCachingLogic) { - auto tensor = create_tracked_tensor({2, 3, 4}); +// ============================================================================ +// Storage & Device Property Tests +// ============================================================================ + +void runGetStorageOffsetTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + 
slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int64_t ret_storage_offset = -1; + AOTITorchError error = + aoti_torch_get_storage_offset(tensor, &ret_storage_offset); - // First call should cache the sizes - int64_t* sizes_ptr1; - AOTITorchError error = aoti_torch_get_sizes(tensor, &sizes_ptr1); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(sizes_ptr1, nullptr); + // Default storage offset for newly created tensor is 0 + EXPECT_EQ(ret_storage_offset, 0); + + delete tensor; +} + +void runGetStorageSizeTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int64_t ret_size = -1; + AOTITorchError error = aoti_torch_get_storage_size(tensor, &ret_size); - // Second call should return the same cached pointer - int64_t* sizes_ptr2; - error = aoti_torch_get_sizes(tensor, &sizes_ptr2); EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(sizes_ptr1, sizes_ptr2); // Should be the exact same pointer + // 2 * 3 * sizeof(float) = 6 * 4 = 24 bytes + EXPECT_EQ(ret_size, 24); - // Values should still be correct - EXPECT_EQ(sizes_ptr2[0], 2); - EXPECT_EQ(sizes_ptr2[1], 3); - EXPECT_EQ(sizes_ptr2[2], 4); + delete tensor; } -// Test caching logic for strides -TEST_F(CommonShimsTest, StridesCachingLogic) { - auto tensor = create_tracked_tensor({2, 3, 4}); +void runGetDeviceTypeTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int32_t ret_device_type = -1; + AOTITorchError error = aoti_torch_get_device_type(tensor, 
&ret_device_type); - // First call should cache the strides - int64_t* strides_ptr1; - AOTITorchError error = aoti_torch_get_strides(tensor, &strides_ptr1); EXPECT_EQ(error, Error::Ok); - EXPECT_NE(strides_ptr1, nullptr); + EXPECT_EQ(ret_device_type, static_cast(device_type)); + + delete tensor; +} + +void runGetDeviceIndexTest(slim_c10::DeviceType device_type) { + std::vector sizes = {2, 3}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(device_type, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + + int32_t ret_device_index = -1; + AOTITorchError error = aoti_torch_get_device_index(tensor, &ret_device_index); - // Second call should return the same cached pointer - int64_t* strides_ptr2; - error = aoti_torch_get_strides(tensor, &strides_ptr2); EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(strides_ptr1, strides_ptr2); // Should be the exact same pointer + EXPECT_EQ(ret_device_index, 0); - // Values should still be correct - EXPECT_EQ(strides_ptr2[0], 12); - EXPECT_EQ(strides_ptr2[1], 4); - EXPECT_EQ(strides_ptr2[2], 1); + delete tensor; } -// Test that different tensors have different cached entries -TEST_F(CommonShimsTest, DifferentTensorsCacheSeparately) { - auto tensor1 = create_tracked_tensor({2, 3}); - auto tensor2 = create_tracked_tensor({4, 5}); +// ============================================================================ +// CPU Tests +// ============================================================================ - // Get sizes for both tensors - int64_t* sizes1_ptr; - int64_t* sizes2_ptr; +TEST_F(CommonShimsSlimTest, GetDataPtr_CPU) { + runGetDataPtrTest(slim_c10::DeviceType::CPU); +} - EXPECT_EQ(aoti_torch_get_sizes(tensor1, &sizes1_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_sizes(tensor2, &sizes2_ptr), Error::Ok); +TEST_F(CommonShimsSlimTest, GetSizes_CPU) { + runGetSizesTest(slim_c10::DeviceType::CPU); 
+} - // Pointers should be different (different cache entries) - EXPECT_NE(sizes1_ptr, sizes2_ptr); +TEST_F(CommonShimsSlimTest, GetStrides_CPU) { + runGetStridesTest(slim_c10::DeviceType::CPU); +} - // Values should be correct - EXPECT_EQ(sizes1_ptr[0], 2); - EXPECT_EQ(sizes1_ptr[1], 3); - EXPECT_EQ(sizes2_ptr[0], 4); - EXPECT_EQ(sizes2_ptr[1], 5); +TEST_F(CommonShimsSlimTest, GetDtype_CPU) { + runGetDtypeTest(slim_c10::DeviceType::CPU); +} - // Test strides as well - int64_t* strides1_ptr; - int64_t* strides2_ptr; +TEST_F(CommonShimsSlimTest, GetDim_CPU) { + runGetDimTest(slim_c10::DeviceType::CPU); +} - EXPECT_EQ(aoti_torch_get_strides(tensor1, &strides1_ptr), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor2, &strides2_ptr), Error::Ok); +TEST_F(CommonShimsSlimTest, GetStorageOffset_CPU) { + runGetStorageOffsetTest(slim_c10::DeviceType::CPU); +} - // Pointers should be different (different cache entries) - EXPECT_NE(strides1_ptr, strides2_ptr); +TEST_F(CommonShimsSlimTest, GetStorageSize_CPU) { + runGetStorageSizeTest(slim_c10::DeviceType::CPU); +} - // Values should be correct - EXPECT_EQ(strides1_ptr[0], 3); - EXPECT_EQ(strides1_ptr[1], 1); - EXPECT_EQ(strides2_ptr[0], 5); - EXPECT_EQ(strides2_ptr[1], 1); +TEST_F(CommonShimsSlimTest, GetDeviceType_CPU) { + runGetDeviceTypeTest(slim_c10::DeviceType::CPU); } -// Test cache persistence across multiple calls -TEST_F(CommonShimsTest, CachePersistence) { - auto tensor = create_tracked_tensor({3, 4, 5}); +TEST_F(CommonShimsSlimTest, GetDeviceIndex_CPU) { + runGetDeviceIndexTest(slim_c10::DeviceType::CPU); +} - // Multiple calls to sizes should all return the same pointer - int64_t* sizes_ptr1; - int64_t* sizes_ptr2; - int64_t* sizes_ptr3; +// ============================================================================ +// CUDA Tests +// ============================================================================ - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr1), Error::Ok); - 
EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr2), Error::Ok); - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr3), Error::Ok); +#ifdef CUDA_AVAILABLE +TEST_F(CommonShimsSlimTest, GetDataPtr_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetDataPtrTest(slim_c10::DeviceType::CUDA); +} - EXPECT_EQ(sizes_ptr1, sizes_ptr2); - EXPECT_EQ(sizes_ptr2, sizes_ptr3); +TEST_F(CommonShimsSlimTest, GetSizes_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetSizesTest(slim_c10::DeviceType::CUDA); +} - // Multiple calls to strides should all return the same pointer - int64_t* strides_ptr1; - int64_t* strides_ptr2; - int64_t* strides_ptr3; +TEST_F(CommonShimsSlimTest, GetStrides_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetStridesTest(slim_c10::DeviceType::CUDA); +} - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr1), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr2), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr3), Error::Ok); +TEST_F(CommonShimsSlimTest, GetDtype_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetDtypeTest(slim_c10::DeviceType::CUDA); +} - EXPECT_EQ(strides_ptr1, strides_ptr2); - EXPECT_EQ(strides_ptr2, strides_ptr3); +TEST_F(CommonShimsSlimTest, GetDim_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetDimTest(slim_c10::DeviceType::CUDA); } -// Test 0D tensor (scalar) -TEST_F(CommonShimsTest, ScalarTensor) { - auto tensor_0d = create_tracked_tensor({}); +TEST_F(CommonShimsSlimTest, GetStorageOffset_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetStorageOffsetTest(slim_c10::DeviceType::CUDA); +} - // Test sizes for 0D tensor - int64_t* sizes_ptr; - AOTITorchError error = aoti_torch_get_sizes(tensor_0d, &sizes_ptr); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(sizes_ptr, nullptr); 
+TEST_F(CommonShimsSlimTest, GetStorageSize_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetStorageSizeTest(slim_c10::DeviceType::CUDA); +} - // Test strides for 0D tensor - int64_t* strides_ptr; - error = aoti_torch_get_strides(tensor_0d, &strides_ptr); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(strides_ptr, nullptr); +TEST_F(CommonShimsSlimTest, GetDeviceType_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetDeviceTypeTest(slim_c10::DeviceType::CUDA); +} - // Cache should work for 0D tensors too - int64_t* sizes_ptr2; - error = aoti_torch_get_sizes(tensor_0d, &sizes_ptr2); - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(sizes_ptr, sizes_ptr2); +TEST_F(CommonShimsSlimTest, GetDeviceIndex_CUDA) { + if (!isCudaAvailable()) { + GTEST_SKIP() << "CUDA not available"; + } + runGetDeviceIndexTest(slim_c10::DeviceType::CUDA); +} +#endif + +// ============================================================================ +// Error Cases +// ============================================================================ + +TEST_F(CommonShimsSlimTest, NullTensorArgument) { + void* data_ptr = nullptr; + int64_t* sizes = nullptr; + int64_t* strides = nullptr; + int32_t dtype = -1; + int64_t dim = -1; + + EXPECT_EQ( + aoti_torch_get_data_ptr(nullptr, &data_ptr), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_sizes(nullptr, &sizes), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_strides(nullptr, &strides), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_dtype(nullptr, &dtype), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_dim(nullptr, &dim), Error::InvalidArgument); } -// Test large tensor dimensions -TEST_F(CommonShimsTest, LargeTensorDimensions) { - auto tensor = create_tracked_tensor({100, 200, 300, 400}); +TEST_F(CommonShimsSlimTest, NullReturnPointer) { + Tensor* tensor = createTestTensor({2, 3}, slim_c10::DeviceType::CPU); - // Test sizes - int64_t* sizes_ptr; - AOTITorchError error = 
aoti_torch_get_sizes(tensor, &sizes_ptr); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(sizes_ptr, nullptr); - EXPECT_EQ(sizes_ptr[0], 100); - EXPECT_EQ(sizes_ptr[1], 200); - EXPECT_EQ(sizes_ptr[2], 300); - EXPECT_EQ(sizes_ptr[3], 400); - - // Test strides - expected: [24000000, 120000, 400, 1] - int64_t* strides_ptr; - error = aoti_torch_get_strides(tensor, &strides_ptr); - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(strides_ptr, nullptr); - EXPECT_EQ(strides_ptr[0], 24000000); - EXPECT_EQ(strides_ptr[1], 120000); - EXPECT_EQ(strides_ptr[2], 400); - EXPECT_EQ(strides_ptr[3], 1); + EXPECT_EQ(aoti_torch_get_data_ptr(tensor, nullptr), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_sizes(tensor, nullptr), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_strides(tensor, nullptr), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_dtype(tensor, nullptr), Error::InvalidArgument); + EXPECT_EQ(aoti_torch_get_dim(tensor, nullptr), Error::InvalidArgument); } -// Test that cleanup_tensor_metadata clears the cache -TEST_F(CommonShimsTest, CleanupFunctionality) { - auto tensor = create_tracked_tensor({2, 3}); +// ============================================================================ +// Edge Cases +// ============================================================================ - // Cache some data - int64_t* sizes_ptr1; - int64_t* strides_ptr1; - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr1), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr1), Error::Ok); +TEST_F(CommonShimsSlimTest, ScalarTensor) { + std::vector sizes = {}; + std::vector strides = {}; + slim_c10::Device device(slim_c10::DeviceType::CPU, 0); - // Clear the cache - cleanup_tensor_metadata(); + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + trackTensor(tensor); - // Getting sizes/strides again should create new cache entries - // (We can't directly test if the pointers are 
different since that would be - // implementation-dependent, but we can at least verify the functions still - // work) - int64_t* sizes_ptr2; - int64_t* strides_ptr2; - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr2), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr2), Error::Ok); + // Get sizes and strides for 0D tensor + int64_t* ret_sizes = nullptr; + int64_t* ret_strides = nullptr; + int64_t ret_dim = -1; + + EXPECT_EQ(aoti_torch_get_sizes(tensor, &ret_sizes), Error::Ok); + EXPECT_NE(ret_sizes, nullptr); + + EXPECT_EQ(aoti_torch_get_strides(tensor, &ret_strides), Error::Ok); + EXPECT_NE(ret_strides, nullptr); - // Values should still be correct - EXPECT_EQ(sizes_ptr2[0], 2); - EXPECT_EQ(sizes_ptr2[1], 3); - EXPECT_EQ(strides_ptr2[0], 3); - EXPECT_EQ(strides_ptr2[1], 1); + EXPECT_EQ(aoti_torch_get_dim(tensor, &ret_dim), Error::Ok); + EXPECT_EQ(ret_dim, 0); } -// Test mixed operations to ensure caches are independent -TEST_F(CommonShimsTest, IndependentCaches) { - auto tensor = create_tracked_tensor({2, 3, 4}); +TEST_F(CommonShimsSlimTest, LargeTensor) { + std::vector sizes = {100, 200, 300}; + std::vector strides = calculateContiguousStrides(sizes); + slim_c10::Device device(slim_c10::DeviceType::CPU, 0); + + Tensor* tensor = new Tensor(slim::empty_strided( + slim::makeArrayRef(sizes), + slim::makeArrayRef(strides), + slim_c10::ScalarType::Float, + device)); + trackTensor(tensor); + + int64_t* ret_sizes = nullptr; + int64_t* ret_strides = nullptr; + + EXPECT_EQ(aoti_torch_get_sizes(tensor, &ret_sizes), Error::Ok); + EXPECT_EQ(ret_sizes[0], 100); + EXPECT_EQ(ret_sizes[1], 200); + EXPECT_EQ(ret_sizes[2], 300); + + EXPECT_EQ(aoti_torch_get_strides(tensor, &ret_strides), Error::Ok); + EXPECT_EQ(ret_strides[0], 60000); // 200 * 300 + EXPECT_EQ(ret_strides[1], 300); // 300 + EXPECT_EQ(ret_strides[2], 1); +} - // Get sizes first - int64_t* sizes_ptr1; - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr1), Error::Ok); +TEST_F(CommonShimsSlimTest, 
ConsistentPointerReturn) { + Tensor* tensor = createTestTensor({2, 3, 4}, slim_c10::DeviceType::CPU); - // Get strides - int64_t* strides_ptr1; - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr1), Error::Ok); + // Multiple calls should return the same pointer (for SlimTensor) + int64_t* sizes_ptr1 = nullptr; + int64_t* sizes_ptr2 = nullptr; - // Get sizes again - should be cached - int64_t* sizes_ptr2; + EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr1), Error::Ok); EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr2), Error::Ok); EXPECT_EQ(sizes_ptr1, sizes_ptr2); - // Get strides again - should be cached - int64_t* strides_ptr2; + int64_t* strides_ptr1 = nullptr; + int64_t* strides_ptr2 = nullptr; + + EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr1), Error::Ok); EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr2), Error::Ok); EXPECT_EQ(strides_ptr1, strides_ptr2); +} + +// ============================================================================ +// DType Constants Tests +// ============================================================================ + +TEST_F(CommonShimsSlimTest, DTypeConstants) { + // Verify dtype constants match expected PyTorch ScalarType values + EXPECT_EQ(aoti_torch_dtype_float32(), 6); // ScalarType::Float + EXPECT_EQ(aoti_torch_dtype_bfloat16(), 15); // ScalarType::BFloat16 + EXPECT_EQ(aoti_torch_dtype_int64(), 4); // ScalarType::Long + EXPECT_EQ(aoti_torch_dtype_int32(), 3); // ScalarType::Int + EXPECT_EQ(aoti_torch_dtype_int16(), 2); // ScalarType::Short + EXPECT_EQ(aoti_torch_dtype_int8(), 1); // ScalarType::Char + EXPECT_EQ(aoti_torch_dtype_bool(), 11); // ScalarType::Bool +} + +// ============================================================================ +// Device Type Constants Tests +// ============================================================================ - // Sizes and strides pointers should be different (different caches) - EXPECT_NE(sizes_ptr1, strides_ptr1); +TEST_F(CommonShimsSlimTest, 
DeviceTypeConstants) { + EXPECT_EQ(aoti_torch_device_type_cpu(), 0); // DeviceType::CPU + EXPECT_EQ(aoti_torch_device_type_cuda(), 1); // DeviceType::CUDA } -// Test all dtype functions return correct PyTorch dtype codes -TEST_F(CommonShimsTest, AllDtypesReturnCorrectValues) { - EXPECT_EQ(aoti_torch_dtype_float32(), 6); // PyTorch's float32 dtype code - EXPECT_EQ(aoti_torch_dtype_bfloat16(), 15); // PyTorch's bfloat16 dtype code - EXPECT_EQ(aoti_torch_dtype_int8(), 1); // PyTorch's int8 dtype code - EXPECT_EQ(aoti_torch_dtype_int16(), 2); // PyTorch's int16 dtype code - EXPECT_EQ(aoti_torch_dtype_int32(), 3); // PyTorch's int32 dtype code - EXPECT_EQ(aoti_torch_dtype_int64(), 4); // PyTorch's int64 dtype code - EXPECT_EQ(aoti_torch_dtype_bool(), 11); // PyTorch's bool dtype code +// ============================================================================ +// Grad Mode Tests +// ============================================================================ + +TEST_F(CommonShimsSlimTest, GradModeIsEnabled) { + // ExecuTorch doesn't support autograd, so should always return false + EXPECT_EQ(aoti_torch_grad_mode_is_enabled(), false); +} + +TEST_F(CommonShimsSlimTest, GradModeSetEnabled) { + // Setting to false should succeed + EXPECT_EQ(aoti_torch_grad_mode_set_enabled(false), Error::Ok); + + // Setting to true should fail (not supported in ExecuTorch) + EXPECT_EQ(aoti_torch_grad_mode_set_enabled(true), Error::NotSupported); } diff --git a/backends/aoti/tests/test_common_shims_slim.cpp b/backends/aoti/tests/test_common_shims_slim.cpp deleted file mode 100644 index 94319c6f94d..00000000000 --- a/backends/aoti/tests/test_common_shims_slim.cpp +++ /dev/null @@ -1,632 +0,0 @@ -/* - * Copyright (c) Meta Platforms, Inc. and affiliates. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include -#include - -#include -#include -#include -#include -#include -#include - -#ifdef CUDA_AVAILABLE -#include -#endif - -using namespace executorch::backends::aoti; -using executorch::runtime::Error; - -namespace slim_c10 = executorch::backends::aoti::slim::c10; -namespace slim = executorch::backends::aoti::slim; - -namespace { - -#ifdef CUDA_AVAILABLE -bool isCudaAvailable() { - int device_count = 0; - cudaError_t err = cudaGetDeviceCount(&device_count); - return (err == cudaSuccess && device_count > 0); -} -#endif - -// Helper to calculate contiguous strides from sizes -std::vector calculateContiguousStrides( - const std::vector& sizes) { - std::vector strides(sizes.size()); - if (sizes.empty()) { - return strides; - } - strides[sizes.size() - 1] = 1; - for (int64_t i = static_cast(sizes.size()) - 2; i >= 0; i--) { - strides[i] = strides[i + 1] * sizes[i + 1]; - } - return strides; -} - -} // namespace - -// Test fixture for common_shims_slim tests -class CommonShimsSlimTest : public ::testing::Test { - protected: - void SetUp() override { - et_pal_init(); - } - - void TearDown() override { - // Cleanup tracked tensors - for (Tensor* t : tensors_) { - delete t; - } - tensors_.clear(); - } - - void trackTensor(Tensor* t) { - if (t != nullptr) { - tensors_.push_back(t); - } - } - - Tensor* createTestTensor( - const std::vector& sizes, - slim_c10::DeviceType device_type) { - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - trackTensor(tensor); - return tensor; - } - - private: - std::vector tensors_; -}; - -// ============================================================================ -// Common test body implementations - parameterized by device type -// ============================================================================ - -void 
runGetDataPtrTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - void* data_ptr = nullptr; - AOTITorchError error = aoti_torch_get_data_ptr(tensor, &data_ptr); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(data_ptr, nullptr); - - // Verify the returned pointer matches tensor's data_ptr - EXPECT_EQ(data_ptr, tensor->data_ptr()); - - delete tensor; -} - -void runGetSizesTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3, 4}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int64_t* ret_sizes = nullptr; - AOTITorchError error = aoti_torch_get_sizes(tensor, &ret_sizes); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(ret_sizes, nullptr); - - // Verify sizes match - EXPECT_EQ(ret_sizes[0], 2); - EXPECT_EQ(ret_sizes[1], 3); - EXPECT_EQ(ret_sizes[2], 4); - - delete tensor; -} - -void runGetStridesTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3, 4}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int64_t* ret_strides = nullptr; - AOTITorchError error = aoti_torch_get_strides(tensor, &ret_strides); - - EXPECT_EQ(error, Error::Ok); - EXPECT_NE(ret_strides, nullptr); - - // Verify strides match: [12, 4, 1] for contiguous [2, 3, 4] - EXPECT_EQ(ret_strides[0], 12); - EXPECT_EQ(ret_strides[1], 4); - EXPECT_EQ(ret_strides[2], 1); - - delete tensor; -} 
- -void runGetDtypeTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - // Test Float32 - { - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int32_t ret_dtype = -1; - AOTITorchError error = aoti_torch_get_dtype(tensor, &ret_dtype); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_dtype, static_cast(slim_c10::ScalarType::Float)); - - delete tensor; - } - - // Test Int64 - { - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Long, - device)); - - int32_t ret_dtype = -1; - AOTITorchError error = aoti_torch_get_dtype(tensor, &ret_dtype); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_dtype, static_cast(slim_c10::ScalarType::Long)); - - delete tensor; - } - - // Test BFloat16 - { - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::BFloat16, - device)); - - int32_t ret_dtype = -1; - AOTITorchError error = aoti_torch_get_dtype(tensor, &ret_dtype); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_dtype, static_cast(slim_c10::ScalarType::BFloat16)); - - delete tensor; - } -} - -void runGetDimTest(slim_c10::DeviceType device_type) { - slim_c10::Device device(device_type, 0); - - // Test 0D tensor (scalar) - { - std::vector sizes = {}; - std::vector strides = {}; - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int64_t ret_dim = -1; - AOTITorchError error = aoti_torch_get_dim(tensor, &ret_dim); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_dim, 0); - - delete tensor; - } - - // Test 1D tensor - { - std::vector sizes = {5}; - std::vector strides = 
calculateContiguousStrides(sizes); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int64_t ret_dim = -1; - AOTITorchError error = aoti_torch_get_dim(tensor, &ret_dim); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_dim, 1); - - delete tensor; - } - - // Test 3D tensor - { - std::vector sizes = {2, 3, 4}; - std::vector strides = calculateContiguousStrides(sizes); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int64_t ret_dim = -1; - AOTITorchError error = aoti_torch_get_dim(tensor, &ret_dim); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_dim, 3); - - delete tensor; - } -} - -// ============================================================================ -// Storage & Device Property Tests -// ============================================================================ - -void runGetStorageOffsetTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int64_t ret_storage_offset = -1; - AOTITorchError error = - aoti_torch_get_storage_offset(tensor, &ret_storage_offset); - - EXPECT_EQ(error, Error::Ok); - // Default storage offset for newly created tensor is 0 - EXPECT_EQ(ret_storage_offset, 0); - - delete tensor; -} - -void runGetStorageSizeTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - 
int64_t ret_size = -1; - AOTITorchError error = aoti_torch_get_storage_size(tensor, &ret_size); - - EXPECT_EQ(error, Error::Ok); - // 2 * 3 * sizeof(float) = 6 * 4 = 24 bytes - EXPECT_EQ(ret_size, 24); - - delete tensor; -} - -void runGetDeviceTypeTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int32_t ret_device_type = -1; - AOTITorchError error = aoti_torch_get_device_type(tensor, &ret_device_type); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_device_type, static_cast(device_type)); - - delete tensor; -} - -void runGetDeviceIndexTest(slim_c10::DeviceType device_type) { - std::vector sizes = {2, 3}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(device_type, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - - int32_t ret_device_index = -1; - AOTITorchError error = aoti_torch_get_device_index(tensor, &ret_device_index); - - EXPECT_EQ(error, Error::Ok); - EXPECT_EQ(ret_device_index, 0); - - delete tensor; -} - -// ============================================================================ -// CPU Tests -// ============================================================================ - -TEST_F(CommonShimsSlimTest, GetDataPtr_CPU) { - runGetDataPtrTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetSizes_CPU) { - runGetSizesTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetStrides_CPU) { - runGetStridesTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetDtype_CPU) { - runGetDtypeTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetDim_CPU) { - 
runGetDimTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetStorageOffset_CPU) { - runGetStorageOffsetTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetStorageSize_CPU) { - runGetStorageSizeTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetDeviceType_CPU) { - runGetDeviceTypeTest(slim_c10::DeviceType::CPU); -} - -TEST_F(CommonShimsSlimTest, GetDeviceIndex_CPU) { - runGetDeviceIndexTest(slim_c10::DeviceType::CPU); -} - -// ============================================================================ -// CUDA Tests -// ============================================================================ - -#ifdef CUDA_AVAILABLE -TEST_F(CommonShimsSlimTest, GetDataPtr_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetDataPtrTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetSizes_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetSizesTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetStrides_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetStridesTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetDtype_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetDtypeTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetDim_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetDimTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetStorageOffset_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetStorageOffsetTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetStorageSize_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetStorageSizeTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetDeviceType_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() 
<< "CUDA not available"; - } - runGetDeviceTypeTest(slim_c10::DeviceType::CUDA); -} - -TEST_F(CommonShimsSlimTest, GetDeviceIndex_CUDA) { - if (!isCudaAvailable()) { - GTEST_SKIP() << "CUDA not available"; - } - runGetDeviceIndexTest(slim_c10::DeviceType::CUDA); -} -#endif - -// ============================================================================ -// Error Cases -// ============================================================================ - -TEST_F(CommonShimsSlimTest, NullTensorArgument) { - void* data_ptr = nullptr; - int64_t* sizes = nullptr; - int64_t* strides = nullptr; - int32_t dtype = -1; - int64_t dim = -1; - - EXPECT_EQ( - aoti_torch_get_data_ptr(nullptr, &data_ptr), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_sizes(nullptr, &sizes), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_strides(nullptr, &strides), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_dtype(nullptr, &dtype), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_dim(nullptr, &dim), Error::InvalidArgument); -} - -TEST_F(CommonShimsSlimTest, NullReturnPointer) { - Tensor* tensor = createTestTensor({2, 3}, slim_c10::DeviceType::CPU); - - EXPECT_EQ(aoti_torch_get_data_ptr(tensor, nullptr), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_sizes(tensor, nullptr), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_strides(tensor, nullptr), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_dtype(tensor, nullptr), Error::InvalidArgument); - EXPECT_EQ(aoti_torch_get_dim(tensor, nullptr), Error::InvalidArgument); -} - -// ============================================================================ -// Edge Cases -// ============================================================================ - -TEST_F(CommonShimsSlimTest, ScalarTensor) { - std::vector sizes = {}; - std::vector strides = {}; - slim_c10::Device device(slim_c10::DeviceType::CPU, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - 
slim_c10::ScalarType::Float, - device)); - trackTensor(tensor); - - // Get sizes and strides for 0D tensor - int64_t* ret_sizes = nullptr; - int64_t* ret_strides = nullptr; - int64_t ret_dim = -1; - - EXPECT_EQ(aoti_torch_get_sizes(tensor, &ret_sizes), Error::Ok); - EXPECT_NE(ret_sizes, nullptr); - - EXPECT_EQ(aoti_torch_get_strides(tensor, &ret_strides), Error::Ok); - EXPECT_NE(ret_strides, nullptr); - - EXPECT_EQ(aoti_torch_get_dim(tensor, &ret_dim), Error::Ok); - EXPECT_EQ(ret_dim, 0); -} - -TEST_F(CommonShimsSlimTest, LargeTensor) { - std::vector sizes = {100, 200, 300}; - std::vector strides = calculateContiguousStrides(sizes); - slim_c10::Device device(slim_c10::DeviceType::CPU, 0); - - Tensor* tensor = new Tensor(slim::empty_strided( - slim::makeArrayRef(sizes), - slim::makeArrayRef(strides), - slim_c10::ScalarType::Float, - device)); - trackTensor(tensor); - - int64_t* ret_sizes = nullptr; - int64_t* ret_strides = nullptr; - - EXPECT_EQ(aoti_torch_get_sizes(tensor, &ret_sizes), Error::Ok); - EXPECT_EQ(ret_sizes[0], 100); - EXPECT_EQ(ret_sizes[1], 200); - EXPECT_EQ(ret_sizes[2], 300); - - EXPECT_EQ(aoti_torch_get_strides(tensor, &ret_strides), Error::Ok); - EXPECT_EQ(ret_strides[0], 60000); // 200 * 300 - EXPECT_EQ(ret_strides[1], 300); // 300 - EXPECT_EQ(ret_strides[2], 1); -} - -TEST_F(CommonShimsSlimTest, ConsistentPointerReturn) { - Tensor* tensor = createTestTensor({2, 3, 4}, slim_c10::DeviceType::CPU); - - // Multiple calls should return the same pointer (for SlimTensor) - int64_t* sizes_ptr1 = nullptr; - int64_t* sizes_ptr2 = nullptr; - - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr1), Error::Ok); - EXPECT_EQ(aoti_torch_get_sizes(tensor, &sizes_ptr2), Error::Ok); - EXPECT_EQ(sizes_ptr1, sizes_ptr2); - - int64_t* strides_ptr1 = nullptr; - int64_t* strides_ptr2 = nullptr; - - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr1), Error::Ok); - EXPECT_EQ(aoti_torch_get_strides(tensor, &strides_ptr2), Error::Ok); - EXPECT_EQ(strides_ptr1, 
strides_ptr2); -} - -// ============================================================================ -// DType Constants Tests -// ============================================================================ - -TEST_F(CommonShimsSlimTest, DTypeConstants) { - // Verify dtype constants match expected PyTorch ScalarType values - EXPECT_EQ(aoti_torch_dtype_float32(), 6); // ScalarType::Float - EXPECT_EQ(aoti_torch_dtype_bfloat16(), 15); // ScalarType::BFloat16 - EXPECT_EQ(aoti_torch_dtype_int64(), 4); // ScalarType::Long - EXPECT_EQ(aoti_torch_dtype_int32(), 3); // ScalarType::Int - EXPECT_EQ(aoti_torch_dtype_int16(), 2); // ScalarType::Short - EXPECT_EQ(aoti_torch_dtype_int8(), 1); // ScalarType::Char - EXPECT_EQ(aoti_torch_dtype_bool(), 11); // ScalarType::Bool -} - -// ============================================================================ -// Device Type Constants Tests -// ============================================================================ - -TEST_F(CommonShimsSlimTest, DeviceTypeConstants) { - EXPECT_EQ(aoti_torch_device_type_cpu(), 0); // DeviceType::CPU - EXPECT_EQ(aoti_torch_device_type_cuda(), 1); // DeviceType::CUDA -} - -// ============================================================================ -// Grad Mode Tests -// ============================================================================ - -TEST_F(CommonShimsSlimTest, GradModeIsEnabled) { - // ExecuTorch doesn't support autograd, so should always return false - EXPECT_EQ(aoti_torch_grad_mode_is_enabled(), false); -} - -TEST_F(CommonShimsSlimTest, GradModeSetEnabled) { - // Setting to false should succeed - EXPECT_EQ(aoti_torch_grad_mode_set_enabled(false), Error::Ok); - - // Setting to true should fail (not supported in ExecuTorch) - EXPECT_EQ(aoti_torch_grad_mode_set_enabled(true), Error::NotSupported); -} From 40cf5ea119587a87987957165131a67ad9caf4ba Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Tue, 13 Jan 2026 15:57:32 -0800 Subject: [PATCH 3/9] make cmake work --- 
backends/aoti/aoti_delegate_handle.h | 6 -- backends/aoti/tests/test_common_shims.cpp | 2 +- backends/cuda/CMakeLists.txt | 5 +- backends/cuda/runtime/cuda_backend.cpp | 74 ++++++++++++++--------- backends/cuda/runtime/shims/int4mm.cuh | 5 +- 5 files changed, 53 insertions(+), 39 deletions(-) diff --git a/backends/aoti/aoti_delegate_handle.h b/backends/aoti/aoti_delegate_handle.h index cc56c747f8e..7447292e5d9 100644 --- a/backends/aoti/aoti_delegate_handle.h +++ b/backends/aoti/aoti_delegate_handle.h @@ -101,12 +101,6 @@ struct AOTIDelegateHandle { AOTInductorModelContainerRunFunc run; AOTInductorModelUpdateConstantsFromBlobFunc update_constants_from_blob; -#ifdef CUDA_AVAILABLE - // Cached output tensors for skip-copy optimization. - // When copy-skip is enabled, output SlimTensors are cached here to keep - // GPU memory alive while the caller processes the results. - std::vector cached_outputs; -#endif }; } // namespace aoti diff --git a/backends/aoti/tests/test_common_shims.cpp b/backends/aoti/tests/test_common_shims.cpp index 94319c6f94d..3bc76e522cf 100644 --- a/backends/aoti/tests/test_common_shims.cpp +++ b/backends/aoti/tests/test_common_shims.cpp @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include diff --git a/backends/cuda/CMakeLists.txt b/backends/cuda/CMakeLists.txt index c85e07d4b59..0c1abc52320 100644 --- a/backends/cuda/CMakeLists.txt +++ b/backends/cuda/CMakeLists.txt @@ -98,14 +98,17 @@ install( ) # CUDA-specific AOTI shim symbols (dynamically linked) +# Note: common_shims.h is header-only (all functions are inline) set(_aoti_cuda_shim_sources runtime/shims/memory.cpp runtime/shims/tensor_attribute.cpp runtime/guard.cpp runtime/shims/cuda_guard.cpp runtime/shims/int4mm.cu - ${EXECUTORCH_ROOT}/backends/aoti/common_shims.cpp ) add_library(aoti_cuda_shims SHARED ${_aoti_cuda_shim_sources}) +# Define CUDA_AVAILABLE to use SlimTensor in common_shims.h +target_compile_definitions(aoti_cuda_shims PRIVATE CUDA_AVAILABLE=1) + # 
Define export macros for shared library if(MSVC) target_compile_definitions(aoti_cuda_shims PRIVATE EXPORT_AOTI_FUNCTIONS) endif() diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index 4cf0b1521d6..4f3bdf6321a 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include // Include SlimTensor headers for CUDA backend @@ -398,7 +399,7 @@ class ET_EXPERIMENTAL CudaBackend final n_outputs, args.size()) - // NOTE: ExecuTorch tensors are always on CPU/host memory + // NOTE: ExecuTorch tensors may be on CPU or GPU due to the skip-copy optimization // We need to create GPU copies for CUDA kernel execution using SlimTensor std::vector gpu_input_tensors(n_inputs); std::vector gpu_inputs(n_inputs); @@ -489,35 +490,42 @@ } else { // Skip-copy optimization: wrap GPU data as ETensor using from_blob // The caller is responsible for handling GPU data directly - for (size_t i = 0; i < n_outputs; i++) { - // Move output SlimTensors to cached_outputs for lifetime management - handle->cached_outputs.push_back(std::move(gpu_output_tensors[i])); - - // Create an ETensor wrapper pointing to the GPU data - // The data stays on GPU and the caller handles it - SlimTensor& cached = handle->cached_outputs.back(); - auto slim_sizes = cached.sizes(); - auto slim_strides = cached.strides(); - - std::vector et_sizes(cached.dim()); - std::vector et_strides(cached.dim()); - for (size_t d = 0; d < cached.dim(); d++) { - et_sizes[d] = - static_cast(slim_sizes[d]); - et_strides[d] = - static_cast(slim_strides[d]); + { + std::lock_guard guard(cached_outputs_mutex_); + auto& cached_outputs = cached_outputs_[handle]; + + // Clear cached outputs for previous round + cached_outputs.clear(); + for (size_t i = 0; i < n_outputs; i++) { + // Move output SlimTensors to cached_outputs for lifetime management
cached_outputs.push_back(std::move(gpu_output_tensors[i])); + + // Create an ETensor wrapper pointing to the GPU data + // The data stays on GPU and the caller handles it + SlimTensor& cached = cached_outputs.back(); + auto slim_sizes = cached.sizes(); + auto slim_strides = cached.strides(); + + std::vector et_sizes(cached.dim()); + std::vector et_strides(cached.dim()); + for (size_t d = 0; d < cached.dim(); d++) { + et_sizes[d] = + static_cast(slim_sizes[d]); + et_strides[d] = + static_cast(slim_strides[d]); + } + + // Use tensor_ptr_maker to create a non-owning ETensor wrapper + // Note: This creates a view into the SlimTensor's GPU memory + auto tensor_ptr = executorch::extension::from_blob( + cached.data_ptr(), + std::move(et_sizes), + std::move(et_strides), + static_cast(cached.dtype())); + + // Assign the wrapped tensor to the output EValue + args[i + n_inputs]->toTensor() = *tensor_ptr; } - - // Use tensor_ptr_maker to create a non-owning ETensor wrapper - // Note: This creates a view into the SlimTensor's GPU memory - auto tensor_ptr = executorch::extension::from_blob( - cached.data_ptr(), - std::move(et_sizes), - std::move(et_strides), - static_cast(cached.dtype())); - - // Assign the wrapped tensor to the output EValue - args[i + n_inputs]->toTensor() = *tensor_ptr; } } @@ -575,6 +583,14 @@ class ET_EXPERIMENTAL CudaBackend final private: mutable std::mutex skip_copy_method_mutex_; std::string skip_copy_method_; + + // Cached output tensors for skip-copy optimization. + // When copy-skip is enabled, output SlimTensors are cached here to keep + // GPU memory alive while the caller processes the results. + // Maps from AOTIDelegateHandle* to its cached outputs. 
+ mutable std::mutex cached_outputs_mutex_; + mutable std::unordered_map> + cached_outputs_; }; } // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/shims/int4mm.cuh b/backends/cuda/runtime/shims/int4mm.cuh index ee12fb51004..8ee3fcb957e 100644 --- a/backends/cuda/runtime/shims/int4mm.cuh +++ b/backends/cuda/runtime/shims/int4mm.cuh @@ -1177,13 +1177,14 @@ Tensor* _weight_int4pack_mm_cuda( ET_CHECK(B_innerKTiles == 2 || B_innerKTiles == 4 || B_innerKTiles == 8); // A is standard row major - ET_CHECK(A.dtype() == executorch::aten::ScalarType::BFloat16); + // SlimTensor::dtype() returns slim::c10::ScalarType, cast to int32_t for comparison + ET_CHECK(static_cast(A.dtype()) == static_cast(SupportedDTypes::BFLOAT16)); // ET only supports contiguous tensors for now // ET_CHECK(A.is_contiguous()); ET_CHECK(A.dim() == 2); // B has B_innerKTiles k-tiles in the innermost dimension - ET_CHECK(B.dtype() == executorch::aten::ScalarType::Int); + ET_CHECK(static_cast(B.dtype()) == static_cast(SupportedDTypes::INT32)); // ET only supports contiguous tensors for now // ET_CHECK(B.is_contiguous()); ET_CHECK(B.dim() == 4); From c225e32f7a2bca5851f0774d664523f0d3dcd4d0 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Mon, 26 Jan 2026 11:24:35 -0800 Subject: [PATCH 4/9] parakeet works --- CMakeLists.txt | 4 ++ backends/aoti/CMakeLists.txt | 11 +++- backends/aoti/aoti_backend.py | 9 ++- backends/aoti/common_shims_slim.cpp | 10 ++++ backends/aoti/common_shims_slim.h | 7 +++ backends/cuda/runtime/cuda_backend.cpp | 54 +++++++++++------- examples/models/parakeet/2086-149220-0033.wav | Bin 0 -> 237964 bytes 7 files changed, 69 insertions(+), 26 deletions(-) create mode 100644 examples/models/parakeet/2086-149220-0033.wav diff --git a/CMakeLists.txt b/CMakeLists.txt index 30cee4afe53..d230e5fed8c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,6 +123,10 @@ if(EXECUTORCH_ENABLE_BUNDLE_IO) add_definitions(-DET_BUNDLE_IO_ENABLED) endif() 
+if(EXECUTORCH_BUILD_CUDA) + add_definitions(-DCUDA_AVAILABLE=1) +endif() + # -ffunction-sections -fdata-sections: breaks function and data into sections so # they can be properly gc'd. -s: strip symbol. if(WIN32) diff --git a/backends/aoti/CMakeLists.txt b/backends/aoti/CMakeLists.txt index e7acc02e93b..20e8d83a36c 100644 --- a/backends/aoti/CMakeLists.txt +++ b/backends/aoti/CMakeLists.txt @@ -50,7 +50,6 @@ install( EXPORT ExecuTorchTargets DESTINATION ${CMAKE_INSTALL_LIBDIR} ) - # ============================================================================== # AOTI common shims using SlimTensor (for CUDA backend) # Uses SlimTensor for all tensor operations @@ -70,6 +69,16 @@ target_compile_options( target_compile_definitions( aoti_common_shims_slim PUBLIC $<$:EXPORT_AOTI_FUNCTIONS> ) + +# Add CUDA include directories and link CUDA runtime when building with CUDA +if(EXECUTORCH_BUILD_CUDA) + find_package(CUDAToolkit REQUIRED) + target_include_directories( + aoti_common_shims_slim PUBLIC ${CUDAToolkit_INCLUDE_DIRS} + ) + target_link_libraries(aoti_common_shims_slim PUBLIC CUDA::cudart) +endif() + target_link_libraries( aoti_common_shims_slim PUBLIC slimtensor extension_tensor ${CMAKE_DL_LIBS} ) diff --git a/backends/aoti/aoti_backend.py b/backends/aoti/aoti_backend.py index eb732df2a83..3ad5811e6b4 100644 --- a/backends/aoti/aoti_backend.py +++ b/backends/aoti/aoti_backend.py @@ -232,9 +232,12 @@ def preprocess( method_name + "_weights_blob", blob_data, 1, weights_blob_data_type ) - # Clean up the generated files - os.remove(so_path) - os.remove(blob_path) + print("so_path:", so_path) + print("blob_path:", blob_path) + + # # Clean up the generated files + # os.remove(so_path) + # os.remove(blob_path) return PreprocessResult( processed_bytes=b"", diff --git a/backends/aoti/common_shims_slim.cpp b/backends/aoti/common_shims_slim.cpp index 1976ee94a3d..4a3ba43381c 100644 --- a/backends/aoti/common_shims_slim.cpp +++ b/backends/aoti/common_shims_slim.cpp @@ -12,6 
+12,8 @@ namespace executorch { namespace backends { namespace aoti { +extern "C" { + // ============================================================ // Basic Property Getters - Implementations // ============================================================ @@ -56,6 +58,12 @@ AOTITorchError aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim) { return Error::Ok; } +int32_t aoti_torch_layout_strided() { + // Slimtensor only support strided layout, the return value will always be 0, a.k.a + // at::Layout::Strided; + return 0; +} + // ============================================================ // Storage & Device Property Getters - Implementations // ============================================================ @@ -159,6 +167,8 @@ AOTITorchError aoti_torch_grad_mode_set_enabled(bool enabled) { return Error::Ok; } +} // extern "C" + } // namespace aoti } // namespace backends } // namespace executorch diff --git a/backends/aoti/common_shims_slim.h b/backends/aoti/common_shims_slim.h index f3db534f4ce..b4d70ee47b0 100644 --- a/backends/aoti/common_shims_slim.h +++ b/backends/aoti/common_shims_slim.h @@ -27,6 +27,8 @@ using Tensor = executorch::backends::aoti::slim::SlimTensor; using AOTIRuntimeError = Error; using AOTITorchError = Error; +extern "C" { + // ============================================================ // Basic Property Getters - Declarations // ============================================================ @@ -46,6 +48,8 @@ aoti_torch_get_dtype(Tensor* tensor, int32_t* ret_dtype); AOTI_SHIM_EXPORT AOTITorchError aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim); +AOTI_SHIM_EXPORT int32_t aoti_torch_layout_strided(); + // ============================================================ // Storage & Device Property Getters - Declarations // ============================================================ @@ -62,6 +66,7 @@ aoti_torch_get_device_type(Tensor* tensor, int32_t* ret_device_type); AOTI_SHIM_EXPORT AOTITorchError aoti_torch_get_device_index(Tensor* 
tensor, int32_t* ret_device_index); + // ============================================================ // DType Constants - Declarations // ============================================================ @@ -88,6 +93,8 @@ AOTI_SHIM_EXPORT int32_t aoti_torch_device_type_cuda(); AOTI_SHIM_EXPORT bool aoti_torch_grad_mode_is_enabled(); AOTI_SHIM_EXPORT AOTITorchError aoti_torch_grad_mode_set_enabled(bool enabled); +} // extern "C" + } // namespace aoti } // namespace backends } // namespace executorch diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index 3b255ae6c6f..09001239a30 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -280,7 +280,6 @@ class ET_EXPERIMENTAL CudaBackend final cudaStream_t cuda_stream; ET_CUDA_CHECK_OR_RETURN_ERROR(cudaStreamCreate(&cuda_stream)); handle->cuda_stream = static_cast(cuda_stream); - return (DelegateHandle*)handle; // Return the handle post-processing } @@ -290,6 +289,12 @@ class ET_EXPERIMENTAL CudaBackend final DelegateHandle* handle_, Span args) const override { AOTIDelegateHandle* handle = (AOTIDelegateHandle*)handle_; + ET_LOG(Info, "line 292"); + + // executorch::backends::cuda::setCurrentCUDAStream( + // static_cast(handle->cuda_stream), + // 0 // device index + // ); size_t n_inputs; handle->get_num_inputs(handle->container_handle, &n_inputs); @@ -304,13 +309,12 @@ class ET_EXPERIMENTAL CudaBackend final n_inputs, n_outputs, args.size()) + ET_LOG(Info, "line 307"); // NOTE: ExecuTorch tensors maybe on CPU or GPU due to the skip-copy // optimization We need to create GPU copies for CUDA kernel execution using // SlimTensor - std::vector gpu_input_tensors(n_inputs); std::vector gpu_inputs(n_inputs); - std::vector gpu_output_tensors(n_outputs); std::vector gpu_outputs(n_outputs); // Process input tensors: convert ETensor (CPU) to SlimTensor (GPU) @@ -330,23 +334,21 @@ class ET_EXPERIMENTAL CudaBackend final std::vector 
sizes_vec(sizes.begin(), sizes.end()); std::vector strides_vec(strides.begin(), strides.end()); - gpu_input_tensors[i] = slim::from_blob( + gpu_inputs[i] = new SlimTensor(slim::from_blob( const_cast(data_ptr), slim::makeArrayRef(sizes_vec), slim::makeArrayRef(strides_vec), static_cast(cpu_tensor->scalar_type()), DEFAULT_CUDA_DEVICE, 0 // storage_offset - ); - gpu_inputs[i] = &gpu_input_tensors[i]; + )); continue; } } // Data is on CPU - use from_etensor to copy to GPU - gpu_input_tensors[i] = - from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE); - gpu_inputs[i] = &gpu_input_tensors[i]; + gpu_inputs[i] = + new SlimTensor(from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE)); } // Process output tensors: create GPU SlimTensors for kernel output @@ -359,14 +361,15 @@ class ET_EXPERIMENTAL CudaBackend final std::vector sizes_vec(sizes.begin(), sizes.end()); std::vector strides_vec(strides.begin(), strides.end()); - gpu_output_tensors[i] = slim::empty_strided( + gpu_outputs[i] = new SlimTensor(slim::empty_strided( slim::makeArrayRef(sizes_vec), slim::makeArrayRef(strides_vec), static_cast(scalar_type), - DEFAULT_CUDA_DEVICE); - gpu_outputs[i] = &gpu_output_tensors[i]; + DEFAULT_CUDA_DEVICE)); } + ET_LOG(Info, "line 374"); + // Run AOTI container with GPU SlimTensors AOTIRuntimeError error = handle->run( handle->container_handle, @@ -377,6 +380,9 @@ class ET_EXPERIMENTAL CudaBackend final handle->cuda_stream, nullptr); + ET_LOG(Info, "line 387"); + + ET_CHECK_OR_RETURN_ERROR( error == Error::Ok, Internal, @@ -385,6 +391,8 @@ class ET_EXPERIMENTAL CudaBackend final const bool copy_outputs = !should_skip_copy_for_method(handle->method_name); + ET_LOG(Info, "line 398"); + if (copy_outputs) { // Copy GPU SlimTensor results back to CPU ETensors for (size_t i = 0; i < n_outputs; i++) { @@ -405,17 +413,17 @@ class ET_EXPERIMENTAL CudaBackend final cached_outputs.clear(); for (size_t i = 0; i < n_outputs; i++) { // Move output SlimTensors to cached_outputs for 
lifetime management - cached_outputs.push_back(std::move(gpu_output_tensors[i])); + cached_outputs.push_back(std::move(gpu_outputs[i])); // Create an ETensor wrapper pointing to the GPU data // The data stays on GPU and the caller handles it - SlimTensor& cached = cached_outputs.back(); - auto slim_sizes = cached.sizes(); - auto slim_strides = cached.strides(); + SlimTensor* cached = cached_outputs.back(); + auto slim_sizes = cached->sizes(); + auto slim_strides = cached->strides(); - std::vector et_sizes(cached.dim()); - std::vector et_strides(cached.dim()); - for (size_t d = 0; d < cached.dim(); d++) { + std::vector et_sizes(cached->dim()); + std::vector et_strides(cached->dim()); + for (size_t d = 0; d < cached->dim(); d++) { et_sizes[d] = static_cast(slim_sizes[d]); et_strides[d] = @@ -425,10 +433,10 @@ class ET_EXPERIMENTAL CudaBackend final // Use tensor_ptr_maker to create a non-owning ETensor wrapper // Note: This creates a view into the SlimTensor's GPU memory auto tensor_ptr = executorch::extension::from_blob( - cached.data_ptr(), + cached->data_ptr(), std::move(et_sizes), std::move(et_strides), - static_cast(cached.dtype())); + static_cast(cached->dtype())); // Assign the wrapped tensor to the output EValue args[i + n_inputs]->toTensor() = *tensor_ptr; @@ -436,6 +444,8 @@ class ET_EXPERIMENTAL CudaBackend final } } + ET_LOG(Info, "line 451"); + return Error::Ok; } @@ -496,7 +506,7 @@ class ET_EXPERIMENTAL CudaBackend final // GPU memory alive while the caller processes the results. // Maps from AOTIDelegateHandle* to its cached outputs. 
mutable std::mutex cached_outputs_mutex_; - mutable std::unordered_map> + mutable std::unordered_map> cached_outputs_; }; diff --git a/examples/models/parakeet/2086-149220-0033.wav b/examples/models/parakeet/2086-149220-0033.wav new file mode 100644 index 0000000000000000000000000000000000000000..bd4276beead8e7c34ad0d58cc683034b7ded004d GIT binary patch literal 237964 zcmYg&1)Nmn_x`{+|g56 z@xLdyXA0Fb3l=&v&@+kJMjdOYmLb#=t}XO2QwfwuS+t2h&?`ShTexcu{qi@r&wcRt z{_j)V3HOiIZq0y4hO*oO&j!7rFFsHn^VQLqAp;|gIZB8YBIwsbA1j@i=_zC+(l1L5 zGvry~9_nM({0uUMbOdUT|5W;+OS)DySySjcs0B2?=B2|&XTkrjeVUsNkLLRC7w4YP z9hAiVRyv&Yiv!P`p*s$`a?{75*|ior9CY77zs*{s)}S?oz8nAhZutLKcvi2~YIStr zE{u2ls2IXW?`15u^9h=zY|CGxg-4ar$YFGTp%$X0C*uW8AVPhNQ-_ z0Ybu>!|yHqIr{%qWp(IHKZFg zq#ZegKWGwK1FrZ08r>nh0hx@uFdBdZ;1Rz-A&_Aw1mFM?fFs;#4qf3MBP)Cd_xWfE z)nU{DVQ2wmQQsA6n|o#paD_(brEx2SJs*u6qYo0!f}zoR>Dxmyfo1vWfTf`nQ0NcY z8m@svAQRGoMS}U*x?$yfh3l|QOaIBAt)3+bZbb={k%X{~<1VulsWA20@GA`CzuFBhswE)R+%g9(<2$W_=(pY$uFl z#-*O_#?lcT;z1xVk&dYUkZK~E0a}6WC@K{v*yxB3m5!ydz+_D5H=4e|qYQJ$ zuu~k2S6Bqw04(U%XfZdS9lVS2@;B~={~$@s5g)z>Sp(gH|9=1%&^ZqN#W&-E5fb2ej>2~p5t59pgmyKNaud~6$CG}CJ8}m8>2v9jB|_{7=Zoisht)&kEPO$ z8hEpT=9xydD>NPH4t5qpZ8lJSGqvxf8b(2;E5;~#hBhD*XaM(hRL)QG)=@cJ3G@s3 zW5g(fgRfAVHS~Y)0oNEm{uqzYy^+p!G`=W0fFa-!C}MwXpw>|a2T;ts1`H$S-~${R zj!l3cpbXOF10x9I20dfM%z)4@kP}7>aKngz?idNc2cv_v2^~YX%$n?>fq+|_ z)-@2}3^5wGlMiqkKJ53wq&WA6umXw0monBkYG7Qltbrvw1wz0;UTWzUcRPteoit*O zGcYn=s3da=vx1Q%!;BF2U(f^i3@t(%%x1WT3?VbtF?5fb0)4}B;pb2nXV4pK2|B>N z@H4(<)(WYlB^b*U`%mj!&>ImLl{|z*Ki+tgzgaag|&}su8A|g3siEeypq8;<^)6l z>x?_b72^;0xi^&KH2`K1_MnVy)--gFXIzqzhTd@C-mTVg&fX1U!wkX*E zqUUf6h*L0H=2$)t#{%e!TSQAN8=SEX1J~SMc($m`7R{cNwHwwkqb#gVa1b;W{>FXg zb^M|%u!D1;4a^99GNbc<(zEA?Kg6|*$F~rvx19ZUG><6G_ ziSXy4cG=HjRA>+U$nt`H1Vg?Ibzv!Z@_(Zspa>BQ5X0I4n=u}74ZSip!g^)f=4UvE zOt}=K%#yx^f^cab4`U8xAaj;pc-GA7&>W^F9G?RLESE4EU{jC=`#GSISsT6q zt>BLu3~@NuL`lBJH`hcd)Pr5|%7+;p92bshxyM_v7g~ju&>C9`dv|W1d%!dP!x{I* z*XR+P0>%jbr*&o>U<31ItS}dX%fMJzWBRE_U@M5f~4SWaK$Xeatlij*a4DJim`_p7%AHa?s9a%PkG#|J=Eeq 
zzKip)e$hVK!5zMjZ?qe_q`Pd5KyLUhGb37n4FCm<7e+F!;6I>ej^i*Z*faOSr6FHU zGZ2$woxoldJP&RM7QvN>>cXpgwn(sESVm|E=X{6#IYxv(j0%0SByY)%=Y;1NJ@gr_ zfhW)|-3a|LBVmk?3`b^fiVY!vi0LRn3NA@0&o~` zj_3e7MHyfc8pbRc|C(l^DA`6*lPEs-P#H)O9EO;|O`|b~)@m^%0gesWzJW)K77|Cq zZKXEB8t{9JXGjjZ4*OY_9rVC#$FgH4<2zyG;0c7rGR1EgRnRgdhCi0lEslp|!_QFv zmc-fmAUAv=AAXARVMH@4^VnGim?;nf zDdE7%6r*D%W%L7$m@O;?$iW}m3iDK$*?{m{whlBg#&5+mY}<@%_PLBp_-l>>+4ed6 zhG*=r@y&5yIO@kUvn;1`d~x&1yXqZ0Q3N1oKfN29oD|U2BQ%< zYG4#A)^P3`d1kDOkyk_p5G!N$&)g69h1oN2G4sNz!<>vY2ztYMm)rLLcaD~kHG%%L z&pY%j2FW&qee zdO<{hwy<)7_Bk?!jxieM2AuI9^F7+YfqSq&_6@vW!C5?bNkp>9cQYP=4@Llb1!jR6 zW^$aP9K0iNVI-_22uYOpOP~y24Ec@(I#2%Z8Y^Mg4^SCiMWOZZnweWd3C_d=*=%R% zh1X@gX2U2j79f;a1iy%j7+s9QFdv5Rh4F`{kOJflX+s-0XN_~Ng|jR`BQSwq;1B1( z7I20PHokf9q&b8--sRxE7OaKQGe@832{A5@7N}uHCt!rQ9yvJ&wa!e+c)&Sq1yQM= z${~{lkHev3qmf61xiMQY;{n&;31r}b zI%F*nv3RHlU>37P4QSRtvj*-sE`eP_5?GzV%iv(o0&fai#z+hyL^9qm9%e-#KP)X^ zm;E^FjxC5+mhggLEaovFiL#clo|Z>|k@0Q`*e|@s#r_y)`QWp` zP_F;8#{s4U0q1hWr?a(3priv#bE}(E%HX(CT0jJ zh0zC1vi6aA<$Wh)y^zOe>&1Qm`zh!SH5vb~Z;jS25@`^qHHO#*S~rIffao1N1QO{l zo-7(%8l+6Km1-qY+lm%Nw&K^~DPq+T*Z8S~QK+X#)*v(!6Pl?`B(;RBH0+G`5^VGo zdl1M!fz2>BMk7$bx`d{IPgo>o7iJk?6Ze5TjGM3dj5(ey1?T1v^5R152S3a!67~zg z6)XbyfmB;b#$cZ?kHM~iWw2%f&D}=znG=~Mqv;9=WOl$>kz;y}oS=8$D(p-6oFlz( zIc7Xq8uZ8rfK_pPhHD?`AN#l1nd2-lund0-w!>Zo+H%uVupGwV3q6C^1ruWL4KX02 z61W7u(N}oA1KWZwV8a~a;tq19D%HnsGJAKF_fuPTtwyV){boODGCCwd%z`~CqAjdY zVo9^`ND-lPtcfA(Tb_#hVLuEM!(TBs0e2i}an=GB&)x>IMs#WlAsXmnTLLa{1)YPX zU`5;$@984ji**t2oFZ-j(%`AWOb=|Sgni%$Bg6dJlK~C9|ArEnBdi8`!7s**yO;-S z5EjjT9P)+65jP;#;5-#}(oqt=0XxF6G#c!9g7cWwxnIT%?sCkir#k5GmM4Z4LQW_T z8{(V2+dIom@89W-y3L5be{sr1ma#J116->rH z8aBhbBtSGi><`cnJT2sj7{~jcRl%2NbOpABggC2=$cOz^I2wZ&2~u5~kU@H8?7Qg> z`)k&Ki_lj=yUP(m3L#9TS)omicYpy%)u32lfIG zQ9zf_4Mv8#Cj|LYxL31b%=&cxiAf&_>oW=p>3H3k3VFYt*NE^} z;AL=L_#GFF1B`-epcAMB3a~Se7U1C`=^pUKehQHok8yaMFoJS*jIN`z?e*FV(@`EU$!_`*`m> zNN8|UFBplB^eqYCK=2RPj?uzVBzAV~p_YIdAP*V=BYTe#jFVAaC`zT@ls8&E5?E71J)2ly)tvby3rCO;Ujt15DTc9jhGBE 
zYk=kz9kM>WEdiM!-w(dUIdBfX!d$>#EO%g=pF$FdtHaU-7H?(Bct-$;3zDXR5suiw zKAc4XE8$HKtSm8qhgL}Fi6re}6KrbS^AoYp&&>(fZJCZ;+ ziz8&!(Q|jGpK2-ybiiWu2OSM7uxI!9mZmDx+cGB6wX=)}+%VI`4NGl}jQ z2&1q8a0e_NmI%GUf)KS}bqxgJogP>l_z(7a3#&jXY)T{+Y!2}jtcfE*L}_Rp9s{~< zCQhuNGMK%G(24f~TWMZk3m_D0VqgUFa8z!uU!Mg*S=yu#u+HpChNt6Gj$;n`6i zSOmMkzjAH{*6b#3uhL4i0%{Sn!#)VSJMum_TBw~Ul8->1x|K>mBj^bc0%8KJrI3%~ zH88LQPJvvQD|jUdzQr|59qR^2f>8$jB4PvXVAxNf^R|1D%sNwHNo!~IeZ|>p>O=M4~3V6rtpWpc*V?FA9!)djH6O$2r~kX@ymZ~ z1F&i^PAp+DE#!aU1ybldi!i2!WQLJ}Az_IE;SDh(@897ZkqNUfTIYT+R&WXIANB|d z!0w?bXqcr9pDBf~gZGQEuZ2hs{Dt?HYeRK`i8|sk&X;I}ibkr16_iGtcY&^uQ?U~k z$_Rg`2j9rG@SbTk;iQ&+(I=w72D*b%h&#QZ8bHxat(2a@GF)`8j#@zU3*JJ6g*6Zk zV9`Prn@o4gv>RF;wGH2n`Ps=93aO@*n!+5M@LIj5h12)C^duV~Pp@;;4eYAyl zDUgll6%ww&0mz}l2G9n$8`&>l89GHxuon1SpfLjJSSunL#8ZxOS^F3#+JsN!tTE3J z^8zl}uE1>~%{i0UE77*1F9h##=S^wAzH@T7WWfm z_h3&H`=wY@z?-oBK~q?BAj&`{7a0^t2fh%y7f$lE)uctN72rkrU1hKybO710?ZIl$ z3$Gy%Y1UH>SP~AjgY`jJ2f#Vj$ee>l?ghHR$avQR7S1bkSSr{F{lPnh-)Mm>AT?+} zN7Ay9{%WXK#GQx_fWmNm5~Q9RNJnjiTq@_KSvW{S;V6b>6GJjatc}$kd=Byswvg`$ z$5U7(@Tvno1o(|4K1d~Ih@~gcE~0*DEHD=&iy$0wFUU%w51s=?42y)9!9EJV zQw?8_8DX~_%mJ)pEybAx_-Zga(2MUd8-s}uTVjT=JkAV*b%8dZfN_8x!Jy2mh|_Su zF9WUcl|U$>NUWK02PkbNHUpypqd0(R{M!+nBZr7}59c|-z_1{W;~^=mF!{{^UV*^p z#gaCYX!Ph6`~W|V@x!NLY%RouWppQ&BpFL>K$BQqLw3*t7)2jy7ZDb+LGW*oDqx}lAs6Tgt4|;gwZmQ;y<%k0J*19WTpC^zQ9HN>@c{4` z_Qv2dw2TOV*R8x_5;{`kmm{>NqZ8f9C3Gf)+C$VAN9Vk22PVWCDV|W8PWoUg#=4nf-S7zb z4Qb39Sb{AeJ^}Ob$^#q(j|YUnYXTb$|6zrHYsg<>?NA%S4v^DC&+F(8d~1bP9{QA$ z2QHyiL>YO~Vl7|05;|^>XDy^7kFG9gm$ggudsVwezO#VJT%#kON}~m|Q%rTMX#|{E z=KVa58lYi#C0GaIHmpcI)H~`nPz}_vhen0Cp@P~er+G+J&P3%92Vm{O@fv&{^8cwM zIYiCakAvq$%!4&FnhO$%WVY~7A9);<)q0COGax-DI0hcMWg5ZI37fVBfwb@ zz%R}&(izYJeDG07x-Oyv7y(ww==>(ty+UPC?>e=@=(?bt)&AA~34Km$IQ|J;o!0); zj%r7=W7=^#PSEjJ==fVZ89Gi=y$jUZ`B3k9A#7FANUI2wIFK{scmVO6k5C1iRnTm( z2g5eyAT7Zo!Qvr2T($Yg`kQ%UuEA~2&CcF_NGK=cLBf(3d zb{lFXo6a-n9C$??t`}|>t)oxq1}n=9D#0z{9<+~NtTy4lQ>ndNI>0r2bQO9Cy@WnO 
z@6e~0(4DvmcX|sw=@(a>iKRLcKjji*bq#$w(^U?&)Q%9;naXsh?`)a@Y$<_yx$8fzl$)zDE#`URJP!GN$bI)@kIGduwbfZ9?jQxbYwN(js& z1YRTT5P4Mw-x&5= z?AgFXm_ITN*v|n!A_B&IVQIV?#ah-#GC?#AMq_*BcR0`j5DIRCC&~#q^60I1e6`QOf*xnb;2u2(JpREC`{pkj{Y4 zywG(XVH7-jF~n`)Nz}sk1>(CaA)YM=p|mW7TVw~Ze&P2e;Fn0;^v8L8j%krK#mWgQ zCRht|Wf{$;Iuuv>hzVh*$z+*WBS0#*@)C&m;JXk#F-{TlK<@kv9kdIb!ykdqaDl$O4+fdKvK% z;vYmx&?F+~a5N4N4Ij^IH?)X8@W=50@5W(u$E!qSQ@Bs$b>T@dKX5l&SZ0Xtv*<2% z>5!wsY7e}CF=8#lYY|uv{-q``h>Rsx4fw+d_z#&I^osc)I|lrL`x?j_*OIn4lB)`7 z9w-AtmC?bG9drvWLj(vv2)?W&6kaFvT-45xSA{P zd*R~(;SL91qwGaOC!)fl|FB;g>Jd>Y%Ndy`L{W&`Qiy%PLkVPSocDmNv6_PHBM3vi zg#N-np&$Js>x;YKB-S>svRG@--Ytnjs>n^Uh8PK0kMP?X;cNcIU6e-)h}3Ww`4V7- zS3~^n5G)8>%$ej{Yez&0Sa;*C9FEKQZ7ALm1c&j?EXNYyGma?`S0L{UFTgB^C%o#& zAWxi2oZW%A3@eE)#NhDLZAn_~>C=vQj^hTz6ImoD#1DumI4(qtfRzMW5$9>Z!W{cz zWrW-mOA!cz|j zMoa@AjO-Wm4n0E3(IjQ;G$X=Kp(|h?=*HU|;82bBB8@^d&DBk8gwZ#U3{*Pz5a%-5 znbpA8sLMHH-mL}Gg)>!<9ajBlm)VhXmACvXXWHPqfjIWyh~Y2?>_sBNL#!H066qC+ zA9{sQ*qvfW#E~3JAZ|oN*+=Lr+!l%|@!Ok9_6-*{X%?poe+tPnUO}3OFS}Bctm(kSRuS8>=gD0e+Z|Ae4$ckC0dUVv&H`6 zL~*A0g7~)hzWAy5iTIJYQCulLA&wJ!iK(J0Gzd3@Bf@Us6X6|Ujj&93O1NJbLUM|s zorx>jH` zN?mena!uk72{Yq8wgs_T^tq_tB4l{BL{(RgT+dDD3D4*q!ai^|9 zSf$RE3j$aDBmAwtzkMJ1ru&k8C%sR4bG%93(cTZdX+F2l6i5krgInYq@&UQKJSOP! zkM}+1dBiogb&!2}^LLHj`f+vpYwoM=Q8l#k+ln<6c@^t+Ai#nlY6%F8(qeAN$%Xe(~0)FoFiGf^vL92y37;NUq2xGLtRvZ6LQgO=rY44{0mL8M7 zDpi;Kcf2>|uZR~+_vkihL*=#pMP9Gl;{MF_xvSdssN3MF@W|dr{WXED@)K%*GPE~! 
zU+M=N`WTk$({v-m6zylZd*DlNFL!n83HzbuV~rQ;9kr1)F;y8AW6GA5{8-ezu+7c& z1>*`v75r3i^5(vx9i;~=@2y|zc*`Fx)mt0mXJuS%=jyz+$D-c(ePa4%^pSgJbhmdN z(s6&=6&X_UWScIcqrQ{!hUWwOl7^8r%PXI*sHj+7CDp#s(9*KjRp5(L28ja=LG$j& zM`By!mnN5`9m(9CeZEab+dgfMW%f*qNSbTAH!|JygtT6LGU)Vm@Sb(Ix?Xe*c6D++ z?HcQT-DCDe20oWJYO|!n`sK#urXNk6OqUH8bTh?MsyCSIf6X(&71MgOWo}cuhU>Kp zs@qh$%W6yFinkZOf3vyZT0y^?XK#L4xTxqv(Tt)#g;xsJ7TjHQxU!FZrV?STPn_EJ zX4f}w%NVp^h<)(*f!+EY?6s(SP1k*0wsz9m&P?B%@LbdxeNu3W#(a=Go*~wA@~~xca;5@2jkpFO_#L^Onphxmn^U-B%`+ ze^Y*@Tvyhm=<$M&in43-JV*7LVjs--Aa_LHWdrXU(lE5o(Afi;diChmquV{*tzBPi z|7yn537aD3i0!?PHpkZ7TXw1F#p0;S>5a2pNByPpkJ>t2PjkP>^)Z3C-pQ|}CuOhB zS=}M4W80j}v=g@5BeIQ)#Usj6xr4Gs9ivUwrfAQouLPgN0AsRi3SAUp22LtL~+`%34Rw zztt4R;ng9FsZ>~))_@nyaz#GwfGe_pm z>pgamd)R_u{|uG~Zn{0H&w*}tb=}b|txI|KX4^U4BfgzY$(0`!FDYtM;w$r14{Q0% z7cKmwd&BUIsYk?%Hb=_M?49i^I(^dlTJDn_HfDdB;*NRPP!asvGsJV=JIFsga6J$a z92Xen&+=FM?+;E;&I*qj#i)*P$Kt<;n;83X)JE%3Q*EpA8hyIFybo4W7nIi~xp+zvT| z(sFI%%v*#?-%E~+rcat?IG%9zb=SCFaDU-72RCRx>Rp!mqu;STnxH1lOnEG+qwUTJ zv*}I!9!U~MYB9>4fz96C?rE;et$*9|n+G&M-13Ga&?-7-w{CQ7w*S`he9Mz9j^>k1 z!G@x`l$t{or%Il^x$4HItNN>5ul2jpxge>~SoA{Sdjf}QyThsPsf8X(`9`${n>${+9m+aZ`^US|0*{&H)7iyPR&MeiSegmTfDH&indm@5>`E9=ve>rTnW8 zehv=Cmg%%h0X6b zd|C5A<)>xCig)MtyK1{=xp4U6@M|0L#}>X;^l8!Fl8t2@%a#;RsyL#a%B`L7>s|8b z?}z1%STp!q54lra=i53SPX5{Yn{{W@4XL88{&MfLy)Ne$9;i+AzGwV8`pXzg{PN5h zoqp{!t=$vt`sLo;&EKuAvp;)o)N{U_^_{DpE&rtStJ0M6-^-^|?ymPZ=Lh$xW2M_8 z{b`=ut-aUwJ>GMBr>2Zo;tp8f(+^Xk+=pAHx6E)n>Db=9yzyYu700Jui&kL#C?YL# zsI`Z=yYXr154oH7r1OK;tkz48gZ9Mcl={Z1V0lJaP08;?^9!E3{{7`IF8^`0<;MA& zdyAhhf2t~}=9TI*B}}@2!7<6Yd_vH$bO(Dw>hUVuKuf< z{#EG}Yf85l)f8CrU(V~4H$Lyx{7p9#O9q#puUJ<(v0`^|e8Cqr*)f|34Vcn?)C+yG z`^60&(>J1hN!HM8G3g<*XnY{rA6e?zkzaK7_|-$@sZBq-P6*FLpG~+gSl=={oEG0bN;k1q=QS2vi=sY@T4#A) zH(ojIwYWC69&Jr-eY0g|(`Sv=#@PCS)f>v&mrgAHqwuMl%L{rHe3k!QL3+ukiXPQd zt9w<9ENRHQx9C*hfsPx-rjAML+okuE!RH25cClp6O}mkFA!3lJDeB(XFN7~Ewq0zx zTvu|l?quUz&bc~|tu|#`mapTAE;(%s2}vnEJ3iC(TF&UCg2P)$Z|DZeQyvupHHMjMcW3Iijp?ghJ<>|_Y zE3cLvFL|i=Y|+%>`6c@DCo7w(u2=V~_LVh4 
zFeYhq($s{yY`jlyTwQ24`hsPvxE|jG3n37O-vow zX?AXE`bAp$y>5IUIG|-^)$*dmyifD5R@Af%_iYSb4mjloL+{vjiBl3yDM{Ib+nsM) zn%S82al~jf;Er|Aaee2o*dKS8++F-7@)f1CJVZXMS@c6p>n%RZd{eZpmvBHW4@9^# z9rrovTm7w{wfY_FT0UuLuFb8TRz0qKPx1Rjmg3bVwz9{|_f@`8?W#Upl~?vnQA_^E zWe;dSwSRwzY49IC_w{*raF+q@j?SdpvQJiG^KnW^Jr9Y#2?aau4k)H6+M45s`%BaznfRN zNBP$VmkL`g3t|nn4`LR_ok?5OHrS>py;tJnQC34+^^!lq^RFYX<=)mq-nZo$>W9ix z@@A!{c)Pw_e?@=NaLgzfPw6^}QCs};zq&tKPgD*nKV0=t&FS)0WtW-<8)H*{%(|BOaMt@-f2S`>i_3fZGZGCLElnQIlH{3yt?9rnjOvK-Ku}KFxfIc=H6&agd^&%q=?L>%pcOOCLM|M zM!sq|qU`p}a7Zn$+sC;l1)Ah2iV@)Soo^hnLEUnM0o>e)kd|c_&l9x+fEA^E3topWkYt@1Bc|~gBqEVX3L9k>$fN?y|qrO^Ks{!&Zk^P@5o@5c15^LXs1orByqlE zlk$Xng(gkVdMIxO#(5K+|7+>ge4}Yo<1_VTwQ^1OnpsuTE8Z)+r*uil(2|v#J= z;@O&q8UoIxa8HIFB~8wnpAC|6n*RrYSz}U{|cO!L`FT zFW~TR2rO6L6^4m#X+71cY7Av(?xD;@i~6CwBCy~8uy3@hviYlq9u2b_vg@C&dAzEs z^6AP?%O{pjE`Fh~Ab)+)Zl}SvI%7)8ziHofJ27ZrpZ4t&vJa$w9baI(k#IJCo7pPd zuT4-J{2Ltot40>iE>5kx(^1nJ>&+I^t>a_&CJjl8OBxp|+R~GLNJ&Y$VA~KqIjSr= zD(XdDhW{t0!=BW%ufDi(K=b^@_AS5qutzsZHHlHikIcE&u94lMMn~4X@X(9`G?5KF~7%Ljs9QMCy^Z@;w>K-HQj5{7vk%}IPI{a2D=9S^)-2KdY*F6 zbnS7Saew8x;W_WQ=!x{s_V)Mo^8Dm_)p@*CY+dYF>=@A6-}!{+s(e!XOXo9WSd*f6 zN9RU=6um0y=ZL0=QBj7d)e*xZzK`%)^DPU^35Lx=dv%F&BDlfdD}s3%0b;v8T4lDuy#^-N%x%LX~SVdgy|vE9^<=4x8XTmxv*Y1F09x3tK-z?R6o6H zaHl#=`6zfQusTraf7M^+pA|?7*2-fvhcHAc*4=9;HjFmb8}+6ijOUF#OfjZcjj6_$ z40H8Q>lR6MLR+mx8KumXw*|)pGlEwFe*`)OpQW?Ifhz$axI54x@QVLuUx7EldxvMV z`$N}i*K@ATu0!tKzBR#%a;aJ;F45;1%8l#Ish0KTBuj(kh-HN3HOp}8Kx>?Jh_$z+ z#W>zDKtDp-uUXY*`AG1d;NQVG`9iQR__F+t{EB=|{#2eK_g0RmyXaq4tP}6ib<^kR zHN(TkF^1>#f9Tih`|2i2Y0@U~icl{kio=LGzE%gT6P4Fxi~LxyByd;oHhGV1RQ4-h zsOz*r!VAKa!so(8;jo|+pAq+o5mK_WOd6^SO1DXgQlZ#hd`Ebe{uRd!^=-9GJ*2Ks zSF1zSLZwtGRGw2_P_8L%xrc01jwlB8b9FLV@B@^+%M{bZIPnf~nYdQmB3=|HN~fd< zT^C)Bu1Q)Vsp6NSP8=vy(%S^9wcc8cX3_pqUs7kQUDYefLFI&UUb&!LQEHV&(pkD1 zsa{srC{q-h@|}FUyg7Iy&_A%l|E5p$MtDnoUVl#Dm*Ac1SYey^lrGKioxYF0)bNk- zfZ=(=FGihdi}6#_b@Q*LC}WOMHdyq-q)B3-_NqFMW@J$-mB-YPYNqN^8Xs!P-s z<*-tue5u?=Ffs~U0g1n6%(aSQnb`W5>FFH31{dn 
zi}$t8TBJ5y8=xg??X`GqzdA+jul7*`ieH(iexq7RRzB?^VVh7Pj22%O9}*2>rH~@t zF51Lq%B{8)Cx}bLd14pwAK`z(^THjpQ}{dC$m`TE-Vv&ztTlFRE#xP^(q7Z%YwNY2 zX!of=>q(<(6fB}e^a&n85e#Bbs1u5W79maSOYIL3yNg+(RV){N5}u|^0sjBYo20v= zwDY8=EcFAzOv=%YpwBE}fv|)=tArgwsSqU^#Zt-uZ>C)EG|CN^&>JR)wNI(%(b_<5 zr1pgNwDzR7fHJy^wTHDy+P&ISlwbaib`a7i3yysEU~07k{kzE@wGSxQyq|ilqc;B5 z&XI1`lRO^RhHCw_an#D=l+9U2CGg(BIqe{|@);f5v^S`a`?N=?-LL50k5s#XGT?X@ z3Gd@$UylD3u1x<1yB&=Y?-vzO*87O|wf2ejrS>!JxiyD&((rGa(rAxtiZGON^Ia&< zJ(6bpm~gi+Mi?dBDU7DOQS^@AS?yV^A01=0yS0hhw9w4P)9mii25ZA;&Tmp4{Z;Kf z?SI-2G#|X*UrKv{2Websp*_iA!b`&Y!akv#j_dSMNDCdruA-Z=>VpYwvXDo5SVx+e zLALw6@TTyw@DXLpe-Zwo`)>(bN!}A_4?v?9x6#ZWp<}3)sCg-8ey3_xiP0%0Fk2I_^++{ebFV-EG&WVG>FKIXAS?y1CuDV=Z zqmEa55t}xuUum0##bT6nUHV&hm;Nbzn*M#AomgX;__a{0-LCyi7B)dmRj(>PD@PSs znL{?VSsNml#R~BQ$tkH)xwKOnDg7>{h%XXW4{0xH-L>oLHuVkq{I1rhT?nNS!q>vx zVySqWv{c$9?UtUD?6h0ZLNjlr*^SiP>KXNj`mcILtx!$c1k$pR5LhEj7Jn8sF;xnP zUlV%wkTp~i&a$-uX*9e=9l)e?0e;cO+nA0-eD9wlr}7FN=~EL%pl`?!{_)u~t20`&mN`9pQH`j;wb zy~snoM*FB7Tg+g}O43E$JlDa;i&iSOxTL%Lyy?jz|9>8P|-n4raKx|Pf~NxofeFC{baa=q)mGsN z+T8!idSApk>ra;D=ADM`#6{{VIWago@Oa>_z}JC^fkT19;8l5!LTdxTs+(#^Fy)wU zGmECX3@3Hpi4OHWxm(DWcM5(HY%hN%&r}wwuV{;fRia(`Rv$FHWt?UF$}m_zU;0Y8 zp{6OvgB^ksf(L>lTLRDn;Qp7XvUTBGMDj!ybJ;qSyTNtG|Kp^xy90GT@~3S#v1!kWR~d}iQd3g+6RaD+xv9Bo&J6Dc5RMOpnWd#F&`o_L5a+v{_Q}@zzxV&-SNxj-n}e^)i{$CSje*YthF~Bd1eW=F zd!O+ndiJ{yyQ-bPIzMy`_5A7i+HH5f={}|WYJD;B>y&QEJClYbS0wa{X=AbKn&c|~ z1i79uOuZ^SXsofmY&(&3B1yINj(pivrn@BUQSJ&%^j5eP_XW>f zujC!9?t5M}qt0D(j_Zkm~K4rXmMtnfOUiXc-M7T=6zC`UR+%L|M z4(Z34ms&5GZ!>h2`U-zjglp5*lK*&%X8w&(tVYq!*l+&0z)yh{!DoWi{*QbCZ-I9j z<+Tm&KiwwpOWw_%qv(`Q*N3Jrh80@{u%cE>JsZ|d4BEBHT&)E)=ln4@ujFaF@MFa&Ah+8Gpjo8ese$^ zD5mLl3uXQp?mFi<`!9{xo9}L&WdFORQ*cZ~$G9(IcgEfmR~voO+};#px!b&qBGSJt zgU#{62w#flaqpeJu7R_f+4#NbC1X2Nr1_fuTkQw+JMli&wJ7{ z%6+Fh&D+nH?cE*9OT2CG?C`bfU0YgzYI&sjx0ajsMfQ7J1~s2(`l9J@^AUTs{nzHX zO>Z@ibiZl+tCQUC?QVNB9?8gWyC!A6^$t^4Q#Wm|KTB+59O93xwN@W!KH>?ggRL3y z*AxFpc_HieHfz$}wGFlYYpgT1w+=O}R=;w0vaf9Xv%XK$S%<@w@B35yCsMV2AG;~K 
zC~8O4wdlOWmP~ub#`v$yW2AA)VfTLfC-x_uZN0zB*Cf4Ro-|4OUZ~YSZ_cu|waze) z)F0O>gV(%))|D+C?Qb}r^!((py3e(KVE?J5#6G~8;YxQdcQ{&pY4Nx8Y8lX+-h8Sh z&bvx%)SI+FoL@AyZ}vKF1RmDUHS|)hwf@iF*|IHqN>owAqp|S`OX61ZPVJ(QEON1DfS0w?`5I=4|^Ne0>hHbXLHNi)FvHGc{Z~- zy;It<^f@UrVw)muQB_7U(605H*2icc(|L}OT=%n7vo-v84)qgGTE}ol5F|TRIclz zoeS>rf8fpc%=G03D-_BFipPXs)Wyn1^)F$&xLBxCA65zjlYF0fCb_%2);J5COI&-M zilf+GZeMC&(0rxg*ZOV^gByf~$Le(r+ne6B?{pq@ZnS^g?6P0;i2hGKhdh%#I~^Mu zRyOR?4yLDf8kqA((#E*ollG;)kkL7FXWAzz?<7BvFe`GpxW`xJzR#B>PP6PZjS=F5 zjdEYmTG_EdL}trMswGp?}yo z*W@yOZd_>k+C0?yY{X;MJmYNXN9|X2foi7Mp`+$eO62MCC&4zsGr=}W1g)_6Ib9WOgZ*oQRzS{GHjulDx(>GgSaGaBA)-fORI z?%wcbom#)Ld5WXJQQ7i$)-3Z||pcWt3Zh7`g zw$ClKQBv|6VaX z_7Xemn+!H%oOxu_5}V03B>H=6J5#A)h57Bss>uG9GQCH%2{q~?+FT)$xHnZCE~HY{ z{gixF?Izxk9@M=d>BXDc42l3B5t?ZA5vA=^oWYDhTYsv5zJHm2wr{KFHrGYRclMX< zzu7;qx3^c&4(F7ncN*5zFRr&Wyx#b6le5XuWNzNx>~8MaGN7fRWuc>oGv68Cy1n_w zrZ@bnqn}P&mV6-USL?di?9?eauLxGM?Y#64+UEcpCG z0vi-bjg@V|jq(7ci~M|0qO8R@dgEY~`I7ko^RwpJ<_qTdh_9oU#^)sbX^>#Tk=m|^;76jYLZv@u`PWhMkuleJG$0@Imt2@;dC~(4s<+ge}+87OuNb;zYG=zJJbDH@<#cH{Db_G{G>cjULn6NKQFhFPX(6;_XVG)e?IfRSTELy zxzZHrY00W9(v8=DpnuR%Yxv9%VW`%3GQ4l-Z2ZM|#kke@qH%$-kFn6O)iB#I+0d2N zg`M=PbkVwe>6mntR%J!v58^g)mZ;HN5_^PKX}z*Q7)xvR?`e&@P)pDp6zLA5_3(q* z2+d9_qnGH-t>@II)rZuvl$jbuIga+Ks^V@=y-7vnFZB)E(+)bpJ4|7vEOzRr{$O)I`ddoL4?oUQy;N?

2 z5v|B}soztLr)h1IOKY};lof0*z9n84%~A|yVdm33B6U4=Q*^KC{?xVTV)PMuO(*Gd z^yBq+>-*{r`U`abxb92cR^5Ev0G%rRCe4vF@ndl?&HA$N24z(a)BKE-#d=SDP>ogh zE6bIKl@Ut3QXu~&ACiBOzmtEG|0ca%kT1)va#!VXWi^d%r}C##MGV|s9ic8$>q$~` zwGEWFI6-gJndmJ^yxko`8IZyB24FJD^bD<4zoLk6A*~+UkTn0IxqU$ry-)3}W~u4a z&IEO)`hxl{&G&*DNBr;w%{hTGG~WoKcptrK_ot|ccSswgpQS=6N%w$mn{F@7YnyJD z?yRm(*Q{&MmFTYP4(e9vM(N^oHPU72cWJ#eRH_hn(R+K>g)_o_%6AoOyD5TjQATj9 zdR>{W+@<6wdGhCk$n~@e`Hbc$DoIL)(niTr+A2epr<9MBJ<2bX2goNqXR1TgN7Pm7 zPW2S6@)df$I9j_+dxqwF=4skY%8Vpn zACuPi5JFy5J<7l24{YjP>U^5{R>}m#Yj@F`ihC*lkw|zPAigGkNIA$TX_&M?`c}Fj z1*B%lBi%-`e^NJFH(mFPZjo-OZmw>eE>-7|YNVUeKhh`CWXVFaJ}GV%@1pk>uaX2; z5O&fj7y3J8hF&3kccc733$5S3q8z~zb(T6!9YgOV_9I#%(UcjTMw*^Rd9g>e zsZ{qK`t74F5k^W@F_K#9D{Rwh)bZL7!rf^#sK%0n?x!~-k80_JmqEgdw8F0wQpI)R zA#sOTPB}-LR4F!yQ>87^Aju?6mqtmcl;!P6S(0_)UD8{`R8Da(Wlgq7+r)QCV$(zu zy{mb@cn@VSri$sb#@|eN2#KnkTR^?@FfZ9>nr7cj)lpVs8nn}@7zG0X)NnNMz7h<)p>Sw}i?LBp& zHeXHBo}~=JT3P|^*HS6pca>H{E|S4~$}8m%x6aVk$+GIux+#m5C)6!!iaJ@X6?{T> zdYkuaX^r%!&`MtNZAm1Xw(816ulTQI(=FDGl#Eg@-BZ$Vsjn0%o)fo7rSz8Si_*K| z1C#+RqxE%XX}0Jf`Kgp6nNM%EJ}>0ce0OQT2}dcfxJA7oyGaAf3DFA$4a!L76 z*(rBdCMaLa^OYUS1|^*==w0=I{H5GhnIW%~1$kC*N8lNMx_?^mQMHgVoU7E&wI9S- z{damvKf*ZC{IB`4^?HPf_9sVJ>Z}!!_e9 z?GZX_9VweQlCnuBgthdq%PZ(D)#oTHyH`9Xtspe4*PWJ}q{&37mvmH&qyKx-NqN^9 zv=?zriBs+B6=8%nE7&bKjrjF<*%4e4e2U&c>gI0~Xde*i9oq_jULeB%j;D)vhHsp2 ztuN2p*?osI)79dh>^8It_Fndsmi(qynuB3_6J z#(EO|NtvE~BWG*2C$%_vU+S*(&FLknr;;Ct9~q;MuCtCbtX6LewpCVA9%*f0v3IZg zXP43QvG1n;*T8(GT>QW=#yHoIWBi|adc@~ZonmycKgEos9lc)kW=)i7mHvS4w641} zK`jk5`F{7F^Yrjc@cd2xBzuwjihH5E+~swiao%*_=`C~*b7i=ST<^MUPFJg`^}&`` zn|)1#8aLPZtA4CJUlCdU1ik05qBx_lF3)*2|3XpT(dJ8HtfhzH57X?}^yK9kTiaB3 zy4d|<-*H19965aO^zP3SnQzHi+_qQd*5ta_ZPt#)TJ0H6PNP=+d7Y)@_txKAYwdrw zjI}@R`p%!I(EoX6xEAwRN?dwv>ZsILQzxe-X563EIeTZu;nc3lL*p+-9@Wn!+gcm+ z_^n=_^PpX6KGQsn-b9uhcRGLY3<*Ta8NoAtw=dq`(?7%as&~An!SzS$Yb_TVKC7Nk zHlt|z&3E%RUQfBQ;$q50@A*5pfrT6VWeg z=$bK4jQ?v=^3+f6H1<2+_Q~`?X(tmVMLuXeFU<2VaLP?ZT&(S3GQjY{S_(phNa=q1ZNBw8jO8MyGyYfH38obc( 
zZ^tt&r*cmZJ>@!i!TtFo*1Ry3EMtGK%{!adWw(*2jar@y~&M?_u9toC1b zZrAmCkBNP3_2H|P_Yev6* zllX_QQ5+>M6*nm+Xt1{-|G8bARQs@^+=~6zcN+yi#$Y?Z3TF zSDl)BdhhAaPv3R=+Uag*+Mbzqs>87-&dScxwCR1@+`enb#mQ%;emnV-F$0GW9-Po~ zWtZ=I=l9L+^uFz?u-dz|`Fi#4^6KKtMI9?THvby9C>r&<47J7$*4yID8Qt11>lo-1 z=uvpv7rifc+LBqHa5LgA{TwMzY7kPCwBTZ8q4tbC($~W)`JW0tq%PJCH$7$g$MCv- zj=ov)tG$DV{SN<1|BtjkyVkwQQQcTwn_lfMf4lhof>n8UUSD@P^}_J8-OtQ9-RE@h z)Q;1xQ=U_Ao%H>wKl=EeKUd7LcI>#Y@05Yx-&y>C?ZK$Klg4ZqJiF(#juUgUyO}yR zBy2alpg6p*xBOjYD_V5pY(Z?**4DSxCk#^}eu-V5xH8?{rd#`c?c6#3PCI*!?tNRA z9&LV4dD%A0{E=`x__lAmd%Npr-&5LK(nxWu_J(j(ny4E``>Z*w&3LNzGM_9U4y5UatC}tiJI7k#rSMaV$%8be&yX10+C%xDj{P7k77e zcXv--+}-8H-AN!$jJP1dcUd2wf5|@_$g*K)rn{@FyKmjPRpImT4^L9Azf4VT`}Ee6 zlTS}16;JYivf$B!2WRgzzt`*mC;0K{D0UZ)N&Xsl;PtHFov`v1+Qq9x+q; zC46%owoc9~kt=2O%J}lD`OmFCYJ5-sayE6ryRVqX+#pc)#6vdqHfjh@mb+h!q83EK-XI{m87Kk+)>X1_b|^C?kHb{tKm*@EVl>Pl5<+8 zul$knZREGAUyFSxtCZ&t<4T+a;5sNL~C1{yRfqTaN$SiZRMfK5}qDc zqe7*sPb!Zs?-ReKWVc9-#Z85)n}~tLTxqdTOS**}(pu#(P=HIaRq9Uc)&v=98gA>4 z8q1k;Oq6acX_i`WTioqk!<@0s!Oj(~#hw-36`og)3kB}%0hu$>j{WHUrA=yu)Hk03 zKI=X``nc+Y@b=fs=1)&OOh}sb`MWicyk`lC4XHclpEup#b*b6j+q72=vhr*SFC zVy%D6i$nv~kNrT`p`S4%(@G8Fzq|MGO^9XM))rex&k}*9*T+{W>n^h?wsnYI|A=@Z zMtBbu&MA0pJMK;qSfw)MX4AEow68SJH1pVBR6XK|I*L3@4W@PzywY2lCXWy=c*9-A z9U1m;M<@G|!qSEBZR@S0^M~ZF%4(Iq_(zGa%Rc{1?eQV`?Y)$PDba66z3?RMdcHk1 zGkYj^kIwOphbieLB#?h z#Ng8b`vZChDt_yI#u&XmU;SSA5A*HpBLGeAO&${Vx)$1MZVREV*G zf7Rg2!FK~4e&5XN3@w!oSHcBbzm zPwfTygqld~6vucgxkotT>>69Hb%Fh%`K_e?R-l{)qkd@^ib7 z!{3d17nM5Z$A~PW(LY1bva)32D{}sPMKGu$1%VF{j`CHxa-MlEy=$lI zynCd#yKq_QNz>YX`U{2<#@>eJItyEw93;mJeq00Z1n+8Yl<-69sYa7O$+2Xn8ZS5G z*STidX5Fw=+RLoQ+}S~IB`>QH z%0X$k_(+Hqn;<6N1M%R+U@Ux89!ih+(Via;gY8Z3oIlsoI{j?^ZOP|}sS8tgf3ET) zFM}>f@Z3-Ww3+7pf$c*74a*OE5;{Dnu5Wq6Wp)cSgD|OYl?vogHq1EFw_Z@AaBtN4 zm{+k|WAdX96?+-_EWlu{uIot8Rd-9fglPUYNApkkV?wmJLR^e^WwPMlW$@Wna?606 z)|PI{7uCn)etHwL0VqjddL~tp+^m*Y7RqzwZSpE$tu}A~f8ad!O)3E6t+OZ4S;F=- z_uZd%>AQYX-~Ri2;q#eqZPLo-#M!s<|4?O&@%|TrjbYQn--b^L9U4$zy05LybS1AV 
z8>CN=AZo)V88%rq2G$C{6MZITe@tTW@=;YHP6e;>y`n!&S5se!Jh#hp(%s&p;lA_b z#ktZ+c_>cbGi8G`SE$8L=4x;qxM%pkmEt|wrQRY_s7bVk?#IM3q4W)M1I~5NmFaRZ zsVUfcjQCnm_%trryTEhM_08VNnwNVtt7}HL-(7xo{8cF6UpnzCO#K^~-&z(AhBgfP$Oy0noRLOfR*%BMx6(35-U@o`Iz zBZX_Nx`L#dYsHjd4r?@{&1 zlEhi!GW9RpT=yQ?8{d7s0gj-f!NYGa~(Lxq)GZx7=-Y42xiUeGx2-<%QI`B|^BkL6V@T<&_}eJspXvZ(i( zarz^M-bTroZM0ixv zrD>FIEOUz}BX8!VtG24603M$Zl33f z>xCn$u!L=QL7)7IdE4`T=a02baA@3TyeEa_%0y~0yHEQ-_d&nju+@;L@2$ht*%!L+Rz(OZZSax3ATx4fr~`?+hO zYrCtid$z~wZOM-l=8Aiyb#ht6sfL5GX;3!EZ^2sJA}$xtiFUDyClxH-)XkyaqFM3GohC+AXEJauN7SE|`-grCVYT@taUg zD8-vN8@%&x7vt&)OCIlR=Q`mI^bX-N`8DEp`HZ@Y96)C=gEU992XqNwYLC~)>Cfo& zx)Is}&3~FJ*r)KCF4_X^DcuZxlwrByq+z|Gl3}>MjV@EuoSj9lB)6)!j&rs*lFQ)U@^6G!;yG!qTwb}T&}u~>X&n{tu*v*WK4yCineME2Ib-$!FVq>8?KZ_%|MDJzqTW@c!4kAYN#fDN#xrNdK zyvWbQRN#%9flAwe#vcPFA4|O;2ZG~x8H}YqP^Kv2H;n|tt&k{(Xu~e@8TkiHvisl_ zH6{zdX(EW(>NlmPau_Vmb5c9WDE$`iBewEOtRd}?GNlB0x%^SCh&V#Bq6e0C3_1M? z;6Cjnz7Qc~Ct%~R$Y>yM|9XK;{}80 ztp>gwkGA$!UdhAds)9WiPyyfu^L8rFzo%eG#ChDxH3?=q{Lt!VMZSw10#3^ z`Guq@UtDnrwUycdc55A~EL9aPt^}UzA+iry1&mY&ct}HmA^Sqry@;{71lHL_EvHJ# zN8sx}mA8mHY*AJ!+Yx8D3?%*>A`w}@w<`h9?F~Hp4mgcY@W-OSmRtZ^J__Dg3L%0Q zTmo!W1GuwsVC9t}gUA3f7Tm^?WC-a)>hOClu*AlY17KU@!IParb|w?ZN@NTW_yXc7 zaSH6lC19de2V0Fq9>hm5SZ81y!r;Zuqdx?BpWI5$Kp$j^=fR!UNwuWvQoPhf`YKJ6 z)8uGyYX4K(fJeKY@De@A$7Dlr*TU&m^fdYuol1YEAJTj1ne=S<`hj#L{SLi9i|UF< zz;--C67JmuV}DBRp@yrZ`W>;SZc3n%j+oy>ITFl^Q;2@_hb457dLZ(4PAWvEaW~At zB*X;gDk(}NywXKz+O@=$P9t*UB?8eCG3apRg z6heFiLe(#@#4XBJ@Pto++jt-Ml9gDs4xV%w;vg+C z3g?MLM2gL@qh9FY#pGJ@2zebC_#e_k8Ylx|1A+JoMLf(%@v!)hu=#6@O`QclrN9P8A=>s>9xLnRgHk2wfjD05EcSxGK8`5keZ)80N=Z^v`H1Y5 z$HCKGRR<7mu<1x@9s2wr?Zfm!)bSznoyo)3SLP;u8^M%evgku}H(I5RP<5$fSVAF2 zy)L3(>%phYRgNNdSX#-KPlJEnNcKmB^o4Xm+9z$54kFI>1~JqKxs$vJOzapu-(w{X zr{jFJ8~CwWvLm?_ksuKfjwAk%=JISBUs!H zFl-lsQ#u~LVg!CpK^$lau^bG_EwJqWz|_4!90p@|BRs<_a5dY&8;3zXDIa5X7}nY5 zFEjKaau!M{De?rc*zY4sHWqf$L28WLv)0ml>A4gwj{)aSgEuUGB25<%tWR<^AQotYV;>+GUbJ$h78_!1>!Qe)gP1vh@?8@ zqliM9Xs2Tq9l|QYl&6F+KAxy6IUTOCP?ZdT|xfVBH4@>W)pC(`+`}DNHP_T 
zHrHehGJ19#dynN=v&L7W)36#R`;1-BHev0|ai#|%AP4B`^fUM|ncNGO@+UA^Lx>k( z5tqWLX$c~6PWdV_6za-G@Le9lHqPSfhV(_!%FVDM=EzNO-vG=Hk2)OQDFJJWmuwA> zdlZrFZ-@}GG)>#VZ@x+Ggcs-xJ9$pdhObH{MnP-lF6J_$o&cx1P~I&!mtE2c#M+~! zEb*auO587=zzoO|OGv||ds4i-OAf&Nj8OkmTR`7r80jDZu7F&v?*)--nGY{)Q0lkw>pg)2q8ctm#o06~K7p!>lAoU!g=(+NK#9%96 zF3>V5`^cr_j`9lm8LX*|vPt=-lvbx=XDh2UiE*&nbHocUgtNgcPlczs0aouau!F0B zmHZx_rO)3swNo7Odbx(2j;xagl2-bHkvxp=B<#Z~OUor2ShU$-bmuBV5pDD%24fFy zC#t}&>>?2bp*n&Sd>{Q+2VQgqeFgji56v(>jES+M#nb7MbTTytUSj*D`3 zv1<#nteFKt1yk}bbp^@y$0}Vj8G*g-CZD z`+i$>TWxFGfJm@_sW~$Ij$jB+}qrp z=GayE(>kwUV*b;-wt1IwFXybyF8DJqqt@>^-%o!we!TUr=KBSoj{ZECm11kljVDcp zAHL^s=Qe zx9tb)vGz+=U7nm(=1-f<&6!DA+w;N-OS)@{DP$9zB%@9HOvxsb&q<%@X43bwe`;`E z_^QZeQSp&u!b=C=^et!1XDO1EKM2#nPmd5^BElKZr+deGvR!wbk}wg1e>eRZ_dWm1*-xgA@}pmexD-W%jRv1Fn5SF(rnmM-SA78C^bue9!xz3Cs^p3o9NOAH6I(G`daXoA8RE zZv&_MrW%g0pP{RBTzbiO^p%H$A_wyr3DM^Rl@ZW3Y=GgtbyX2M36TPpcMz9n)E%sCC zjEZk+UaP;U{;*o@s+=h6j+<0`K(PfOq5f4(RW!4Ra^gjIPQlKM;2%xC*Zox`eMn}n z?8$ir`J=3x9Q(LHxtkh7zt)fS%L!Rm%o(*gYG^TA_|1rAk)@)CMwgGOS?o*b{lF{c z6`E4&I)1&UfxDDD$RoMixO^S;?HY&R_}5v4t?KpC>O8G( ztsup_O0O)nC^oIwf`C<;WkPr7gTf3bYpl)gnZERw&+koXhcotP4bOd*Z?a$Fx=`VU z7Cv{($-a{UE#Z@+7spPHdmTG7ZeLtToIb`|EH>C}HXAIuM0PJ}6^)*;(6K0IZD2cZ z`wk|rYWHzJ_kQOqiX%k5MB&Wk#{Te<_=aELJ>%MKA7|Z=doZ*2FZ!D`wZr?SZ%#fx zl^ApXcCvq36xUiQ>FQ$}>zPgXnq!LV6Wk5&)qYViqukx9-Ri{DX;*z+Wpl--_`Ahz z!L5DWng{&q!v3~NW{d2n8?>ZSrH^*8okJ1=#Knti#@aqWw z;*DY~aZ5_REjcCTQq;Pru2C1l%zhcV545QIN_H;Hv)eJ)s^rek?V8Wpju)mEdhLr{ z2fYGcU+gHAl7C1SF*06pxA=v7>Z)dMWPO`^`OlKHy+7J~8UCSLO5DqvNrN5^yx;R} zplvzgdM)zmt1;fvT@s!)7+C*W!kkLQ%btsU8y*}m*-~29 zQTc~+dsDf__7NG|zYhCS_UGq}pxlgt4+Wmw+PPk9cm5E&8Vq1R-*Wy#!uaT}G0S4F z$6P3$5Zx_0FS>k0Z~uFGm3ENr2)#0%@8;N_-yt_UcW=H>P}{no;J^G3>vVf_m#-(y zlj>c-4d6uZV6*w-T%f0%>$BZr+n&ESr|X~QX>ES^e%+87`=Q`{@3*lpE2Vt+y~j0C z?B>e1-FB5BBK-E2=ukmhaeK+z(T);@<-XR~U;9h7du2DpdZW)4yBBsOq`iL?!#FAk z*zHp8y`u*NtS9HcEck78T7MS&C>T<>$z>4RQI9ox-B;}sLz1OIfHhbR>m5-sd}+u= zWH_G-sOV=ftA=^n!}K9_k~o9YdEdIXyEZsKIvO}e*)Q5_IX}2+d3?MVy;Zr!TvP5C 
zH=qAUXe5m0%X5=FC0$AOs)hHh9SR2KP0qfUIUxOPT9fpd>1k>9Up_y4f9%NYYVRl9 z6ssb~xj_8GJhI#j4U3A5{2q2U^iD+Gn4hI)l%5jTyLj)&wD9wxJwv_*o%i2p9%PuR zeaVa_pC~7Bp4=|Al)S=yE|y!umylAFS;$|{BgT?<5OJ<*s9-E*v>76e^Nlx+U5s&t z`?~9zv&;vINA|EH%@9A}Sy{f3_oh4CJ;hzlbIr2}{FBG7Y0wV4=MlVmuA_IGr<13N zXRxOj;sw7vT6Z@`#lrHo_tqWOR|S^*wK*66vZl(>l*uPhs*idQ$^}c zz1O}my|nD~OZVRs_#&iVgf1#8YGl-e$dSbcMe51$xr3=0ZA<^R*t$GptP&m`!t zYwheDrXf8Lxu|=Pt@(^NKwhT?BUjf=&&3(`_gsK<>NiWWq8WE zOm4lWmivaYuXDW1?rz47=Bo&AfralA4#+d91DZ+NZkj`Egtm)*g|W4HrSB%+iRP0& zm&~Uv#r+=o-woIv;Ok$~x4xx=h4J0zd)9K&$7(EZ>|wZ|3((Ebb=TF=2C>g^CUnz- zaQfR#Td4bL6x2XBA{VtMVn}nPRCz79;*3z2pCSAfCy8-F3vLHjgAd_&?|P5d-I=Qe z9JHo4!L{GrOlT%HhzPLE>`ZF1MGPBWxFDA?yB^=qJ?{ccC3^ zONWP|ms^<|hZ>RjRs-9&p!w}U-Hv{WuA(}^&eWG0g# z%3UC5b(Guk9Ay+(k`84mG}|@_d!f_wLvA9~lqX0NM6bk3-GpjFftZgoQ4q8vsz~#3 zrZ^0|>N5Y^b5lM^ampY5J>LuL^r?KLcm;F$6zAtL3DeYW@@02(*BYLL9`96XwD5}) z5F2I41#&y7mFf=#F`%gGK5`k+UtO(4D*}0+8Axv<%L6qyN;O9I>j*MMbx>0^_n3jI zPI*UanQwGI#At?t$1$8b%xq=+spr%fdLGrEsYCszRt9Ejqe_6i(t(O54=K;csqARt z0KZZm&7{y(l@NZ9)e& z+T~<`d!OelF@tGBG?j8>Lain?kpt*URIIc?7y$&ZI$edy6qw{l{`mz zD`VGAP@D42l)-dK>Yj2zDo)kcR3$gdcJ&Rph{__Ih+mAMv{HQ~5$Z96#QEG#=8mxi z@z66~_SO1m8&e^&PMAfQH3H+Ho~a$B41N*e)|b=0MCptLY7OlsrY`AK2H=#o6eUOI zP+yfXypQ@(KSDEwKjfS#9nzN2*Cc*ig;WL+bD0@zJ7u`gQx0U~bwj`~{YXxx%P9l-MNEj{65U)n${Cq>{dejK zwU>yOTN4x6p@w$sV((|~WV*Y4wC0R*(5n~zXtVVHO1a(%YBC!^l2U^5Qb|PaaSyVd zx`dx6x#V1RKkZiP1I6vG_#qE_s?d|arQRk+svS9tFo3>J1j+wMwUm)eU*em`t_;(5 zLGOK5roj@TsP}X%aF)H;ss$2R(o%3--O?a38Y&M@l=Y&IxRS}$pCS5iCpaB_MgNlQ zEtZv@%T4JRO&+yO>CS(Y8d53jN$p$Z6StY)Lz;D`S&{teZO_HfyS1^JEajw|5o#Fn zH0{ax%3IfPdAsft_R7=5)dH)w)E!~wtFw3~zmH+`&uF7}1;3fcQ$ePF;UPzZ$66AX7Mc7=9@tV-0M*DTRJP)v#%ZSrKly*O&9xyyT`rg4b%pv-GU@0pRWXgHS=p&fl^A*nSweYE z&y>G;XRyWfoyZ$vJC94M&JLDJ>A{DPTg3T1vVhUm=cD%aL210KBsv zbR0S;SLNQoX?dk80i|B~wo(mo&1_<*5-RaZ9r_}1RQN3AAYZV)94lW_9Z*irP~Ir} zpete{YOA6=U+n^Y;rB`jC6K5}`VyDnN6P^-+M||G#{-S!)P+Ex+{$lt2r&H$M6xnP z-2=^!$%sp=2fAL4s06&WG;skKXezM18bF@*fVVUnxd@es4a8Su=wC&=I7j_PJV0cl 
zsGi(4q8GUU8O;NMjBf+N7K}`ibnvhbA;0|;a4M2WQdC!#kjovLj=&#Hh3L?PgFzBdn4p+l!9V$H87$Z zAg0h55vH=pxd_6$mGC!#h$+2CyrxK<;~`X#i>ji$gJwe^a;_WvReLCfYm|o~MjZaW zCGOe)$Zs_6(HXyo;CWgDr)!Fg|6ts?8J;c{@0G(nYT#2v%Geda(l3b&k5D{sZG0!- zw`#ajYvA>pph>U;dHnl_z*3-g_zh;cvUu5Uf&nB1>VHx*J**8}WJ!WF1|q@Ybt z(7vLPdk#f|qS}|gp~R32l)0$Sy5VaCF%^jY6!haj^yqZN)<(f!!5;w69Dp%w@mI;Z zAI7i^tf)P#sXg9niSG`ur=nL4apzii)fl5z1LIl&eO?(~74T%0VIf5oA*1nIJQU-L zsudKCeK=l4;os7*fvWi57+|jzU?WAU;6>`+!GD#@jri9eE&Ylfcm|c0L@01SL@yUr z*?ETFGT;${F_Lfbdy#&5Q7!kPIvGXv7+p}!@WuF-##N)xV*$t{s)YL#)q*LiJ6lvQ zD*!rML4TF(i}cTRf1j>M^}MKFRydv@km!n;+6_I^1wGLLeb5upp8@#U8#Axl-~a6j z&)5&<&6A4x?3wxXt^&)i#Q%!+gd&|JSD|s;Te=zZX@jcELJ|YA+O3wRGV9 z?|A7D+c9a2oEUJWDRB=keopZn*DXOzn{27Jm#b47G%8gA8c9va(};mhBI4AK*q!Uhv4l<;OS~gzWB=Zp zh*pQg9$Tx+l)Gvw(Mwq(qp}jY4Kdgm)ME0UWLG;f%Sct}p?;ak%+rh~k0=MoTg)zsK}@e0nXAl{9;qa$1(Tu!6sl&a*-EBd zPH9D!CSJ>Xktq{LXq4;fYeGic`-O5$9ZsjI&82}@L230c%B&~RlcWt|cT%8olveUo zL_}-Ai}oi+BF=Z8e5RHr>k^0LCYWs&^&Z;!4)^#D&h2ldKO$f2Yej4eKs)7-NV;ssphQxX2Hjjm{D~6sLdRgg5yjEi22(A&u)tic#HXLBaZuT+gLR}YQ2=&CTa2Zf z_@(q$YGcOFhPTPYeykW!qnq%MYmnbK7s^{SNkZ*zml}#|uTi$5nz;n#@Dp4w0kfzi zR{n6<;SJR1_yup!5jo%8!6mVf!{Oa~fK#C-ZV~grWC_8F7zr$AEY`$SXyE0l@ra(c zCbDsIg%4H>KnS#%h{t_ON}JJ#-rVmA*tjqXp^~_y&~_=X?xC z1PA4y+GJ1i4)!i7YFVs2{~_mfp>jaZld6LGGDgaT249TWN!ZTE@*b4txW+lTH&D9? 
z<2P{6yt4NY7se%e9G-XHE#6O_9M3`MznpPrxQBR}dRDo@TyNcRh;D^|yHQNOqVyz3 zF`dEeIEpBAJj?f^?|w@Qv%}QOlxu8m91WG0UD{Sq2~?R&R0YzB zUFsQljDBi!0_)pAkzzg&~RW%1`G;L5Z;ezmE5TdeLs)&K=-Wg$Ke#{+7^N%;htAR#?m@ z@D}cj_dFNC4fA|*k92QyH3X+E*LlSi=Go8H5W$T_rrZT}166`Orp?t!`g6wmKK;z+ z%umhdEkD5l&Gz;4-EAJ@bIY{Jlw_=DxTmY2&10W4bC4rffgF#14?%ulfI1f0+D5E4 zPht0q!KS9*K?$^DHq8Nv`bh6bHn^2+L#ANgj=H5Pysz{SXWK*2TByk9aD%x9Tp;(| zyVX0-+msv5&%|l3B+v2fP^n{_5Q0egBxonz6*?noy9Z3P3S1j+s5jml?HTWS?RxQ7 zwZ9d^s z!>5nwp7F9_qP~~zrMA8HG#ksrgTHwl{IbQ=Rm_=FV5E!(2WKRiPadbVbOhZ1F@InB z4}Fsv%3J~8;Q$y9k#rxbDtQ69pxM|z8kD2b0_hBR4D*q3KUEkm+!5=GAE3MROE`?2 zex#!z_r5rIKp*4``5JN@yCRdFlCy;oVw%`MNW;I0LSOzCH=SR?ZQv^LAGnG9EWR!O zn!Cb1;1>&Lq{s48c?@>q7Getdh+59nXFssjv?X=5bdkDr-35J!K{RYL)`6b(VZES_ zGi=b0&>zu#*KXD})jrWoVxKV@)|b7*_%Z*YO`E8AY9Q9A^W-Nof%+FM*W)Dd8>?({ zur?F19@T(O-YGR1s*X-&rJN)M$#=z7!bwqtYJez4OLv6d!b!nb+9ZwV_X>l>nW7af zpN$FwCB#Jepy&(sTS@W0bXDvo%@?Z*vTz@MWV1L#SR#%W9!vYgSwbJVnV5;6Aoxoz z^k9x$MJd0Od+Hk0Eu+~94DA82xp)O=zS zG^;d=G#Ts&urb%OiC`T5W-XcwraU{JIn69*deHUgPE;T2Cs_|Xi!iLf$H|*T8Rs~e ze^F`??bQHq3@RzPQa0EstYnn`6URvJq!q$tC?-@B=3oQ~@rqDZGROnOao{fpNyVX5 zFjme$Zud20@#d;S<))|}F++|*ANIrkU>q33rHMf7LPrtL$b!N>zO6kED-MbRJDq zcfd2ZB3nR%tOV5LV&H9NK|yi@6okttb=BM0vjh>7)fdVTthT4IiwZ|>elqbu*{Mbm zrIjz>xHQ5Z?ydR|+_mR8BgLzEa%ZBlnkY{oRzvIaKGu(mYDYOz{Rb=8N38SbvCcG8 zMicpRDY8EKSa|`?Tr1!xN3py6SGE%Mang$-nkngOMdhvf2RYk&$y2JJj3RBwp#Mee zgv#Dy>{t?kLfEl_c2%`hO(=%mgvzr*E>NzKRf$b%8K?~IC(o$U)l~8mcKp4;9894) zVjuIKTA_?n^Qem0D`SS^?$cobx#|S0yvxA62_wc}PaliDHmSBC?KqLNAg?Jh*jyhK zk%WFP{Pr>B98PzYv2U*cp59g^2mHqZxe~EoDT4@!DwiPZ5H+zs_Qy*6Q|XCwY6$e1 zTcX!SlEKOYbsE;@D#}*kK6s|*$e+?}^y5jXJ^o@e&Y=_JJh0)eDGimfWC!(>EI|MB zB=iFY;)F3A%6|z^lFL#%Df_^7`>7mNCn-rdv)JWoIAvT{YGO1>BSXEJ`b!xKROKZU z2Dc%5{XaZeP4XxDVlVj?C-ar?R+WJt5U{wPIBgec4vrv7g3EawIP3@HJ$%OQL{h7h zAJtz#ZUTvB*gryx3&)c(lpDL80c5HgPmaP>Mv`5WJaQZ{K}jbULm~4P&WjC+gBZOq zJn435>~HplMdjM{`asqQ2?k!_*=cm!vj?bz=%z;3T- zcYGcBrE$QOCSbLxOH?Cw0lz4P5o>|7=zQV>e9~>4Pj^G1cQMXm%}{i!Ik6Ncrzylf 
zr3xY*>9CVy*c)6>cjLVD8}@!d*$B18GT5)@U>7XlQ;&dCt-vYjE-NQoQ* z6@bBD2nK*Twg9%2h~B6S{k}xCANd74w>`uw?CDw%Gt{2gw^hWG;FJjdQY2wlqRCmn zv9=Kp6)&+C8Wnr+WHr!}!-zq!lN)%(_0S#&#)yqjQwTq(T%Lr*PXsbn9*VB4>QC;0 zU9Er@(qL@=#dSO3{6A4`fF5s%-th(Az$+#2^t*8KV{p104o=~8u!d{nd9L9!TY#Bo zh80nG>f4y%#qm^i)vcJh;lya5H8l{e3W$#%#<47mvsaxxCcE}0CZw7cEE#xF1^6Mt*Aa&CtPYm|MMJ-OB&bQS#nye@#7$)Y z>d6fy8e(MU;~p+x54(s@z`ouQ^Pp4_2-^)p-&DhSOTl%!!&lnT--q!GV_@TVP{H!I zIuBzr1)p1hJE>}Q^rQwBbekwb+?t^OXA$3Ev#&AIW5GTh0PJK5+S?Im3x#txMiWfF z@6hB}ODt54Kcu-er$_`t(x3kNSQqi>N%0S|Bz7O@xD zst8Tf;uzJM#0elS<%ktPQBvWlM4%xC+U3J|M&u*HLkpGrl{{8ZGBSPtx?54g&lngYbQJkGnkdKKPlKTzc);)Jpp ztjKt1mz==gTnF|O1hnQTG;U_AondvOad%O950py6(|rNDrH5B*rLs6LX;dFF7=2m_ zJ(s4&z*l6#4|T@~?8PUG>dsz-ca$);syc+AU{5CGt3{H{k@@lgK7Iy})iUVQCB#+C zvYu!|B6_1EG^{yXEnQszjPN}8p#`w9*7!_ivO2Dp1dp8njo37-W=r9NSD@(-Hot`;TY}eI2m5S9Jr0Y@~Rby1@Lj3fqGKxbgR`v2qN@ zQ{Td3GM}Rey!Q5#NPn(5S^v3E^2cFLl^S&zPxC>}_B``t#fC=WR zYcL!3sEx>auyzI3-kI1B-&O~{s1oemh?YJED%J{C=)tVq2Crp>we`fBITF@xhZR`R z%UiGqz6P%QSnRSSeDbF<1S9$#vkmhS_98(CrwepR`m1$u)Vv&heh6poBVg{21%|yD$a6KIaV^nzTC{H&EVv`Q)H!&?U$DKA;Lf)}U+2IZ zCc#>V!CR#R2`s?Lp2tWW!E7n2lO75TWhY|qGvFVy;i;#?7DnLzj-bVllmkRf#7m&O zgjH%fR+fC=2O>s316dON(3hLxue;+am9WF>hO=N@*l-a-S`TxtG*k$8qb1AHmRD+X ztdq^Kp4c%55huoHC&3E)0-?4ddN&@HaF7hg$lt~&ZNT0Wu^)VAsb8_LS&3P~BX&?( zJq%>)9_;=JT49F`>_i;lJjQk{{P1HSjp$df+Y@1PbFnUNMjpZ~c#WdksIOq5%@MDu z51)DyZJePFB04BOSj8W!9MK8AEJEM9A(#O})$>FqyncCdFD$eQ_9CZYPkPwIHexiX zf!&1T=}GkF3sOcc$L-khwS+oVIrOO(8I6z7D_j0rz&5DTAI5&64fZyh)T7up(t?K%=QEuwWbJXh-Z>;(!o-M~+4jp1u#!2pzEn`#Kuy zdmwoSIV9ELcj95U$=KOg(9geN?*&RBQ57@1F1be$(Oc&g15~#(KAc!rwrYYLF^0fZ0sU`!YeGm7|F1)Y-kh@fNySsbIms7Uwi>VzE*9d>;#f| zOGfn?wWKmkK88%ANpfE~7^jC6oUJ-T^ZN%_@nH%DBukIe#tob$z5!WXOs=8+quM~> z;txHUX~G_7O92JC!+O|t(5$?sxvc4?IS_tEm+4Nk^g8I9Qe;sC0ud*~DE*bL z@=58Gcn3O3=aKJpNK8j`djgd7dICEcC0rCJ{SVZ-y#GJ>cdWm_f)q>BdHA#%Mq3GW8er(+zD*aX#ftOZ46Jvcccf z)%3_PLsw5ThpnvHr&-K4qU~fOavS<}KB|yzkl)BXl_xk$z7x{Le0ezLeOl#6sMJb4!|6~#D$Jean&=wkJcjyZb?j~JQT85%*KNxRHx>?cEx>)QdpJi|?^{yR 
z)X;1JyQHnArXk0))0}B(WNBbI=$q`<+RtrX}FzeV0tR0ynH82MM zu#?%h%rRg)19AGArwmj!!k%u4!}(^OWU#(UdvaZa9g_-=*urcHwiY(VcCTQ5{@J|I z`Q2P`Vr8zaeSp>9yNVvE?L~i7I+L%o-;H-oVdfxThb7XtwqFnb_I@wHZ0s8R!oQ8F zvf;i-@=;8I#`fAPFm>nDz^whk;c*xw^Z=PkgX@kBAJD>EUt7x+5*-8ua?{y)Jzslu7Wm^G% zG{U{mJ6*^WCnBGzg}7LFD4in@GZM5r?i2l}80H?kNfXX~rw$>1dLQLay(0$_y<{5d z?PbNT$ebSGIe|)luN(=EY4+oV%WVU#Y{7_pN8YLet9!qY>NMn~<|XqjG}YLTQhVO5 z=(TH&ai(%U8uNLR%XA3+YzrvxpBH#3V2e4!c-k^JK=2)A+Nz(fIZXYev%s|*tA+>} z!ZRq6T7jmP6aL|5d**xAaE--OpmrwY>Ewx7(q5tky^Gq0JklOiCjF7^q^+r`MK`2E zm=f#`W;NZN`l0&B+odmZJ!P;on4jRubk%l$b&qn7a<;Yiw)M20u-3MF0C<_A=(_Z{&2Az?xhMv5Fz`NwFoe7+P^=?X`&e&F6?F6171A`J7I{*?%%_Zxj6QEU=n!P*iS%pgrcR|morrZlCH3@g>Lj#P)@$19je0*_ORbH~ zV=L;G>yK$WF^hokZy?R&VM12!OV0!?KLv>XCd~N}(3ETIY~~Dd>YdN+s;#cAMBypa zU#n$ZXpQo2AX-V!3(FS>d@oZe-_>lQbY8U>dijS3jtLmyH`HgTiT3k^R1Vt_FvfR- zWv}Up@eLTism6GA7*n4qL3B|U(&gw0VhdMIcUhsa_zLQ8Ej@=_KU^=ov!r&wk-7;f z`~l?c4MOLDaPGCtjiE_ef&V&f2hCg8+;;8J&^gQ>L%s@R>G;w(Y8$H^xE zys?dlcgaG3ew}xjr?{t=XR2qI`?hPDdzX8UYqhhLbC2_b^Cm3oko~Z2tc`*0^Y4OZ z)*YggzAi4XZp?3_9QAwR@6vpipA)ByB?EtlM1=nK_u$UKewV^bq4oSum>(K1KrbTQ z6%U!DqljJREx%(eB7;mau&dLmN>-aQlgA%Ao$ z3~MkB$BgrgS%&rcsfH$|sfHKqPfCM0$`9n0e}Qi1SAIT!To?qtV0pf_cc;hgP3Gdc z)!uYZ1J4vsQ&cku^cY;PolRU%o!1^Bt9#mSEJeZ}cM>P|x4 z2`Bg7xzlySWplbcO~uhtg7DZolY_=K*@8_%PUZmiCEHKC(3oVoV6hub20=f-5C$ds z&W8K!WO^*UfLe_c*;HbH5++XKRo*CWh8A;op%vmd8~I3Ku{awQw_My#?+>q=3l(_2 zmUj;-^=`G>!F)FqQUxP&1G9VOw~~@*C)X>dYW4tWg6}~8I=Xh?9(>YQ^nV_DHR3{W zs(GrZzyH=yQ}Dn3!T!^H80c+G(EVY3poCC?E+=Lb=Gss5X7QDKw{x)TzI{+(J9iCH zm1>Gp`DFf(R6w3$8*2V&PHGoxHM%~A_C7Csv_83}7KXK&KAOh5t-3y%UUU>OQ5r8g zz#2@Kit#bt`rg6t(+@?hNC`Tjuh3bHmrp7^lt}p^l%#iwM$s#L<%@AYJqwXj*0V6u zw#&M(z&Gzgwvc%}JH_*bxW{kI?V7cVdt~bCA7+YTH!_iirv5j=w)y10S0=S z+`}H%FEs8o-Sp|~^UXNUsPP$Pp)H}Nak`c4S+<5|8ncm1R!2dbaVNi7_%5akm#{+| zAoND9nR>FMti@@*GqBS`#3`ygGm8$T8bblLk~&g3CNt6`evK!?+0L=cZZ0GWZsa6p zP08Z_l+HY!e(cXU`#!0hB-_IZnsUpG1A^-XRr2-vn0&24!BL-MbuqWX*9C)JxSdmfkXIqULzUn@PB_1Z)FOUN5p zZAvu{@g3s#kN+b-r+K9@R{uzQUQ>k)qt6m`6|eBaqd|@N58M-u_D=D<@C0-3g(FG{ 
zY8E{nu~=G@tD9=hOf&ST8BC z&y49llDV?qW54N^);^_77xgEhOEXT}n!YA$g#WR07GP49@86$Ob7p2|d+BbFknTo0 zC6q2{B$N^a1nCe&z#rWp-ICHMC=Jr7w8YlL>3KiH|GmyN!|t#%bIx;~xS#v}#@6sp z!Ia?E;ORi2jK`^$QpJodfrFvWT0N@siz7pYqB4w*n1it~@o(b?Cv;1A7QZoGj$anr zIOefBPL@T#>!dx^?61EMjS8H~sFN`uV^~H?#?kZ@=_Aq~r9aBZ9_SN17c>IyK=nYk z!0ODg8JE(>X2fRB4wMb9*CtB!ymuwdNVX36YGv)6bx-2Z_#1J{<9vyWQVwQmo!G&@ z%GcOi$9-2g;VLBbiFh5KwIozL{c%Ps*2<_gV3ZRM|7j6LRPG ze&)~T|G`%sJ;ADSij*NwhS5-(>fufOZ74l_Uw@#z3GEBz3x`8rg_r1;%=We~@{Mbh z@K#)r0fzi=jDuU4Ns6P>CANoO2gf z$tgxYE8d=o!%$O{#0$9|xL%L}`2oMQRA+^K+w>Ykv_Pms@NLHDX^-Fi{-*waXI|%j z^YrcD%)!nGVVIGYS;t8CjmR=0>(-Q7$$gWaC6&pxGS|DD2eMvDZk1%mu2OCB6JY^) zi^EPY{Zgv@Mt{FApa(i-<_f+FPRM*8xM1BE>q-;kjo$zHfAMwp?TyWq;7{zBbS+Vi zy^hY`EBAl0OPVK4vCoCQ;1=MRWsJe&+Qj=hC&IjNK=$u>K5Ox z{z?9ZF(-VL;lZS+P1%8K(sXg5>#-Hks)eg*3-vSFw;>xY;H_Zoa8CV!anrP{f1SbT z=oA%8$i>|K)pnjC9+x^Fm6{pui%L#;srb&d#|c^^QDOMO{LKjHb@Y3h17l~i-q~1S zcC)6T*S^Z`fS0CCwc!w%^RAH>Xw&~;m$dU*J&h|`&+z|(_cK@F1^spE<+T3BPATB} zB9t|Agq5b&_aE@5`fJ2I_m=ZFU`+oTS0!#(!qkNR{uS=rVm8+%;RI_;KId3)dthvM zZP*EYtpCsG8=e!KssCa<11E6SHOM{AeNFZ$j@(Fz^EQZm;Xm&!>S?3)R7T(mGgbaf zhy~I4o83bn79OXy&jNP+>^ue%3Z)c zQpuEaDc`f7Y$=_m%GAN>$DY5Gb;P`Dv^P|v5OHG}{deP{ImfDi-t}bMu!^`g2;;=+ zQViPoSEZ*Y<>!&x$*bi-awn;V*j@O6YEu-4Stg=7y>RNP0V}5~>KwmY20WcQ@cA#W zifkk|T#D*-LsZA}h^s_XY>cKvqI49+ofL67x>TQ1kDN}yBS*`%UyL?{Bnp#^N4Tw-OE-s_7^@uIVZ!$NN&j-RO``-PkI zrUIfja?tJ@nIK#d?vj}sh~Hujz9%u#3Dvj(k$U7aJ2|cF>UIfd0a(o(=<+oLZ6i0bQ1WlDQG7~R?|EMNLedxwgw;M-x?g`k@v7n4|gXGm|_piV;KcgyB99*Z& zD=zY&+4(sS=uV03|0S?OUobLvl0lrsCyeDWl-ItYpEZnpLME{-8R`b)8Jm+a9!IWk zJE-Qn=|jk3*8!=}ozEQ3clG3X zE_u`W@$=-3ZE~3ps7I%9_T|6}=cl^Y4JD>oWMSu! 
z9b87n5;+Bs`p0SiehO5Cy9-Y+cZT0jQuW+U&h`d>Q-ph1i26oHvhRJVLJs(lX&#M2PZu(~-Koxf zP8B65ZSSZ0+Lfa**Ib4B(dNT%-FOByNJh06?a>it!?*m_33fnLTBQc9Tby?WIKwBL z$0e@lcj^L1xPwt-@*ZloH$UK-4{>)wREKo>RTLfkp4L;SZ%NEeCeM$#XLmXG`~3fL zTJj3n;!afm*V4Aj>8bm;vZxE^NopLEA`|G%lX>oo%KJci#&GH$edseI=o>4@Yj5J7 z9HdTjg=*3Rj(?~#S&WK!?qDvEi-owt##~Pu*c7$7w;j0S^||h{V9yKE!w>Q)Yx&+N z9(5L1vW+UtZoc_3b&pQ;jjw5i?`X-1RH>G5{LH^ocx^P^1!HNeR@8>0V<#t9nnE9s zqSLeSa}wjI$Om+OdqzM}^4SfU37YcEu8T@m^xO*YzI^;`lra#;zXkc9lALj2#z_TQ zygKiy^Pxgpk^Ff@jxzjje%_abTsvS>t}=?%*0|;%^#$pJr)C7KJecHi03%YuPFbUo8L;%R^|A&5br9_ z*+-cJ1^BrPE%*`di(X*W~X9Q(NhWR#$hh_+9CX9r!nj z*zZ8TybbpzdX3GwGYz=nO5CF=d`n5br97Wn@Wc7!Wh55j=OnIGp|#@aNvTw-o^w4{ znQcxnBmBwzfBfNUuhT!DaJA2wEuz)2H2Q-I{vn1O6bvUm;s07Fis=tiPrJ$pT18J< zMIT&3t?egf#GmL9o2VoF%=|cnS#A3N?}5GPhu!IkgQ&B2q8i+r*Qd~Lexz2nhB@*$ zz4ij<@s7DIfwL~md4I%})Z$)N;!%a``-CxCj-v|g8m-^{$-TM#fg`Ytdd>epg|q|# z8*~EH)o)SpSsMA3YFR$cwmRsud5ps>=x)7Z7E7TQ3_&etBJ7R^wEh>gVsq}mXS8TX z@`PVAa&ytbqnHb#b^g};{1NxH5@(c+)+j_fCw=HE(Rx>Q&N#}f$o_#XRE9enz5BKJ zjNJTBLC&@`*OQli8c#cB=Pwg!Qy=Hxp|?rAMvjk}OQvUP^fH}Fl8em7CfemU#^@gA zti3${%GG9aq*H4bKg_Se5A$D`{u`Ye)3|FcfYTwyEvS8K+2%MbmnGp~T!q3T*I(p zJ4wt|Pe1T+9#FS^%=xZi4O+!Kev~`o>GDH0hr6U}xH;O|Cfrsw zD|5s{P7&0Vb~+-+qFuPZT*A>Or#;imZw+<+bsZC$yEF(DRZ+goDLh4~skN&OOn|*k zTjrnW%-t-~7u;SOyOW(^r|^x{T(!Zm7sNdz1ysl#MpM7YRM2O!Tz5-qg6%om*0lIP z&QEqV=VRAGbi=*@>oEpRwHK~NXak-AnVe5(i|T1lxCJkTW?~Vsj?j%Ztxly?BI~-E z*6u=2i(-EsGFRu~PClkJ7Pu0HMkpmMM(wp02>t!2`dviZZVm1Gh93G241s0b(Id3e zJ+K%$zPy#?cXBs*132kB@;v1Wd(rDscx3{CClxkJCW}E*QO^qu=h(onqXxI3|5`I|!g%ZM-!&8k@uAkM{-tO+c za8_4%@5il57!g0%Kh0Cy{aERtd?C37m{_1DhZ-fdJE7$882w8tjX3GKU6{Sb3t^Q~ z(zD8w-IK1aP``1HQfzsMoLA~9Y>s?wUpGP^d+X_k^(*>q>UGtOps~|>?wkU{+YwFM zwenG-+*V3^B|~m4e+8m3KoxTrpS{uc;-s?zSKal_6XyroAKx1Mif+OV&qR^+GU{YK zq(6xKiV5(AoZQY`R8Wsverq)~7STM2zD-uMmpKLHjsxg-^o5O^39?hRAE0j4&1wWr zw*_7+z0D=YXU1WpviT!;4Ndt=wY@tE`+ZIZw4!(k8vqnKU9s~{+o<(nddW~Wqub}7d#iL z9^MtstCzKVDVcG%lS?LK^Zumvjoq12CR^JqpC&$zk<=}6N7=-IwHKa3l_S}0)mX1J 
z&?B%>zm@m9i@UGO3*>Tc*&7!#o@$2ZO;iWSS;UEqk8AcSaL+Dt2kvv9h3A#rE zZINj5t}q`L!=*8SnADi6{^tIi{@F2geOWy#loM20rn(k6s@=_MVvVwvTkEU`)*O44 zvmX}5d1A?uuD@6dCOV?M8r*aaBa41C+=g86pg_*R9ds5#c=%<{%$8|pe3n@#5DrWY z9t~Q-o}s?sYWe{qkGa|yWIQ)UTSLU6qZX4- zo64T6BgkFTF==Y6EW)5?%*~U&5>-zd64NLhxPExbXc@>(C;6`5vQ8 zF)Oq@loGlhY!SK{x)M4NYKX_Mt~Y`WA(Q3E%j$3om%E3!1(vX~S{=*^W>y&ZyL}z} z_dPG=i2UADIks7R!T6W43;ma4>c*`1hMAKzcWz((nCqU$a(`GR&!vWF>dsM8<(=|% zd6ryOI*1Bnx>!^`Atj65;I)lH9l0g$YPWG}D}cHzin`Vq>sveEw0BJvK1Z9i7Ro6m z^Z!9H3p?zgXolVf-(8j{ssPb!2K2^NtP11c_*`Hg>&4^T0PDK~4$HUbKNhqyaWwqe ztZn+x9-m-*Otw|U=%g-34gSq#BpR_sQKOY{!l;2F`*L%%*%eOC5>qo9qa(SIb#bHh z-YV}5blnnL%H8Fs=rmTAV-!ES&zjoV)7TSG2deko*WF9h>7MhRN}g$IH8m@}`W{+? zhvnV!75R}oo0+VxSY5b;)@DWUL4)9q*LQ~5XTeqci=ttwnZpvT^JvVTFpr?_nQ5-E zme?Dep{!9ih@y6bVS0-)h)1l7d(?Abj?fRR$1O5At#GzJj%wm|BBsiqe@3t)ZGkV~ zaA$}8)~drS-P|N!ZOq}iSL?O$0RL4xsa?^|qV2Oto1%@>hG^etKWcllhcG2}p<2+- z7=gR>DOe!iqjEd!gw%&-x7A z%!fEyX3*|AQO<14nwLN%+!H?fBN+1k2+PTf9Ft1QP2>Xd9n?)X^IJg5FT1E7Kf`4) zoi?cs-ux7MKNtAARWRL}+Rsrf?PGbYX{KT>M8D*&K3)F?#L8H`n_f|O>8G@L+FX=a zc4>FCg8DEVuJ7sjjBkzeMt;~QZ}2mp?;NJm{)<>x6mcaiD3zD*DdSYxGgp1dJ@=@e z&OncbyhUswJalCjvJ&HLiClGlw^vxN%p%Wb83Vi$R>yOpYX zPIxoCruvt=x*D&}aQ9YwdB5-}-kNxJzLZ7x437TpKa?SIGpP*fsa|mg(fnlG5qn0e z*x9YcW=Hd&Ig`2NA7ifmwf?XEQoo@OHoh>gnBRea_}=`)%4@fD!l1%SqR08M_=xM@ zgi=x;+-F}ZZItRtIVC{E)?WFa@?M^bGWs95>ODYZ_aObNW~8Z;!wI8`>#{3ZpPMy} z;`-0pT6k+0wf)*0t*mw`{7?8G%F!$IzWNvXmwGL|G8NE9#y7aR7dL7dKO1gy49cn= zvyS;EywG%aIkmBTR2-1?S&!hYGi`tjOEOFswp}nDU;RWH<;i=(X;V;4y z!{@>Uw7S|LZG-kwdyAsveXS#EEJu0YXmnF%TZUBGyUD#d($o3GJwASW;@jA-e3QNR zye(qx#6F1Y?9bsnb=$jaoAAYujyl*JqL|U4K&WtdSExYfrbscpz zeJ1K`&_~_?s_VJDKzfbW<9-~=K9O1rUXc3hK*-HNySles*D7w#Wd=w#BaDoWmSxJ! 
z0xg{@k=-Eon+wl`qoO1qQA(>{syp5J-Rs?N)N7uxxWQjmK0>efklY0vQz0pz*g*IT zmEuwCrpv+_nL{Rhqy4vyB9YzJ8f309QjEX#wR$!9oA2oX{lZ^@YdR3F7Csje`1#B5 zC-jLo;bY;1FplHeCGCvXO)Iap*G9oG{6uq`^90SELuur!v|P%#_$i60F$2_~dJdoP zym8gzQv4B5s#=iQu$kv?cSH4pn&j>vT@d%l+1y=~2ztUj#d(a=?yTcK3y0vyipE@G?s&CuP=mcSW!5{F7yRhc+E!X+b9hTQH9S%qtX0sj z+CAh(?n|!rs9~x80!c}+qI;4u#XG@YJ+@x#7ygUhk?J0Gp7&4hZ)#Qd0C!P$Zn=_p zR9uJ3beuF2b$~~xUu_i(K@i$TYM`W?j=S>?Se@UPX>e|*G6#9h2If_xzj4XfYR0T6>}~gZkEGt+3vi)vywN?lbTYI7Y8{6?SPenJwxt zTQ=n0e@U!RN=pwn(&F@s)Z@$Ob@dFpuCh^WCf>IGaove|kXSCRpyy-tQ=dQ98&@-K za%@6OA5TGa$?C<_@~(6ja{uS9;w~r8lkTz0?kQhoUl)YyTA7T(IbkhKwD$OrzQMJ& zfql^IXHH@@xPo8L5c8$c*cfA!hus-vdzNz+F+)ypJrTcDK6A$@7o}dxR!^euu*awF zaeLKd^|gDBd!^D`Mu|uK5pHHZp_=P3xvep9vX@dLiN%p)m6d4y4#u;FS=xMNR5hmR zRk@;Z`VoDkepj2IP1KHR0qqlgxSmy?rp?yc>Y{NQzsC;RHtjq8DlFm~L`+3kLwf5+ zw8eUSq>nqBXM?cRtmLZZPe|U+zNe5{+50@ET%14Q*LcA{PaW*;=WAvVc1-Ch3Uet)Msu-*4n+SzP5wn{1|J2p%__>7H~udnyJP?{UiOg z-oxByeTw@1Q+rsXI_%mGF{W zIzkSqxbxPEz%jpS#ajDtarn^;8s&@w`V`zy7iwcz3Cd|<#>2JHdFF@hp?$&6QF9#? zN(m>2$A#Vo{|QbC{Tyzh`LqV%P-u5}84*-jZAJK6xRRMBbXA+V`@4d6Hz~LOX~M(! 
zoBlq&n%-i*^09jozD=wUSDn=)$+On?*7wYlq%K8&e7!sk4fIoTDiKXyX@vNf@RVF^ z9-$pd^6w&bK~(0p@7Tv#)k=doOUAM4vhms+!#x~Jy!@wyH#1qU^{y4-QR%AmR?4n4 zSJONOv(XWC8g<@LsQX=0C3Put#6{GyM@V_ar{ovNpq8opb zo>$7OZf-Qr>iP6n#LAPjTxeR(33m(U4_hG-#NFgjv(VVk3-!^Ofm!WF}V;a^`4 zS0{%0Qu~B^nH}=<<{4ugWJ~rU@`d7v* zjydf+@7Ybf;CHuoN6=<3telh2OMS%YctaNy^TIB!g9>{N*G*?J`rQ(01=Xv1S?78Tv;jKfZdop#(AJj4K zGD;S?veZ`mSg1>6z9Nz@Qp*{D8tYuEsI?r&jTUA-uqfln3*6L)QUxwR{IoB;E^u zPo+!B3HL_U%dfm1-*8_-%&C}K{=WVu{)T>^e@x66Uwv;A&vaE)54eZBE4Ys;vC2R> zLpsi#yN##vcS2*KEM6C9;A||6tZ*(-Q9O<7`xVai2{Y(<4lAaGY!(y6dQ;)c>j5;dR$kPvFB2-&uYry%66B1{vI3ksi(?oVW7W zzp^S9A~LyPt|CeuLA#VgO`x<{(=2Z$n(vIK#(#!j=3=MyIWb2cw0TFP=1&IoebUNq55iyJiCxjT>l}j#v|E@WmcSEZ8cyM<(*NXY?41&n-STC5 zt}@kKQf;di24%IAxoxI;SnZ-tbMI5yD#Ji@f?Sq=XI)K_baB6!59B}x7%DX*3Gl+Y zJ10@C|CfFI4QoGs1X+nRFPl$IpH+#g8Eg&V9^bd>qC|h1{YH1Vu;XD}{Rv0B1v%Fo z;x4h4w2>A+C7s2&yrDc1mBmh|%>PMta3hZ1vGmRTc(lJJBU_g|X)M}}d7XWBHG4ZA z2$Rh?^A!4){fvA@s(zom=fj z7_z$mvA?f__rqG@k@!v;O#It{eYB%AQ{z2FJ@1Jva(E7Ss(Q zLRp{=JBJjs?%Ps_+X1>^jBrt?CtecUN{5+OEGaAA2z%v!PGc9`t)u07G;L~3}1ehM$;{q&G8s%;x!9f&ahE(^5C^ktay|Gi9pqf{DQTkgDShLH*ht)t z`_A{wDbwM*wsN-F1?<)Mxc!gs{Mq=(I6*sPBktHplr=>AOslWe(Au&?|AE}ZSuIS= zIZnT&7c+Wse)o+W%rLjj_2^&3Q9G(Y9jt+?uCPtKCk+ClzEW-j+sE&T_tx~9o@<`6 zUemkNcOB=_j@}cVZJyhnedzbTLFpmCa!l^YSzZ>mQcrnD)u?P_DSHNs99%heA-l|0 z<{M*^G0*sk=Objma}$kSL_u?=^~7p!-@qOImD81)(?q!OuY~^OAtbPyBAJSSoTPX` zzm!#aDHD`_A6B!8a&tLJK8-)!MtCiw(G!|TwXHkaK80Y3_*lIiOHq&%_K63{mF zX_bd$tk!G$v=b=LtRic5MoZVq({s-2vQZDN%RVC~nWKvKEN4A={$Aw#R|tyK5og~5 zidz|>jCG$zb4K%A@+|b6^L*p2?OWw*==++!vD|ZoSIV=GN^&<=9@2YDp*!19tVyOu zhEEw2Y20L!usb-Q{4Q5lYPy@NPt~{N?zS<5vWxU?^)~Wu@ig^(?wQGB zh58e#OCII8Tvr~-{<^a;lY2CYd7~X!RmWP&+L3H+FbkO5m`A??yZRA(%8mMU9XvJ> zY87*XSX?VVy}pt$8)DLML3|Cqb2*5>#opVVn(BY@UeQ6BvluL_fgf;N@7W<84u`?; z9!Cpl8c|J0+98{9kMBLJkJ9t%am?xI`VU57=I}VXtMei<8J5pFxtUvlzi`v{BxZ#_ z4*i^#JZ}3-`76X!^VRUS@XS^hxVtHd@?r5?p`|M>a=`x73L0C;!%Pc52;~g*52glI z2EGh*4U7&f3j7feiOcT>+lMZPN`?D`zYC8D_Y5}*$CKgsHdHM17~J}R;Mc*0!9Rok 
z(3sGYa6dfXcSL$hZB)m1F>Z9yuq;=zwa(Ek=iHpTaumz%%KBGw>%{GG!5GCm!#!Ef z334}=FzEvpBLp&|5?ngbwHRh^SvZ$DQI)yq+#$~VoO)^$--Kc*3W@`82295(Ct$uZ zUU7$dX>W*xRc#jhjCiB0`Lk7+$TT1rlCHc{)4k_oys@`po5g(-mp`sV?9~{{dx6|e zKFM+2ani|?6*4N2!5fRl_5*UXU!%^{fm!gdagh=<1;Ga*YIchbu(Aw znOIT%I%Y`x$>dGhF6GS3T|Muvyle9Olq)U!@+=DztHjRr7Ia?|KSE_C$w_p+cKV@u zpn$_1EDuu*Je2m(hd0aDrA)CVHOP4XC;rmYaO{8tW742nEam#$nx~*QMXQ3-t<}36>6? z35*Nm#b4zIs$A7Gi)I$eESp(B^BbH>%F(B!;M8C^xFghueM>2#lWfL9^RyjwZIefN z9>tjPby8MltCLgB-7t?Y&!Sv=a@@>XKiL=GH>S9!ms|&(yvleQU1ryDKGIaUK*nx? zG63Jb4es3Tg~}rNYpIpkNa%>lfeW@&Bl13vsBbkTF32RhE@=H`bkS>S6~I+Wq2{3+ z;jUUqW0>_3e7@aEK5th)X5AoM%OrP6?wm9zAtr7?%zRH-<(*(V-&t*pbJ{h$7Eh72 zOAnvY`Z5z;HM$eq95D~$nU;ghzsESmYVt1`?urD}1R3f|_Z^?hO(#C%Cvi;08PwwRTJ?3_N z+mx5tw&$Fd`*EIadCXkzawKPKpS(G)i*Jv6f_T&k!X{CTrTQ1(L~}U#Ty8P9d_hT8 zcc{D7m8$7h-H+u1(y!teVLq6T=3r#D!_ptdC|d|exs6lU{>_|Z^d&3PCY&$aJZylw zy z_d}{yPA_77Wex$uv6+4JO}hs>yc|{^^ASq*Ent1UWbEV#uVV*yIaoD#GcY}nLFM;W z#=4BL8AUSQr@u|llhG_=MMh>u^~`mdLg4FwC-^P><$Y+6CYu}VyRJ0(JMYTaUlVI* zt(w!$y)*Bje7o~K%Hz#-H(Mb2W!zS8bESw-#r7GcwE1LIZ-%?+J(x8Gk=r#V~M+ycc#2d$(m9j>1g~z|8L$*rKtGMaa&9EY2lioL&5Q6euE%* zOX|&xAIvFMDg4U*hGX0er^hN}rafjaW1}9?#%WggtMJuO$TLzjDu3?#bWb-M3L13%}WsJSdM9C6nBqIM%rNNzMM_ww^*7;R zp$Wk;!PmjPp-YUv;l@<+JFBDp)-D1f=%L*ojmB}7gac~{vH*LDfMyz#jW))ARFs@lx$eWb6N=tVEc7$uyJMLunbNOx*P2!}Q z9ra+ST<}!p*^JQ{*-aAFQ{$>9Y)lxN@JD>ZxETD$N~vAs zZv=PbnPnSO^bO>A3TnBjpw`s)>2bykLuDVmi=Ffl_LaNM-^^p?ChG5x&0=I5##r^O zw`4>rm_v*K`atcw@PJU!;Ct{TRWoL#htoEsO-fskwkGXfTD|n2({=bGqcTqhwuBOl z!Hz8H?xr!{Ctk|-Huv)U^$U$Gq7)rhx{9Ps{&aC^yq0FVt9aYR9Prhz@f1Ye>?~3L-yr4+S;frHjm{`ft_}4LHV+iZESNDQ{Yl!F^qW#?4bw)Y-At>Q zzK+)E3#YPSpi?-Db;I?Syu`CAu0YnAxmy=#TXRvLQK+VI*DfKXOV!-p zd6xOI`|HP+kGm81B(7gvD7Hgv0e@3p0na0)t@JDUZdJ{tS~JG>(adU@c{9gmei3*Q zJRY8`7dLNMO;Eb8jvh*Np|Z%n(E zIv}+}YVXu8sXwPCrCm=ul>TXkkU25)QD*mmJM@XsEAp3ANc}u!TT; zq7#ZPEt0L!fjsB34vF97-61`&z6y`Ztd5)6`gbed2hx^hjtxE!ozxx}IAF65Ix&FHGYU2CElS#{x+=)fwPWUFPwH!SMChytZRwA z&IpHp#lgqSc$hJrd(B@1!dQM3fBm449_zdzZLI5B(j_~h3~jS?%z8D3rP>}q7ChBszr 
zORN4q@qK*iy0p<5p+HWppE(hI-!jqzIYZg$9;mkW)bW<`>AunI!>0LTWAFP%#+37J zb?1?*2&wiQV{Eud&=nY#xt`J88vfc~W2&{m`OdXP$jUBy6*-pg&`SKqV(uNRgkYLt>`m{MIh%MAGkxXX3B=JA11s zYh6vPue77VPXa45S7#;%S_d13=aVUquv4l8@+U^v1QKkGR8+a`UgVkQ!-pw$eO$e` zQn7tv++Nw;S=@kj#8&N8Xlbxn@KEqkXe@mu&RT8%OFW)TWO)?6$3RqO^20Ts3_iIZ z41kg#BUgh#dT%x`mTTF=SA*9AO#%%9^8&u${9txm)k{+i+#g;U?hTf;a(GhsVz@Y! zu_Hyr2hBuhgD~A4@6VU`F6C_YCAll*Tb8eHo-8?=XALH{iEZv# zCQXX0gflw~EJ`1vgjo*MPYVk{NEl2z%)a1Y9^fnT9*F!!^{8)K?7R4+#A69Pm3%tBf|r<1^RrW9v-(vVUM+gr;{vvFHVq;y03U<`F6yN@*nh9 z_22Pr@nm(Ek;=IS+J($L)GnH_UY*l^)pr}K%$g{quC#u#ej#^r8jaFP_D9ZhXGdfR zdDbg%9r{PAICrh_W&z`#b}0OsT2D(VKFvcdLN(c0Cxzayr(PZ05_}nKiSqH(@TXd` zzC*80ZfF}(fD4{-3vE0zf0ytMEojtm3^C-%7vD5xU$$;Jf6cuu&o{XrWN)2vA$~|q zM|G)m3l)P4_BZs{wYbgJus2&v&Ai5bO{DT#O5bVxZtaZJklgB4-?Z5L@!!P1j@uWz zCgy}^jxt{a?e2U}AJ1V{qV~|3IApkyWIWP$bCflvnLF&yT@$2B?lkWhe;~FAy1gC! z-F?T@o$_Acv&bHDvU#mX<^wYZ6xNR*M3dnk*D`aOP0T;cE#w%JU}1euowkygN!6tk zx{7V=+*VU_FICyxMhP+>6O4|=Lma+(Xj8)nLh+%U!Slf;Aw9G{{Fzojf3GhwZW+gn zjmC0NWntsJamm*(%(Z9Oa~X%%gjL^gLDF>K{sZ zc`m5qPhp{;@`{euKh7{#=y*ud;+G(dBxQh0VKIrI>H?9!n%p^vDsXT`O(qE?j3>2SR4n}ND2NvqVQ zPV|wLo9}sL6gDQ|fYzFR^~N0LJQ2pZpTxXRSeP;(>y@nivkuSl7d+_vai@IG)ECMx z@?TPd)L&dH407#owptI3^?G(aQSU`YeXm*CX(qIhcerzUR+8sSRfELNm*l^xl~fZq z3-yFl)X+@Wm)-44<{s)Y$Bp}DGy4`k1KFq`K2eIQU#sWc|5F;uwWOcLa^&cC2*Xfz z-V5$@I~vxts3;Fa7wWQ}12phWbB6UMisORUI2YuFVBZF}{TSWjdAJE2LYJ^FI>@7} zN#;o~SF`m;%)7y`6K<{TCc7J?MzUP%$U2o@Ye)XOgPx|(HtL&x>raq@9my1bYdzua zm85EY3gl)JbFMWH*M;r!FP^{r9TWaadYpVYxpZ=)Bu`?qcz^76-$GBE8YCZ`RemYX z6vC0Z&QDe)l#_cICeB8MsVksxNdCN`;&bP7|EY9g0AQ50hu2-jErW7+WN!vUj4I?EXjIMdlj?!pKS1%vE46el%|2Sy$P9 z=gf0CLV5Do_tC%kl&Zl)^qnTjZDm$f|{Y(QLAEiRKd&4emSJ zQN8Udy^*rZmE>U{*m5W}KqM@d{{Xi(1COd-$QAwy4)hVI+fS)HePpe${<9M7;;>o! 
z;|lizu0DCEbcjmjQh)bxtTmjkhm%Krq zBF~qf%R`mh%4tPau7G4HA$ZVzJz{SJbF&S__>Ib_^3k6~W@3mfJ?*Ky&7=%Pk{ zQi*Y=Dzm`)l>oU?PW(t{>pBfnH5(epo2c0(QpfCK7qW|j82gj{GuOH2ltevd6)dij z#K?J^t?*Fl+C^|G4_RmM5J*Qu{STuoxr|DBs`fXv#mo2_@t1#=P?O^|Vlz zx$C>HDjVhYVq#>k*;@xKrguRh=sD=^p02W@A(a8^UQ^k|^;l8`Ia8M0L3a&MFiYev zVkg)?_d#jDHxpP%Oyhz1$Xbr;`4v!$4ei?Gsn#m4)&gc_7;U_wmiFDRyID z@E3j#gXte$XHN4*{KmUX7!YvRVZBFiRuEP!T2L{T1V#YwfJ!?c7AXMI58+S zE#e;f%zotShl7ZHrajRlJvTY~^I8|}1(~p~!c)S(l1t34<<#DWmxSwsddU$k5O#;} zg?=spDN6ad+EFESX39 z!LY1V&gsZMuAX9Xd9jimO+L3WTmC_olvB!P*z@DndF~(Oh_E+Ojhd5TwX;@$WmsWu zwf?iuI05GX9syOI@4z>H=DH-*km|#DSs<5`&q+h1Ct?PA-2>qPOn3ffJ<1X}2isb9 zE`pf3LvOp|>}2O#RY((l7HdjdrTtPz>74kfm?*vw_6eP+wRA)^rcGpn<6@r532O8u z44&5Xvc8T^eQzew<6QV;H{n;6fv498c4R9mWIuqNSqawifL+PHXl-P6%VQ5`Mq6M{ zv!~cU5Xl@u@jeOE*=*+gf8lrMXSb`8`#r!N+3Xwz>vhsO;+%G_JMTE|@!KKixN{%< z?wl~R7org>2nx7A6}`d|N`F+O#wmlSHmk}$xrZFjiuavV1$Ttr;w_;K*YN{B5tDI? zc!z%5PNB58Rt$-Gr4mwc?#KvfH`t&v(iAD1^go#JGX)3U;VrbD?>JN0f0w7lPB6lr zTK&jVj6?@$hEaxHRT@}@JgicAjdUWaF4W;}QMZ2zIPSjX%LDxV(-tHF)_>f2AzeGR&%%jKU)R3J0eKHyUvoxWY<_276Ns- zrL0Yq2%V zuxIQ`qdujjge{;Ae?(h*6ZL+Bd#~G{IQyI;Fj5sbwCh|qT&AnE@D;JcP2o9i3CG}r zbj6@_9ji4lv>O5lwRpIo{fajf+dhj;1(vyVZ%ocINd#d(}>A$t$PzGUuFNPCa z0ft{1EpnR-Q8J@o8Jx|}QBuF@3efvvIM6^wF+mhwec%xeCqr+++FgjR!x$=~GeJ%L z20K224n#pvr_EsuPlU(6+jRnk*K;rsf1;n&fbaO0Z`?*cVH(WaNoX+7C$e0Hy3#H_ z?GClG7-HKf&$~4^{^~G3W7${V=IR&H1_Mya?Sx)IQ_wwCBef%~$cs#%U5`?I53vJ^ zy5!{oU+@l$=oz@G+u(sOgWEfmbL>wGHDNrprq(``{og{Etbf6p{D=2G;SuGY--d;L z0S@qX-Zh12wiDk}ohn};j#7MIJDBTV^Nl~ii9N&LXTX2W0oEmYzAZT4sPmtK?)Gf- z6(;dKi>q7DXJ>=I+JIVaYxvV&a>u5iaj*a``ZCzSM@`Pa0}EK-IgWFP=iBqZhy9qlSBJmq#(M^H)&IvM z@4;2H<663K7Ezva%MZ-)*09Oj@LBcvwAvg|u6@*daVk0x({WUo#uY@bA^Mm}+s$Mg zOyrCva!$iOeD+XUr5~Re^^=R9Sxp$y(W|P+BkFw@^<2zD>t^Q>JyJMNFNd4+wqYSZ zL(SlC`or&J`w!DIqCIC5_ii1JZ5&%r(B6vz#6cbhU_kHS_l^AT7XEMhhhrzN?c%4+ zXi6+(+)ZX=d`}Pf3BK)O*h`Bz7SbPP(Kq@t!WzR-sZBqw09&gB{i`64V*Ff|dVf{) zG#b2u`UlHzoQ~@EXo{*if9X*n{|4|pf|9ejP(9&^?Vn2^KX2$<({WyM(Duzq6 
zKZ1_@6!ojq;qym%=N38>(dP(C8Y2Aes1IJWhp|xblbaqM?Qxi^@yKKxM!op9!%{!W z91=bL{UdSC0OE-o zL|iA~F`Zy#Z^Hfjl4y6U&`11RScro6_w=42L}r6wyu_hw-;7!DE6{qE?15+-4{;{K z#f`piCEwSbdr_CY*9Ip7&NqSCem5-?owIa$?NF+v|HE0WH}@NFRPOhA&{_CcxDMm) z<$@=dPn-yv@dPZcfvo$t(8W=OAzW`Pcj^V_F_mxrp5E~*dJL~&w2vVFTmtU-w?Y{( zbKU4;d#PDJ!5gcJ<8jjP4w?nqy%Zeh8`eWBnH_&`m>hR;(f*VSKxg*0U5LLrIep-W z4Gg(-E#?+{}$A)EMma*P6FT6 z(m6mSLMK*Dk(x@?rGipeJRsJAK{$tdxkXqhWEb+I;lGaFw%e|6r`WmdUUq_`Fq5@M zBd3PY93`SPN;|yJ>UmP|NV}{4?GDNJM2~PbVmf0%6ivpFOa!Sq)4paKL`3Rg5 zo}h}u#I3Moo1pMiTqr@koU3&`Ay(TU>=z~yy>uo|(-8c{YMgQIu~XV(KL&$53ryFi zMeGH5n>^-T*A*keze0X;F5S^Am@3Q4VsK?e+)5l<_gqdGhvF`tK` z{6gltF~pM};p#cpsqJKgg>`{mG0JWSZ}tpGac^hn0c40d*_>;v{C zSn2^dRh5bI>%wDh3*&m2^9{Sa`OZLi6ouFmHbzaXBm2?gjs?=^Yv%Fe#2NXBV-Dl# z^PkIb<%Ffx22b!M!Y=j!mtZqL7v2jYAyaq*(}uQamr?*foK%Gh&5pWvG8J|iM_Z?{KZc8 z3F})LtDwUw7$Ne`indAt)|m>#24!iz=<1)HxjmYR z1tFVY;ui83d)`^>b*i(XMWceVtZFNXexe@GOS$$bI1LTJS>anEv+2a}i-^pke!RPh z9k$U*+xU40ZFZd2`<=%**3h%;YWA`F*vhei-m;AMO^3nSjq6ST!4+_xIu}^okFvh) z?dcMGujzMwyGPe;Frv`(bey^6J&OdVTTq=HY*=3QiP0ERa&qEZq-v50Rv#f zN8PTY9wL|M`|;7d4m*S>`#U$Q)Nt196{yYqO$)uGN5Gt8{en^>YQk$?yNW{HdYs)R z;63`B@vsDytDf}ts0P|Q@@%)bs_p2U-6d{_Do(usulNK{i%TddMOB!NbH$f@YjgTUxOuHbQHz7Do+Quu7yNt3_$rp&~z5;g#CNK;4N$mJx&hNgMe1{A%n< zM$yOjqoQNc5|!Bb_Jor#Rrrx{vqD%S{KDv2CE&%1V_h@y_#cyTPJ$Ve2YyTip(Ovt z5S!g~9fzeciFusj;$^h_pDTMM#!F>((@~|RY9FHKsPF~~1e9txd{T}#Pi`fxO zr(K4y3+P3wv?kWC!#kJaAWkIXb%Us@2E3( zbeE^{39-B{JNvD??E6cwLyIcR)%}orXveD8k?cTUMtpBp#~DQaQ&}&gN{Op^-r+jQ zKHv!J&RJHB!^|nWdF?ps=}A`^MvRBs5yv?33jx=CGKZh=n@70iN)UX~c=0n-t@ja+ zccxuR<4AIe>vH4N6U}4RVOB3dKg|QCHAm!)^C|sy5wqPp9xI3ezr(S)hocf*wj{5) zGK!XFrajLJu#TDTJojH=FWUA)P9hnnsAIIiHD<#md}20Yg05u3vN(eCiuE?96YKm( zJ>FRqd^GR5~jq<(OkCWANG&yKYXamZ|^ zp!1(2IZ5R0pTl11ZRW*?wxanEeAfUXD#;pWPBfQ+68ym|iYjT6T@B1vH!DAwuu1&< z+SqEn0y$RHJPevY-4LuhR0X$z061=5vo2Hli9xwQu(sI!BkkE;U2%re#(z`c??R>M zk&~PIU60kOHokgs=-YXD^&*v&IrOD$wB9uGD-qX6=&+&?EI(0RE2EX)-FMY{Y6H(` z_@}GApLko5mo4o5+S8)?#^1P zEiR{nJJ}=%{K$P_#`^wk$r%r8K 
zp;lNta+CKLiOfZN>?rL&j8E?bbN*XLqGB6xY}%k1`IXv?H{2arIc_kY2q=d>6DGl5 z%O2$XLGG#KZ_dpSCapI^sY_VTBIl)sVvi2T#n&JyJlFDHA z$ZPzST01S-?;UiGebA2k(m8>T<#Pv5MI8Lf;Fnd$6?9sI04Yw^VSsCeQ<-@y2bteP zcukFp^cRY`MmP^#ULikjU30{nu1l`tq9u5RS7HSW?7o-A$ir!yI@A*SOJOM%e$qba z9$b}NQeUzLyUC>=gxj=Ih!@Heb3SB-O2CCLo9heoJbz)eY91NlgdI6jmN<8})0kd+ z!g&aCM~PG;mR=A!1i~UuWRd;Sa#5jaYyZKXyp1!KO88*s8EpJSvK%o~TiQgL*!!$U ztTg?pyJUy?(kya`eE4+k!4`7MUaEm*h;pm{Kc?OT?&kXc|3Bw__PDma_ee&lsEm~C zQ3ooRI8c~1mP5h3p zA*aV-FFXKO>|^m-??WdrjHQ-t*1A-VxQU2sI@{X1(b@N5dT8fa-DG-Bv_59cwJ$+( z++w#ImFWUo+sLy}$HThG@&J5tD0-S3Z_9I76Zg?Uy(?PHg+z7BbSiHBn2g82PWZ@J zu9}#E)EE?RgskvVr|C0vzj5(ybWXb?wia&JA5Qr_RcjW-pGdT(w)X3ZEwOlF2}k?H zS6P~1@41X!@t@)!Tb{(ixB;2{PvTW%(0b&?{6syx0n-zmtREu5r-Lya(e?T!4^qRp zpJhoRn+}`Hu=)G2@1LD$Vf9(Eu{ZC+2i4yAlV{hrRk2RzbHJLx?!->x3uHqfyVD1) zLv0mtjJ2@sHnMH&jHARs3}e3}&3>~njr|tGmSMEEb+?t!@qVxIBpK^Fts{(+P~Z_G zo!ytOta-`f^j-@vb0b-Wo^Xw!)}P=ccQg9E?4BKq|Bg)CocNH4|8Dg4%%qFsYnFc! z3!&rR;Xi*T(y>4GCN|@ZyFD3Z&j!x~euj$4(}}H#S@F&2@dsj4(bZ}Im4f)bf0I3}1JudBJuxCtAu*1a z|MTRZOpaH?U$dG$^H=aB6~_jVoqabE2=`!<_oM6D!|CK=abg6`1WgCw~+bp6wqsJEVY)?dvvmO0vca4YX>yI z+1PpSgWI22`Xm}A23oe^mrNw8QsJUUe0t)C|)@`(h77 z{Zt*h6j>LW7JDIjFm_*jJk=SG#9ofwMwWOEkvdsa<(eIT9S>rK*v$CWxGPpKG1B@D zmO3`Mk!R~^Z%DVSHm+}7YutN0M?51v%iZJL>7I~#g?l*lDQmjkr9#!)j!m|6)TQV{ z70rB0A$6HH5uGqEo*ADX+Zme_doN}srnfw{E0!Ny8hg%UtOz-Y^^ zZa28{<5c)MWSc|R_*|-wwIUAZ3AoNpheKWh{*5T6lQ9gWw9 zhyEFV4?Y`+`xEcSUy0or?;W2(u6`yy&JFl+?ugco><*u0|Nkkf6xhNCL)Osv;4Wff zFHqUg6LN*_5A`Mww>Dj+d$Q)z&`7w#|6PxP}*Cru78-JnQkS z&S1aV&+Nt~0_XvU=ngnM{xduFe^cG$BfK{a;#;Ulx;<(^FKZsHM-{A7)TOXdz2Q0P zHar~IT>f_XljRxZQ_DJ(rIqa|eY^B%$>fq#B~wZZOFt~zTJ~e9o1Q&QgGVh-x@vgd zcILRO{^{v6GyCLpt9VzX=PS>zcB*>kDj7LDG8blEpZQ_tEt$5o!>$_C1h{DJkmydH zPeJHxcx>1e?ny_m{IbD;Dv=6AnXgF9vMeyF*uQs7buOh>!F}!v-d}ycdi#66aSe6e z?x+s#{9zwr`_ed$fBKQDX*`;2yNSZp^SI zEjMBRb)j$f?Fk#5h@K4(2=xrs4m2r$oxHbJWiOYemo6(AT{5ereMzU1H~8yb@$u5gwPq!U#f$LB?+?F5{i@}` z2EnQ2pO*eb-Ol?hmMPUu+qqEctch~h^}HkOSG4vAcjoh1h3Na97~Dc6tQ 
z7yVC?Q_wK$&di*Q=JZa?^e%FjxZJJ@_Q{stV(lV>L$3vfGDcVMa_E!DrPzr?1HAm_ z$n9$82vbjKs<*rEe&06l|2&i3FS=&Y$E>5Hi@k@@JJ~Vjja&@f5&D!`UCv-d>K6SS zSWZ>T)@03{3+)Tv6S*@|CsH216#k7WCVzyIbc6V*e0*7E>D$GwqTPk33x*e@73|C( znQ!EOk~bsoo4k150^l+7+K1t`p01TvRB2Gbk@H67J~ao{9#$t@uW`eY2IK2qSM!@H z`?KHl?XeFsHc~zKS<9NpkU;mc_9fwKZLjqyeyn^z_<>kGdL_PUuSKTPOYS@TzhsnT zwH?rRrk(1 zb8Ea;sb}VS@Ar;QmRUq*Hj959uNHkT5H9X@?Tu^i6_=NNO6R8$md9;D`&4H=_hQcw z-$VY-(kf)0%bJtDE%S{3TXNCb89gnwxsyV+`ey-NB`Xvo# zHJo0*S?!ut9ogUdrnpww23l&zU!#`lU^=9>4-79(FIbuPbKzfQ1EYg25o3=1Ycd{R za=+reDeYXw4_TM8zs=sAbzA1~wBFv&9o?*>6E{(lB2MML) z@~fdW(Z>=6mRD^tN1FSzr=PFb|5p0ij13tb)1z>b7_|Wp8yA4%o$(i=E5oJ1PXjj* zyIwtbIM|I0`IGFpG>e7MbKKOlu0cG<^Y{&l`Xg#BsZ^de(UoH_GrqCTNOX(bN>=vl;LCwiC7lZW`MnBTm351jSau^zJ5a5o zm-`R*1HPLxZpf)uv1x@Tvqq+$^WE(6M{$itzf zf#=CloK-$7@IY`v=#}u=$ZOPGokne(5%ETeONq|N)QrUH*y+g0P}5*>c_ForA1E0? zzDn2paPA$sH{{-*`)F?G+znS>zPj`3^xT{CZZ5d3Y^P;m=BG8T)Gn;vwBk+KZ&m0} z?bSMC8ss;0HE2_7TgA@)o(==8v8MH*M72oW!0@u?$ykacVuebjIm0u*_7C=CIWHRZEt!d>vFg#n(1gJ4Wd~WV5yg?B zDa8{?vx7sT^^=Ct-rmnS!Tpr?4*wr%?_^ZY`W`iBUwX*b#&guU%-+o?P5y-jl8k&F zt{d7Numr4ucLD>0Z9)UW-$lAm)5O6p&bx`5v8A5E+nm6sbuB(BwmkAA+zP$E3HF{TiF1uaM z<;sp){`x;R{9l7MwR0-@{afhEbibm-6C)!v17}M|k@r@51daOAb};@W|mqlWc8 z=X|fvU+TTVQ_Zu+S2MFxh2JY4uW&dkBmIPDyQ8mBmfV~mGcP(k{B+>XvZ|$PO8O(K zON+af8iC#6#qmt*URxP8q(Ae_^gWgKR7R()=Got7UCy{O?KN)&cdlcXZL0N#WLmss zbY8d%In`eW-U!U2!r2SKrJ;7j*WVNGLI2|#_;ioc^L#CKax+Vn}W7iCGZh~Fr3UTa>sv+%Q`-KBp7 zE64vZHo5wHJ9wA7zjd|rT=2KfYFDA8Lcg3-87+K0oE43!$(cm4Mq+CtbAp@84wW1) zzOOi0RJFKKNzbxdgNq`u_>I=M?X07(Hum;5U`73i-_1oWs8{IaGZ9F<8x4oI zQzOtFxT|bn$>gFx3*RkRo8LPBXx`|&^|{|(?Qpft)gP|DmHTLZ<&qV#C%x+{)v0l& z+Vz!&WslDOs$!tpnL48z9&PkU{j1e_X7+c@H5ORUShrfX#4m+k3e+uYQgVCIzQT5e zvBJYeqe}Avo1^2bOPs4c*F1BmO0vmq@!ycSCTDEL3l-{Sf0Xv1d%Nu-op_GniTB0} z!e;{G%YG|4Pd~D6ihnA3yUYFGg=N>F+SA6{1cZKPN8w!RN{E}Zge^1`lyl7t6{3H3Jihc<_;aHVD zt=i1$CoA8PvpB0sPS?t9Yuf8?Zxm^`sP?joCwwnBCQ(nRk1fZ#EM5|J2cIoV0~TYh zeR6GC(PgqL?+kX2{cdgVyyU9y%A=z8Mc4DbF&V3~yHxlvXH90*mv9cV9mc=E-MR{E 
zuzz%HC^Im)tWoLSlHR3D%SH!Agg%aRi{C-_)tekcoY$NiT~YT+Z!^CmE$L7As(9aY z{a}C8_?f=)Et6FeFT@^?j0;_(62pS>r^>%AeOsfJ7ClO?!bQ47 zjY`~4RrLjtVc~wEk-=BFqIcP8dJPXK8gcFY!tBDi1-^ne^K0aPlo!a|o?pFmXyOOo zEft$q`McthtnL|KWq5PWS6NWIYr{^Bm({PzvLR{JZu|gsT=<@yeM!A zPBEbP!Q!`y>zAAcjKKqWIbstrKaA(w2E1mvnOZI&lvAp=X%4@)bR=V z;0x{Zjd*f=+=WK)SMcfJG3flI@P zTEx~z&qRJF4*pCq5_lp|C9ta8Sze*6a_KE4vx~14Jyhf?+C--6>cUZl@uKxXhtb?$ zkhLXyN5+f(#=h(Qp^SMIf3Low&VstX)?8OR`C<~R<4Bj586@M4Mjy3Uh{H??s%M#lyu5Z29rL9lv=Re|^4;){2-bt;r z)2?Q&T*nReh&7OWGO>&ps{zDp-+;|{nHo-mh&_3fyqZEh2!k#Au<{q%-mx7ZW_cyM zZ6nxy+|T~X%gF~4HR7kq*eNIDsx_9~5i0w=LoMk=RA*@%+8UfnZr+~Yom3a^Nqwe4 z#7Vax1OBDpv%&engS^%R?+Jbv=n&Xg{y=$sI+%OQwv=37(xPltc%3!VGdZntdQJZZ zPfhnj?oHmp^qv(qRk^d;tyQMwOiWLBzw#gB%^C(vTJ+^bsBC7pA3E( zm=>55oE?6GJfKA}I!s1~M&r?b@eRpU#{JZ!_q!TVCv=Q`5?LDmqaM`Fu1!>%c+J_x zvDVhk$hJ1Jyv9E4&Fs(r#2)wGcutxV&weC!PrOZ{A3aktto5v)uw(cj`;%3Wj@PkY zSq&3{PAPFuygycUkLXE!3R9@QG=nJZa_Wm;B}?ZC>S(5i->0tIqf`aB2fP0q^?_{E zO?oe~l`aGq$%|V?j!q*Y%U=p-(0BUd;Q3G_ddYIVbE9XZubyv)XSlnzJKtT?*ED@V z*5sU}IgadCGuouR@B7$$(sR{)jvl-}Qz>wWt-v}9pX1B%!_ixTiH}O|XCejBXJRi> z$MFD}CzL>py_)E3Ib!|Y=xFOiCPfo`m)mX6*`IKhBkCw*@Wk1B>w4Ui5ArGtxZhP_Kkm^L8JVx}%2x3k$ z$nd!k+eS3b4`i^ciiTr7lNrW5`yEb)%R`OVG0xYhV)d50vUj|%vfuCz@lE#5@oaX- zsp2@n`6>MdI@{;7lVce5**UP`G5jUgIOdCuh@Fddi4Uaq+K<#f?54^Mcubb#Ra}c- z@d7)L2_hTSM@_NEJ=rZDfbaY$KFg}fU+IyzlKiZl>=B;lM6h7Z~@o{~^F6w1=gO{_v`X=LC$6o7Be*29b*E(c09!F!Dhv#rP zdzmYUm$}7w*4S>?$?*8tcG*_d-jax_KGcUEX76vmj#`BqsT^LNYJi)JH;CYGP2Aui zqUxHHNwe_Z$mdb)F#Fkgy}&N)nbhuoghLD6bU6|SvN8sa|oybt_-l`{_ zqEQud^Ce#7WyHt~W_Nrs(7Od6ZYJMP5SMwC-S)o{X+#(+dSVASb}Nw)6VSRkvU4zo zo%HY7*&Bz?cR#z}ojEs{c#E8u-Eg6a?ETt!US*=1CKJu@B+(3+tk+A#>R)ANcZua= zatxklH=+SiP>Yb)H<1y3ntiZ!mMv65Tg8s>8lrB>sLbTFo+B<`D;Xn$h>^aD2=N{G z{huZV$VKGOUF?t4XJ0xuz$Q6Ov(qBR)AysssCJwU6V1uYCKU$>)2y|lfB1ivMS>|1m`N`xpkRID@IM6DZ}D2net3k6mo=z;`a7`| zzY|ro1CR73&iE=7Ju#Mj_QC7~-_AZ#dtROK*Y-z3R02DyKrtPOsP4_Lt%)+bj&ZaD zj@^htBbyq?j)9tPV`bEv(FxwTfvUR8)kwufH{_n(xh4Vx9h}vUV~VX-{IcSm9XzKW 
zdwn^Ulfc;kM>}#(V~+YbUYVU=#rL;MVWW#^?lIgeEu|lxWhc}JKL0~3g^TBPPQ~O; z0)xh|_g;tCj9b_@d<|HB+6!=63qA@0dL9>X!y8(Pud{AXpCB~@h5*5<}8Tu$udynuML52Ri`TQc-`X<&^ zPiEv~)=p-Ur1#o!@ct+fOJ|^jIpk6N!cNd4_84!5mc9YM=-LGBtN^=a^Zp|*#bK=E zm*dQ{lpRS$RBb0pr#jKVJBdPBL45NA#N4cBmt+gn_$=77g$Sd)T=zft%d6nX%RI-y zuJ$%$pg2wh{xg<191D9Y>%gpyM94Ua5qh1c-Uhv{=UI(_>NUyvvW);45hi^}1 zJzr-915jx_&b^gYvD2^43B2DR4yYbjH<-98^_IJtwW^cC)qZ@xpQw{x;C=U|u%JEo z*A*Hm1TXA#zVvhaEuN=7tX&v=XMXz>nu^okpazvr&j2~~9X-Ze53_6a7WBG=6`e=4 z)1yH7Ex0%PR{Z}5-93Dk{qR`z*enL#>g5>b`fZ$%1&2KVeNSVZcR;B_!R|GdYh;8} zU>A5FP`{hpq;v4>LBz5A2HdAZf7N+E#ZLG%@b4a?0qZm4TFk5y5DT$u+>x1&u$(t=?uLgnB6@#6``WcQ?*Q0m1(I=QF^?U=_mDk%z&*tzeFJ_y4o5piWX2*e zWIXXFPr>Iap%;y24%dh)d=|O&50v&3XMX`CETKxum@dJ}##MC^&$`aPhx^ z$>+f5Pq@}DD0(^LKMe#QWo4^^eJ5DY8t|?TaM-R;$~xqsH5E(Mj1iB99(q8}A3<*$ z!Ji7?=`P@VnK*)7VBDW@k*aX5!Hm2nSh5=^O(1su0Y3l882d5GbI|8#VykWfdwyeO zC&B$r0gFoH2in2L4MbAC2TksQpLSyeec)zCfMi?nstb_?%ZZz*#JV(rPS!v*9*&IX zHHa97+jtkJd>O9VmH65)Yte&M8!Zx#2WY7`Yqpdq=OtiYTkiS|+^ZEZS4Ys@@WBs= zBv`_88Z*Pb$lw!n5-fu=jRo(g!R7Lx+NNC5k?)^E1+UU4xDS}Q8XDb=Or6R4s()q> zY*2Sc7gYEI_jSR4*6_TZVDq~?;X7#Qd9cq-zUgp&Tg?i5!E6%DCX)KUKJu^<((?gi z>0jtmR^-BMtWP%3sSFhV;g<%iMH-k9;3{>9Ro>Gc?mZf;IL-L%@FQi~U1H7gi@;Gw zFsk!N0~>dK1b(v*Tx|qhv;qSg0;enBLyHuLy~^i1!SRmdV$23|-C5fg8I$xt@!!_q zr=1bF!TtKc%K&1PxJMTCc8cMJ^;4_;CvZ?_J_Bp1663GJRporDPH#phRGNnbP&QRC zCGT!$wTVqZFX+G-UAV3W?}~cu#~QZ)5)N?hE`IL~9jdG9Gr;v$@aR5f+z06}2tDI& zsPjhV*#PeNGFSZy9O{9Q&j39u8HsI)2}uic4M z&EvPnSdH~uy9hjb583m7xbHTKis!3nTVQRuncWhc_79@O|d>o=OUbWv@~3wE>zUbTRW^xRskM1Qa_Mm)-` zsTj=RU{VI0M%iT^`0xob#;Nbv*F|yk{WyE~UqJO>n0GTz7yKty$5=tdKf$SL2g%>2lx<5%Pk2 zfg1^CU&K9X^Smf$)ZsVbSQJ=pgud?vOINXqwZR>A#XbdBO%RcyzQirT>zow-y2R)j z!~Z+;+`r+kji5Ux&r&w3`Wn~aK23S=1f1_ljjAG))Dg>|FLLB=MsXGys(jXLW?0O6 zR0gI7tKj17JDFLS75f{AD7&>2W7?0z(OKomOU%>-+$uBsmEeVy=Y+t6GIAQeWaSiN z9)}h`K&$LXw)AFn4P`L=4kqM-7vFGwbDnwvePIpPRc5sgvBF=o-ru5G{LCFPndx^a zJlcznQpQ~C5-B(ty<`Cx(w>>*KpB(3xrx|q)v>wAk|j53B;QtGah5Zq-PmzNU4lIa 
zlYPp^O^54+OR5FP_`^Yhtf;joPR&^r2_UtF1%tYTt|`Y4S`-9ZE83ea}3(bgG${{RDEFC z6o`EUg;7?8Pwk~zV ze*~M`@?>@Sy#(wo!)XU`ZaOkA42^%n%3lw)-pd{Sfv?Qxike{5TC|v4pf!(mABz_H zB6n;K#r5DB{o(CVM!FO^+@HHtWbG2k+sIQM1je1^>Cd7u{|;Zdj~MxBXeYlC!P*S! zevmZ^^6Wdnb2nJ39O|ZE{Xb+Qd`C~v2ROQe2(*Jpx6?#xNzZzlobF>#wvSbq2PR}; zvm9e4;ycP>b)#wBiB3NO$*eW|5-IrtvmVF1enZ=x1=p0VpbF7{0pXv)i7txI zDBykUklKtQKvlz@K-vw)4Tcv)ptu}X{4sQ#1;Bh4(8xve8-*>ig6DsQya^@M>AqTu zW1V1}S2?2)y0^lK&O<|w!P{;||7l9+mdnXj@Y&|z@dS=0(e4JLbG^i!mF-X+h*SX^ zn*&|xl%muKoTMV>M!EAzBu0!?Z2-^91t!0xc;i)8O_HPnE9&8jHsI77+_M`{AhHtv zFdxa?ocRt#BDaEaYeQu%!D8i?{SNis&+HnJcQ^~(sTF6nXZDrAxht&ejf~35Dh-A! zTt>eA#7ee89tfw@Ia?jlFLRfDe5=PbA7W2-176nyd08{@t2$> z9q}l9|5@zSoA6Y2Cb#^L#7B5h4iGKYfhsDu5E4IHJK8>{>c}YjZhL^r{WXb@zTMH!QQMJk-(i2weuw>7g)$x=fEno{aQ~1nKrM7Z3SI;l)(uxfJ@I zN<}}@;b2fGJroXBqY}>ss=3dn+TPsI?8w)Nn(SL1k31j!$Tq}#$y3SshNH4)uJ5An z?X)JDy)v&$@19nk-Zqn3r2dc7zEAI+*3~n{dCWQ8^^EgtvR-#kA8sfW?#{)>#!ACe zLN!B0!C}<6xhFI&y7j;oM`*W){ssopEx!; zTe$voE^|If55~#P{&-;h&Ss7d)PL%2bJ>=Yi`Csa$nq{4v@>~)9I*XV@HvVvej&c# z6~JK@^>J^eX4-V>sJJ?tD(#W7 zer2=Ezl$tQ7RBcUmX@xLH}>xEE;AlT)^%?655)Pn&A%n9DD$%ad;jg3S2KsCe@vX# zmMo8dr?ZcHoBuBVcGq3@g{;vQs;_T?JDiA>g!4lu=+5*(aA9Z;mFd?766K|VpF(Bf z?;|%vOX!_&HTq(_7hSLmt*ecw5jH}$FC1fBPms6UhhCqHUH$1UvVol9@0_ctJbaHM zMz4XzL?d`YAXO^!jUt4~rY)a{xz>LI3>mAWYik)Sz*^i`ca}17* zjsM{7o?hzcYWv)8uTYr&m-83*rp$?1i~M{1m9s`?PIEtPf8U##8Sr(mPcqifdv!1M zyv|w9Ti=U+7`hs402W?hy&H!&2463Kw7fd_H7~M;zAM*JDQtPv8^0&%C&$uC?CvDv zs(p*|13EXm$XWfu^|#yaz24i)Q6wea6kk7;6*Dw{-9= zj{O$qv*Rx2U&OGrXAIQ5jU&+(L!7??X2dskW0!^RwtL_(QO3&_QR$ z%k&X@H`IV`q!mKv$T-4j3WKb`izwNvzI$KlLoexEVG(J{M1#lha{ zwgS($+0`>Qc{X^x8TiHe?VxJejntuV;sLcA$8EhF+0KrRr|okc3th+D z&$xHE?sGMEopKgCpLVr#xt#;;z1g{~Og{T(*5@pJh-bc?oY~RwH)EZm>fCWGye#~7 z_*ZhKSBGka+J~-&?jn<%`nP;*8G1OhHFTOvW$a-ETY{N&sn}PQ+GMYjf&E>$X{1to zn6(2L%_APb| zbkuOq^iFhrW_)PvV*B1WkDqx0QvKoB6e<~Yq%U%-$O)diF;sy{uy==_kK9OwLKj_{ zuA?J+D1MY&-u>~piH6n@wi)aRRiM9m%>JXjk7GG?+V5d6tvwZ`&pYpN_I1o6I`?L} 
zPnA#!$ZB1es-={Xm_}@GJ1Xq;j$8Zb==Mxq&Uua;z1>}Ya>>y$Usxz1W@pXwXz|J4RJx&#+Z;k!dW%R+iooea#B&Nh)jr|<05?x8J zm{D{{DGv<`HKWVw<5ccz6#63gJQc=r=)YS^S&oWSt8;|fP(kz^x^oT>uMh8!jEFXj z4v5|#uWy-SrLIrni{xYW8{I2iv+RrPZQTKH1@9DhckhCfO%D&R)(p>1>%r zpT$~sAN~8bQBAA_3$O?I(9b1S$CnYibv-@6I#4gZH+2L?hT_39!2(`uf&=N9*D}~Q zI4C%n>S|5sJy4Abvrp4|v`1(ZJ#O}edV@8&VQ=)wxN2{lj7^Tsv>bMP;LNhkp-SwH zo=Tpr&V#N4zJ!0Hr>SeSr+HeJv>WN5x7%~sbECVyD~C=;bDf{s3XFSgJJ{nj&~Gm! zHphC=5pHhmpJ=N{2AyM?Q8PI${A0LNqzFF!F*UgkN9PciogJT(cn|O9$mDU$R@=|e z_)|tNdu`VOS1accM_pGf_fvF0?n=kkHdHp4=-BC)>X>GqWs4X+j270%(5=r?1FLI% zJ-eZOi8b8Fp6~Va+KeCtIz!=8L-j*9LH*s}0=I{T^8bGL-7VC-Y)$XU9C}cW36G{D z*c56zz7ZRdJZd?dXd62pA5Z1{CeEj=Pa3n`Q+#uY<~il5oBp}Kj{AV?E`K=vN&g7X zHjl%1%Cp^71z2oy+Z-3j)|hHPWW0$6*wFGqqB?nd&&C2&d0j_LcysDm45yC6iSQ}9 zAvdPCXFTGdJM%r{@6}FBN?CnPtk>D9+h^DY+8R2Bx{kX3c21=4^Hc8e?hCFxbl2+T znnBh5cFu2zxtM87(2;jJ`{gw)3yD=Ca!+V{xII#NS*!-0jzjUrvC|2w;|6yR#}m}! z?(6>9`?LEASJboF-_tk3eaU^l|EsizeeFGOdXk>E+-}!?SHj)hHPSZ3dOvs&wXP%w zqgSG0d|33G$RK#kgvghn162AOjZ``w`VoDrMd&s{Z_uqa&6jgoeGE z3U9OR?Htdrm->q1UFRuh8ht!2xW>YX8dBHc3>~$`J07Ex(PUc(qYORLZ+$=c68qxm z@piGPQ6K$47ST6hI(^^X4wX?)d311Mur>JxcQb;+!KT40ff2z&=zZ7GmANUMmp`P( zQ#CpX4Ef7{sPZ>Ph;O)|7~d#($@GtrtbVfXHOzc?d~U> zGwhd)m2l}I{3uP5wGumGZ_pL+2>kCLxL=A^&?(#sU8Q;WUO3~g;mMH!(KPA-DqH0Q zxt%BJX4Q(E`-;X=vQxsgC+r_m_470PmsCW3mi_gwsQx;Q$}Bfp{|6_0Hd#vMcylt2 z#*znFhq|0zs?Hd3KNV|QlB01wQ2_q9lbo1}#F3QHsqr9Gw=K32uCXQdWo$Lk5o=vLf2DGq?{cw=cfY!V5l`bI_ytDdf9ZxFqa|EPp7TFB|6RQ8{fP&tNM_G>*k-fH zp1zf8BEH0B;xsmpYx=+V5?&w0*T#2K*)mR#u>nM2?4rg>6Bn$XEFknC5^2zpTO!bxrz_a~8p&At?Uc$dyhF|4o zd~Iu}WR*+x!;aRW)QFfzt%xIJab2aVN~Y1!Xl&FmDjC&`s&wNk1!MM5Lt`#+aXpFD z4Ws`qVHVw3pHjRTYgp4;l99wotmrM+(Nn4J@f4Qc4E~==B-94t7!FerBtchz+ML-B zpUntnvJmgq9CksUz`wB?ui#gBZhpkya1k$07{6Br7JnR1NN+qBL$LpS(6?$MoCVUG z@PW)i2KP#~LnCiZQCY)egr>u0G$=Y+^l9iU-R@MU}QRe#vyoyR)ewHZ|uYj zR#{G1?AGgu_Ir_92*<5KYYn5F(H}~9g(}UfjL)f>^dr@%_7FGo1wH$g8gq>&sJzt4 zs18&PQ90oqU~&uj&}WE)cnc3iPvoflJ=?&5H}LEaPmQoSyV`Z2j%HB7P3+g-#*We` 
z_TQ%A!C#c}=^wxg@)O>cJTNH@s!$yR#YW7+Q#Y4bf@OH~-od}3x`SiDI(eNd<1q^1 z2RVcvP&l`mXU+w(eek!nXSV7d;$mG&sJm56E?7L_A?lrnk-D5Vog3zRe+g89h_NYr9GW^5>4<$YU0cxnq_Hf zrG5rSqEv#d0RQcV$8Q{?d?AG;pK!m=S@W;hvG^FQS;2f?W=Hf<;CL%fX$4>Sn^pe; zT>BqXKb7dKDTztcSQ|>swV}kQO~a!zo&RTY^iBTWLW@5ImpVrb%@wl8YN4Ay3JgDE z9ZK0ReiZNXLMWsSwRGM9H-3d@)-_t7!;LjwfN#vAi~1|ZbhyGeVGB2?6uZkG+@zA{lE3O-bwwEkLi%UfSS=?5|yS7efbG1`|~%!TOUNHodwrA!>&OK?)?~% zEf*~nnaRD@=cp;P&3XX-U>KQ34wdq1lIvZAS9NeA2}T^Y{z)eMFR9rNwccTE1Q*L= z{vUu@cY)6iY)tJ|e*j)g2KR@sa-H$xc7PggCcC&NSlb)EFo{*23`Az9Vhh%?-*yhp zozL9L@o1;vN36t4J3JnImv-bs85E!@Bwqo)XQ-q!m^=5x-_nNs`*P;}1sL%ud|)e| zmV$fF(4lV<6$?g_@B27d_$1gk4Sbx5A8;$!a1n^q0jv5F8S^w4u?5ffe)bWXq;?*M z;k8^rG?Z$9jDsgGvCgtCr1IPg^gnpe*kHem7cgwhCWdUFb+GY25Ni!lz4%?yZfQ_;p}8Bz=xS{ z*#vao;L6F+yc2)qekj(k3{D0DR4&^eekJ@SzJ&mNA|?et z3)~y{EAUUKIQn<|QfyziD0p{dSu)-J5!LJ`*=*QnpV^b1QRy?%k9zyLhP%dlpG*5Q zeP;SDf3|n8Yoe=@=aSdv{oDC9HQh%W_ZSbSGHANSTf`bgm(nx&rErhXyr7?2>W#x! z>HB?O&>j9PavxEhlZo!M#vYAlT0W+_$eY&XmfMY29mib`cQe;g=UQiDS2g#Yp0%D= z+*63UJ?zNmx3K+DvZVXsqc{iub674V7LuQ4peY9Fe6WKmAx)!C)BAEsI1nzyE}l+h zx{2XxVe)gRU7j9J4>iJad?`3QxHaGk{89c*`FrKf0)9G-jg2pfe2A~7FLAWF&I>t^Ahqw*nson^8@@cDM-re0taxyC3c2W_VY~;z6sb zYoF)!0T*_rFuhix*(JCc~@`(3%tFPt{Kh;y91>>Z7-tzFT|endC!M$d>KIoJ!L zE9u_RF>+ft9%>nGh}Up&Xjf=v=tOX4FdY1zTJ~w7TY}+0i{Sl1FMT#144f}tT)w`% zcEA~!RKBR(7b=amiXV^E4J{8BCEHPZ{X?Uhl@7D6WuC9R%YD0ik9mLc+~vC={ho~1 z)Ast_@l~PI`YO*!cSp|+o{_H2_AAD#NW{I?2QAkUcf~)9PKrE>N96A4nQ(rv8D5If z;qvhDa2|b#A0)GNPxPz!W5lv#A;+#+9x`sWd!3bB8=MOqD;#&a9`aQ62HX=|Tj^mp z*Y&Zht*fPTy?wClno-|Y1||+6Q+FrzWG2V+(Q>=c=krdmxe%XM36bL`LaVU;zNNeB zAXa<~o}<&jTk)4x4Jo_zNFYDZF4!&jLE!qp`@vA;xp=EsgYe>+_OTVABp2Aat)>2|G zPNNAp6T{=b!oAAqD}60;J`$pPM>YI?pMY2W;4gR3=ORFD&E0%=M1IBtav$)RgwJ3M zUgU;hBYb`2N^BjSCTbHGJ~esR*3y0zZMu)GDwt<+ed)>+CcCG)fABbc%f0V=x_NH) zyyl*Phhh&E_7^$&Q8#l7n*R6J+H~a@Lf7eou_>`7vGK9l(HD6B(MTV9(EmVZp~kTg zzOX~_q3BM9?B4y)vevrK*lv4}t{~0q4eaIiOy`5nZygmJSL}YrIs06D5Bd{i*|Lr2 zkqVWq4mdXq}S>@AoWo;9i%bfQ{EYaVX>7T6d7gW()g^gKGSGae>1H;dU|HeD( 
ziWqo`)bFEiq3wK4Xt%IQes3@yj*F4y_ko6Wff3@tx@Ncmr1w z4Zk-wmkvl@A(L)K?z};abg8uxdte{hp0d4*rTe&jwf%GZWA^j(8|aNEsI~25qk!sJ z8_`ZqqlsLk9>MVVR%%BKC*xpcw0iU__PToFS#C{!!=>Fzv8u0loU zY3F{|Yp#*b%Z_T!Kj;Q9#Xg%TcbDx2YmucpbtOAleDnyqH}Ma>B{?fx4BGup-95y9%g*|0(qZ#~L}dUACyLnCkl>+cDcJzFj0%-fJ6%=5rR=e~a}C zb}|>Szx4^a#JyDRxiP*Ywv?Kp55+3Qu0#(+KcFJ$gy@^mMbyZBj805%M?Zys-gvNcfZbSC?Wn7{xnOJ^2$WdSUXc^_1-b_UB=vzWp_a$64Zd3@^%A=cCRQ z&d*$ScU`)(7|sdKaz})|06pnpf6~}Meb;-Cn?F;#{wNwvBWg^%K|R-!_!R27_Kg=& zN9TPsp^a3-AA(M`ky!5Mh>r+awqg5y%(}Isj^Gp23~YdvyB}LzJNPHq=f4?S;;Y1P zs-qO*@mS4HMh5tMfqo~aV*iW15nGO5Vr%T5Sf#it{wG*-CYBz*Ek2#T0`tglm`#PU zd$GzkqI;fAp_RV0|IB4iP zyT`uQHksU%O17Vk`>8~=o-D0i)=NYh4r2%YJeu`;Xt3|0O;y9v?1@JEIM!xgbm8ZS zrT7L*X)7^UpP(;ZL&ts#O>;DQS92;1@1~O9oz$Buq~^x5`2F!V_+9d2o5@01KrgUY zV-Hdrq)Ti_>;>{yW|28Fm2T(9$xOMEt}6FZ{q!)Ef^Q zC~0oL8%0J{D*C=*TWh;y^RiOMY)_50Z1y312`0|G@fG z9BB)p3f@8wx)66uG(4-JR3t?%F*dI-`?1Wg zCi||3$S`=Dnwt+$r@C6gpe~A&+HnW)6kMXS%5QiM_Qvz5cYKyguUFzZbXusBsLOu% zAT;B}n$ru>7UtnS zT4{W3Y{QSV2>oG#F#tR17OIr~VO>eZ=Gov`E$db4FkVKRUCdmD;|Z!kOvxEyWcK1Y z*@#8^4mRLZ*ahRTPzLgM57tUGU~FL5oFm%!XS^FbsjB)ZnDre!PEKOag|T*{Si3Fo zd346wzMUMB=diL=b#pd%T#Uc!Juu=6tcK517NzQ09w(0FH@pw!_-Yzq0o@B+`w_EI zNUbcNHGnNNk$PXdv5U7`Pf&$AGvy!2B$tS=3nPqmq+@iuvkr!aTEgiQWCVQ48ZICb z`+mGQgYmo+U|+Q&BH%|XypN!Qt;}{b(e#g5il z5>pqV6FS3T2GMi3JCy?;#EKb#Cu~gO1uUCaIsY~I*J!xVJXU)ye=Erp`3XvoQyHo? 
zv@n(&k(ZeFNvtNtU$@0N9Z5`yVp)EK=LIYYESvWDdUjg(V&(58-!W?SW80p!-f{Mw$Ynh`0}ivOuj zho_(hu`<=L`;|lG#rjb`jH+ZRGlmT=zGZOK!zT-#M<-Ts8lQ@>5=)8HiQ?V51QuVx zmvn`=pi5ZLXMn~f{{F^8a5nY*G%KL!)0224_F>`fCcbJ19+nNf)&j#7_}3OfDR1IS zcoEpjVjqOxs2~2jT3~8Le7f1>CE1`u8TwCs1EPDp6aaaS)cBlKZGdiaZvCp z(A^CF-iGp4r@S~z@aet}x84B#Jqvw~V*RGD&ZAi2QQ)aC^+kMWuXEjtV97!Jz#8ub zEPe~KS6`FsfO~tO-4~8H9!z_d*gwU#y^+cXd4t%%H~IWpD*AUi$EM(8o=D93_>`x4 z7+Cobl0)x9pwIjHe;6yTOs4ywe)GLQ+_yhImwQ?B_VC!dS9#vqS zZOl;XZ{Ptjy1AfnReXe@_F6=kQic6pwLFGYco&y+NAFl7s-0or|w}P9ufT1nG#%d{Vi)yjPz^W@mnxAD2 zlu@=18dFb@jZpBq)ZdqIl?{B}gq%IV83*8~hv2CvdHn$fs+Y`PV4zm<6!CtiiQhWP z8NYDukKF4UB$oP!Z0AqD;H`Yx#7q4}^ozWITlinG@vD(Q>O=A&Sh_g%S^%cL&g)%X z!q{o>6h+soF5X9sZx=ZJ15Y}^EXwh*(!w1|F)QbK*u}_)dP>mcOVAADYl?Ebj6YRc zt_`Nu0^ZG8;hR#FD~`|&i1*~B73{^k{_hP0#V@+>>I`kG5^Z~6c_SEeBb-Pzky}CU zt$AxYSFalRY)*z>sl694DHN3>#R zSh>@zqn>b@&*#B}f4FBpBM5?>J}iCpG^xVqJHh8V!`Zq3A92mQ|9{voZYVB#Cp2Vo zzgvK4Ti~GiYu=5RcOz!5+1F#{mDn|@gr~bQ|7XGHyl_FY_lZ_e>s7!gHN#7(F=^gr z*COGXDsuky|GktibN1g~|K!MNW^)o)N$URwtbY9$YgK7^UkZPJ<+opW>Hq&e?*m)S z*B&CM6|cXG*LLt#*sESEs*e5@zkJ6%4uj!G!Md|RE0?jVuDqgRUCcxL+yy7kfuo1; zFb2Uo)jKXteU|UnLPT_kh+^TOBtvzeay>6$i6lmwfAL6|)GGB-#)dl6T$g%RAEdg_ zaee+57S~R_*W{D3DXMYDio9}IIrA^=UtF*-LUm1&?6Lp*n?7l$E5VC_@z4CM@8VIy zB=eVe>bw4@6~n(Touk|v^H=3R>KgM3U6tf}2)Zjz;bwVCZb^dWr?Bt}&?^8wqFYrN zH=l0y`O!Gjkp`JaNaemLzFYHFbn^8nyfUFB{@R#V6OK0p$Lc^?HGznrAxKo=|H{B2 z3tAJ-38S-9cb2sEr?{~*1q)G*Xos>nV4z}B%URa~Xi>bQh%syAqRcB`Wq#^+9V_5f znEET^y)gBAL5enY-UTpMSa}BO65gKSrSHPuzd2^&^*`Ldj3d85i^VBixx)E6TM@vj zr+%6VZeu32e-fON_e>zg@yJ-i*lJYv`^Oh+?@d2ZIsno1P%N6gEl1iU&r4k~M|f;w_RC5ms1P znBtoOelg?PO7jxU%jP;A!F1nE^)W0Mb_GA^52`nwOVWOqo_XQ!AI?4yL5inDwuWmj{TcUset; z!AVqFF$Kknz*P_xmSyuvSfr}@IwOZ~!Yg&&(m8^@&epH$V5K{`fUTRCutcyn@mjxV zoPxaG|9hD{K&vADENl^{*2|;`Nhf8os~UlhYMq2};&6IteY9Tx@jr1wou{kKyXboD zYzgBiN61Uvzl53kO*kpo39B9d;;)`98We@bkr(1y!Xsgn52$5p00P{p2^cq?yk|Aqc+EF;-5a3gBj}3 zX5vSA3d2g7or$Na5+Uv=jY}L=cqD#Vnwq`NF6IvE15lniE(ux4QSn`2qs|L+h3Hgx 
zEj)^I=0C{yOPm$rUb#?zHu*Q7ai6~EU4LLvDCa*9-P4YyQlDe{IV;Yy^YH&Q2AaR1 zzecdv?QjO|!^Yu?5%MRbmq>P6$cj@QN<*k#xM@RlW5mf(Wlm{MS}iO75eLT&-Xr{~+)#t4CF!zS z7j=Kr3W={pfk%|HMNxvE?k65#@`y<4H_21U@KPu_3Y9rIJIXl3F{Q1D>qsln%EY){ z7?I#u>z(K;8%T)vn^>30G10BQ ztDBv!6c$UOh*xCr5}y^{lZIl_i>OeNSe@#$P8zA;B3TvZQ-FDkCux?Nhw708|NSIv z5~f`P63WgmK;DV3iE9NplAq#&Cf@2gNpwj>X=Tb=yowhkAIRyM=6xjJmGh`46(Th+ za8`(K|49*L>Iqi=aSp*c0gTlXMI(&>VQ~qK-Fykw>Z~VN$l|x9Afx_!g1z{R>WrBD zOi&kHnleL>(K-qn#M%Diy>5;Rf+iQySZw@BTQg;fIRaCA6P*i}M8o1+;#K0%VPFv8 z7h#UzDvquGi*YbV-y|t@+{8t#f+*B{f@UPnVB%eZI|@F!mx;d&h9^k7GkJ|Z{ilU$ z-2b=sCf12xi!y|TI#>NbL{CosglWTv6yc{uMCH-4CDZSA=CQU_H`JW^Zd`TPZZ>p+b#ZXzBF!)T&pYiX5P_Cr7q1H^AfpmN6kdjSWFSA>W_=DES!MjGK^YraM zT}fB{XJ46^qchFpS}{pZNi$)+DanK>(*HyelEk{NFveV6t+Oafv}R(QuuAg~c9^4( z2IouRgZR624^zL>eFQ7%xatBcI0~+Uhxnjy%=|?%S-egoF|pLk{r(%T<|Mu=ZXx=X zCMz2JkC&S=Rv1&pDw_JAq^+n>I_v*ok+@kAcp*DN97otD?9%VTK1JI|j!LrX3~@cZ zOneevN*Bya{URQqX9~+C&GlqS6+KxMIMiEI@>%zm6c!H=pEd15aV7l|bWJHE{viw% zoK1~F{!McoL@x%IAWTtYpw`a`?4;po9kh~Opej5PB@2>XzDXA{VXEJSVX{%A#hLUi zs@C(x9mENQ9Xg`x#ZSc_^^49j$7^Det`P=_;?3h4gJ@npT49UH<;624y~I0oRFX+t zQP(H`g^A>+~0W0#nI5QS}WQkVdcqDbaDD?!{pK)NJHL`LyS=upXe3A1VTq`X%!1eiHkDVE( zrSMgHqbg7d0v7hJE(0MepVi$?l{^%$nvOPGlj{SFPC4?bj!=v5(z?rmUUj@NRk*uW zEzI1ibGGhgYGU?(?fBrV#!6Rn7w#Nk!7`9&Q<)om|EWOyT>;Q?RNS3H5Wr+P18aN!(x^aCgL z{tWQ#AI6`-v232E9*6~8Exo|TQOQW@TdEjgu$otS3Hzl-s-~NCHNnHQnlv`~I>pJv z`OMDisv)Q=dKVyD2ya&XVYM%AMc=adu^wrMn~w<4`Z>v)Nms8O1@ z)k5T4DVn znJ-!9;B4`HX1TM+L)5L;!LK7rtw+$Cax((AP@5hGB zRdob;ETsD+8T%jLN{l?9N{pox-X%G!&bER}Fomn?BB?qis`PgeTvI)H~JG7$u2yt*Ag+qqLzU@1h4uSLsW-r?A8HC`&gGe+s6~ z5I>f8LpXemJDJ{N2eRi6o@`^zZK2pWbGXVKs=%viF;7{~ip5hc3P~Sz@sup^!^tFl zBnxHLtKNn7HC@b8_@`=2I|C&r~H+ftl;I;id5jvSwKUc1#@n~HrdP4X;e#9R)O>)aVEh~cCT>T z^o~l8lovqW1XWj&hsoSMN%CD-CvK|yNs7tNl8qwU$JB?-|5cY&tD@I`o(NOtmE@8% zm7OAON4lXXR#*-hrEk(A zwb!UV%ci~jpJzdKf^=X}pzu$8Lw+lHwB^B)M3rVG4)dS)QF^#?gG{IkbG4p<6co)m zw5F{sy4JWwwHlqtOC%X(Ex7ni&y_79$jFB#8!XO~L*PnfFxAxZBuTY*AqvtzSq;+h 
zWhF>gxA09`u5426k7y=lw^L~orl&{uFm-fk>C!XAPqez?)6&VLflE)72T7dEdC@3 zRYZ~xISl2=t7U_m)ZmObpNoK*a98kAb*n3kCNYdF4zmh44$GHzltkj&y~&S0O!C5<=WZ zG$9LB_-%TB<^RM zJTw@&>;WfZ6Ws{!Wcv%#q)(XohejmG%G)nk%5sh~KH-`8nJ7ziE6kThB`Z<<$@JLB z!Z)p2c>*LkWxtxfR8gntlaVecx)=1dBasVk3H!8S@;#frKGVx0-CHpb($Tb18k>B7 zssbmzD^4p}ZSJE>uNKs$qsbE_j14z0ANHz#Bv@5J#1Jx84 zj5XpkMl5Nkr~z3HvWW#BNfoV&JUSYiFMJec$glICS6WXKrU*Ob;nZBWVSrWvXbP#(>EQFcSV1a#MgQ0lkaQ>FED?leI>s>^42Jg1YJZ`H#bhCz_nw+ssb!Ov^27e}a&y3H;9}=%? z)H@81V%#soXBr+a7zG4lFvDhor=RiejOWH4GV0DomB+X~Bda#LZ^mi`yXD4bqb6vq zMX&;Id~JLcTn}R{jE`U!&Zt@hccrl&#``g>(s+-?6&TlJ>;dDNjB7Bg%Q(e@9?v+X z49hZXz}SaD%Q39S$Q6u-VKKqy!Ai7ow*L1pDpEl#fl-w*+DO8oMcAaXRN5X$(f| zLEm6}Hh!aFMgMsw!=nG|(*-+Q!Cc(%`}lGEF}$s@zk(StgXc!&#mIM!ij0xf8yG92 zBW!eo3|llhBnC#t_&WG7c4{zZ4n7Ok`i;}VSVO}f7<<&H%^Oy3Sbeb5X~Y1=`v_iP za2FXq&3N{|hvD&zxWTZ-peHkYmEp~T_Geg%fi*DBL<4Nb6>0qZr+>efj@&p6A4W&nutcNQ`bqzYpZWf;oo3+`{?|7# z*3-CqM&;{&?<%-I3=e6XI7ZxV#-0uCVZ+M8u#Ul1G*-}9)8MKa>m2OQ8S8J{gxFnU2ov}at4v4et@ zPXpfL#mSWh5VJ`+A->489&y32Rfw?oRC-}_3+`a{R z&%oOnSU>~G`r+Tld)Uvnf33%exh7j92~o z|Nqa(a)O!oe|U>vL}gr=ktYYM*T%gF#X1>SHsixUO&VxYqw<`BYKnm{O~P{nv0E6Q zgUCt)xooU{@NWajY&;9z0poMf7Z`SFta}g_XY@skp0aTtf<08DyBF*c8=noEHO?pF z9vjb$+Hmk+jqe5@Mh;`#zu+llc;w(CXwSy;|DOH#w{fq7F`IEh1*;Fn$q>v<3~TuB zHyZZ$UtJ;?Ga0KCL=zg8VyvR^y2k%7FggZfxpCFT>I7FHh$c6_^WXZ!{d>*EZZ)cb zhD{p&(l{-Qvp9H48|QS;>;3mmf|;)2;|*&h;rN0bLc=114jHeSj2A4V} z`8a`tNTP78m{ET=JY3Lj49hX_Sp{H2hVqQjxiIdBQH3^a%7}o1SOg>6F{%kho<`y) zLs9Wh!`(D|hhZ5;?qGDejH-;`RUKICV0XjFMh!N2+rOwQqnlv(0HfZLJ3v@+-*%4 zX1;38@v<1o{l2L6dy~=BLhJ#-b7??7rdEmh0Pae~w`?t2@?kA+BmA zd`NX5=w8D|p8@+z7#T<2f-Y7uVi7r$sEDaTGZ{~2Lpdx*Z%6;iJfemYbHN8xTVF^H zp?94%cpdA!UO%lK*WM5&D6Ul}3lRzw-A_ZG zB^DDf&oCL+ikwZAAQY*%a)-@i`l+MI9OkQD7}Kb)$XI5+c3#QXI+9C>G_cXyh?ev? 
zy^EGZ%vO)9lj*nm95IK8g%Dyf`4`2>-e5;bnR-7yUfU%U(r4!`O z$oWz`g@c;ec{PHpON`buYPeQGu0(HRYAQ3N8*~FYQO?tP6EZM~&5g&bv$XE%Y} zUG0 zNS8WO%~ThY)2NN${P}>HS03}(`w5i@P?s@BKZcuW>ZD(g4yZQnkfkhrse^y+|LhX*;R68xbR;SQwNsrEG1LdV;Ek2pv3`U3hn0`ieOYf-d!2ISI zuxstpld0CkeYKMAC6Cjo#6QX)xtf|lda>sl6DzdA`Zj6?@mbxjucP|VUG(47@z@0` z)z5M#%Fd;c>-6H#kz5E~oSCdZ7uA2&-a|2Uplu2bdQh&acT;yNo3y@k zG447wQQe_!q?%GA=w)Dxp{cgy4E+uiovq{n!bGm5FK{VLzH&?~r%k2Dvw6e~b&oz! z3s?IqXSM582Ik?XVRn2b`IU&nP9DJi&Ap?1N)EURLX|<<8qB8@QFnml8Jwna9Vth7 zhmcLvzQ`k#uj(M^m}C)M=;P!M`b~8XQJdC@x|sScONCJ9wZY0rxtCgim;k-;4b)%Y zKFihG>%+;3OdEb0TbKAr%L2Q3Sxm`wC5uoeh~bzMuR)d}+v)AKW%^Pon>tUN0FQ)2 zy8-WW58OQO)S1d3>P>wa(UyD*rl3<`s~V{v!d>+erN{(gy=KP5@j#*_xTOM^(Ee4I z!N0u}ENDeB1#l4lwhcHrma3|9Nv(!y#J%9C*aud^N0?0itVN-2=O5n_dXBxN&+fZ5w<`ap)kf)1IlZ zVDW1VCC#;JHEkfKfTE$nI2Kd#@06x$J8=G~N;NQTHkIqhf5F->D%ns%94v2_SIY~f zE8=9SHum5v<1<)!SIDAdlSe9DwGptRRLohgC6B=$b|I3;+GKw)IJ81;F$t{ei;1tq zNX#M|S5=mYVmGm^!GbWAjo{o|Ccnfq9vsaBz)*dLWw|iU55Kmaox_%Amof27edZeT zn&Frl;KDmhf1+p6FR4f1OsGmOCCgD8sq&PaEKCg6|Hia6Yz-f$)KlO|Y$Sz?jldq> zUPuqz_J8&r15b4g-+J#lZ)vE+mIZI)G5=YAcmE^bai7WG6};BZeQ$g#z*o~RKm>mD zzx9>$&j_p&?tyD_n6L%BDvCHsBBUtsq!1w;Qp-_2xS8fLwrL@K!#_lpkFF5YD>g4~ zTY@toGGR+xi&%H`yvXKZl^x4$wQLn_-E3WKq^+X0g5`wyjXB>^({{!-&$hs3vbVE~ zwmsHsWx0w`D~tEiq6Hed)jCJL-Mvso>e_F6$OtM_gl| ztliDk&(*`V(3R~9hyJ|X^=p1+?x>t4*~PL4WPi@CmD4O|TlUJVv6-hb4rbKK{E!uy zTQHP$Hq|!5 zUf%xRw#s(TI?mGEw4E(N*CA&>?`jz70+&FBzFFNWZ*E9TSfW0|JkH<-F_NlIZ zGJecR&CoLHX8w_RCX>ue`+h&IZ+2&qwl6Jsx#Yo8iN#tJdXVHwj43!ZrB1Om#cLFK zSYS?ML&r_aEWR~0O_?L`LIxH)9LZGmN>Nnnf9{mD@BA+bZNAg%X>dxm%je>G8&r)bNF!{FhY1zpzr z>_D^KrrEr<+qMI?71od7xt&Wk)&7$2h=rlly~sb>*T~b=nVy@UGd_1n-eKIut9c9a z|8O>izVHvus`;yPTV-cuEKPrt9+}xF>ljq)?U|j@3(`LIHdDi~C3-=^i2^?-l#4S($AsUspEl3rKGD&ntR~Az;^ctp?de*cdmy_} zZhiMTVZ7Fq8pV8IqxgO%+Va$T+^#yxhD>y{w@dCq!D)Qtg1|9pI_x5_3;v_0Q~^o#O1|Pm{}$Di>z2644-LeXJfcKl)14 z_sD-Dl<;?99YcrN3!1u-cKNcO^*!^d-f-VXFYjyYV|^#R!@P^V$-XVV2fjzXivC=9 z%Vq(^=l5K4{gU4}_ifgyjN{+wv|(R&eBSr5{fArc|9pS?eXaL@z0dvpIET__L|!e{ 
zrNWX5Ii=v0LSKN?-KP3-L>>fKZVpK@9ZKSy$zkrRRf71)dd;E6KWM>UDoC;(e z&petf=P&UUmcQ$->52Rm%Vztf(4&#xVyhQup13)|9IHo+3T!F$@X+jGi$$e$|wE&d_y6iNo-{fnVCp6zSzFYHhC?swhHP0zHZcTFqv zE&1!~&l5hSe2n-o^nHc*p&uH4zLD|HSDarFYbyGtq`P=p%I2gZiGL?uPgzmCZs~rd z#}`uztO#q&n~BZxP@$V|fjiR~>00j|<~`vX;QzHo_>+F*>#>hXlX@mVd;ZEVmA5{3ZLTA)Oa7lOvu~BKLe5do=`L~|yTcL) zy%jw%zCgnM*fWviLZj`2turkz%|DrDvOkht6nDVk+v!>F3CH!;_J(;Ic_V$S|Dj(7 zxA8XLA#V+De{YI!ygw>1)IY%6*VQ5KO1266b$UjHjG5`BzaRfL7yYLs3y+;T;gcITyd4R6bIi?c!zeCQ4jt~7U6tg!!Je`YIWyJi^! zPj!|2S#!xXB%4?h`WgiT`=KCT$QR;`_x$Xx<*wpx?f%*Q-aW;W?49my@7?8D?e6C) z;w1Bv^G1S7_;SwBoJ-kHv)*Ku$qdb`ko7Qkmv@P_)VwczcnlxYA)&P~wHxXHCE||~iD!jG}x`03G1Jxhp>)@(l<(`^{+Ri_= z%(i|p7v}$AnljVbzxf|68Bm>GV)=>RLYE){YELLUj{*DDSLL2GN$BR^>22#7=Vsgi zm*RTo+JvZLldGuvoSXKfxtF`^xvRM+yI;FwJtaH=_cC{$tBh-^lXJ#6UpY6qKll`B zF3FnC*!nx_+wXxvucY~uH4ySB@@4GR_#1I=qas3EStfGZ*s1(BOLhBGd!E(9d#JM7 zZRvza$mwbnnFyY_WU`3XSw1b!5w}Qtln>elVgz)3GxR1}zA_0cRA1E))VOM+l938l z-C^3FU=^RL_SMq$7vxF0F#CYr%pPJU(r+o9a?=~wT6_spCclgu#_*Jtc!T`>qh>IP z9@BivQ=ENQ1FHiygvMfZX{A&}st?Oc34Hbs2CsS_@SEj{1E7177MK|55!e7O!>-Uq zA0^Pjq=16mkQJy2u)=OCo11RFYOZ9mabuWRrWD(j*UiIir|rY-Gput>scd;>FY|_b zY;I($gL9*Z#lnX%vD77!rt7l3_?D*md_^{js(@_kyOOGw&^Hr9Pw^zMTw|F3I z6`O%wtS-2ACaGp+jdT>8elw&9#jbtU9pJ(WC7VI3Yp!09=!UGJ4pWlt&)#R&(oLv7 z$QI@!UzVuOV2j>O?I9-uX{amg7Td}nl&2~Ut(MJDg19R$P(12Zodx&z z3$h<#lu6*wZV#r@VRQ^rojFb41(#C`R1wyYX6i@eiWA5wV7FU|s?9g>0oK%tX#2ns z=9SBVRsV?eS_;ED@KC;pmS#Y2=dzG1v=P^d^TmqdW8pV^Jrp_%6%pN~BaiYbs&ZW| ztRE#RP{Zk3%uZw{5121ZUv@CZn@*c|T27!P6lLyX3NckOZ8Et{ifNwd7w9lNWiEkj z-35J+Tj1UmwM}4GKLy>PRmwc2qH+=}bj!e^_k*-XN`#KQBB$WH1C)D8do@p8 ztJOn2ZZ4RuCLmW#q832$s55<#&Y_c;W>8FT%@k(R=>zm^u-(PbSHT$l6`V*CSXa`a zj~5TErx98J3hsB6mf#A!DQ|(kkq!5JhSW`}4*tG^|30iz4)i6?icds~)LA+qm6ii? 
zVYMX~Xx~Fk-T{TMt7H@{(LXWwnd0nTwg#8V73QzKMVBoWYBeuQF%9Pw(J|h=b z7AlXy)YeHos5-Ry;E6g7O<_0Dk=%exIToy4kEts3D*7p{(k3Pit1ygi1dj9?w29t} zjPN|!81;aGKr1cQi=cklRr{#+z^hGz%8LvOS|<;b3&^LWc9M)My@|(j@jbW=Q=w=w zO&l$bf}+h)aQ|hCZKRj zTCONtkXesIrhQcYDtAG3XrB5`jYhrDt)0{dA%Ct22JDk$G}yl1Q)TGs^ac8F=v`f; zpU@P;V-0rEz33$R4Ku99C2DyGHAxzsg!J!4@fvI@J2tI6Mj zveZ>mA#;CoPiUK*G7U4;#GRkYzvfzT0(*o_Ve2vVp{bCIJ64Wdh`Q`I=$M^TTYaxdkB(p;SnzWgU@V=%Lyg~wk-d?f0F!>lUU+cK!; z^dGdDsl=247oUY;7!Q4$UP^bOOG7th8ubx;$NN#m2KO9Un_5mr>r<6+u%&uXA$Tvx zO95exuuAwH+6+B}F2VqzA=GS+1ZF|Ug;&zp;TtO z=?_b?WwWWXX@~i$rIe+=X*Qo_`rR6Ce`URH>c@@d(qR8p;n`}V8q=Qb%1j|AYxC7l z+92Sd`>ThgsbJ2OpwRG}Py_s^v&1pt0^vj;BQRMUFCS3SloZrwj_7w_-+r<*{e&qB z<+3Mi2pbKC(p#K`Kgkv1wt%~H5VMkYfc>AK=TU=5GjSg~`V_bf3#n0xOJbzj;%p%= zur5$LaLFI<|IOFWHw)E`KT!F2;G_JN{4M=M{44!`L%X7Opd1uz_67O~&xC)#T{~OY zDgLPrV=kEInLW%H>OI@c_B!OX{+e}XJ>z9wz8A>!KSl(IWCkv#*AmO=)v>|@LEoUB~>B^fxBo2_CX_@ zKM{JiIzjEFc2#541@Ijoz(iVKJPSpXWkPph7dV#d`uF(f1o8uCpj$E;YAruOz2s=X zEEE@_g-_6{;{p|Nu04cqN&i4Y|6#$UZ{$6u+VmEUqlQ}MhKzMww|2FP)*;qc<^twL z=HZqa)*9B`=0RKs`U|z0Wz84N;rt}}9J!e4#7soB%87d2DsmOkOK%Gml?UoyY9sZ9 zoGBg^9O6{zvRoca(QBZvR|ed)<543ROFg4!fkUzYL$WWq`ldmqdEf|L4(+}vtc$gB zjaiOip(wTu9RFLvTUZ+>S!ZG>I6VvK_4F4w!>1`j<+t$CJK?L(iqD0Aa92P269Rt) znhI5f6#)<8m5~7}6pD^QzwH5Zj9LY@L$^!{q(I#%9yVwW{O&I!bk{0yA5Hz~Yf6Gn znn#B|4C!O-Wv*qmnVZ4l3)<@8D`<4_OSx=jBI9OjnYNgovmL1S#B8c9dyIR_HlRR z!Y0*8SK;cD6P3ZlM)Q3~PunY;*(XigOdrevn{0PmR`M6P_NI~M7p8su8*ukZ+%@R7 z6=tfiSJ_zh1-%ayNu790Oa&KX17euEUyf6XsdlxzatcvK8>ytUQf{FxRxc^-l`?9) z9w72j^9_e)!7%zFBQifT-{>u1;Jn80>{3K9CFxT1TKY6jGn!gQJ_OT$Eph~L7F>20 zv_t4u%~Csn5BCCCT-(VLq}^hK_*uviqM_7u3s>0;s&z}S6Aqzr`4Erif>mq*_TJyc z5#liDCQX-`N(Cf`)J2*ixupr(HOj_JBL=Cr$cpBHwn65VV5>c4(oDT9k@iaV;^t3W zV-s)v)%u6IvuO}mqIZIOJ(6$EWwMRfv&?m79&{DMs10Np%1mt|?rHT9X~aYGrmb39 z4OdwCv|K}7qK;CY%2Db>s3F|enqiJ)5EvQPk%>^^D+TuU&r~{9g6;;c-b7lVD$;qV z><$F0{~d60t|DI%^TFv?8%)&8wY^ZIIjWRVZh^11r~FbHA|*f#>mKs*_X}zfyX0hRELN9A8Cu^9a_rs&-rLr*i5!rG=7- zXf9P=h^(+5c)$lBf@&sD0juvUdAIzBd|o~&zm<8#q?AR?z9G)t1ZAl56QaNt;P{PG 
zD?o2Sg1*2+!a;4O9#Ferk-M0g%m}FGecEWwhS+E1kK}To4HkjlH&WXJO+iwf3VzyZN^L4Hn_QaRO*iAew~NiBVYPqkIrd#)afY*cla-SmmxfL=Fc(x=a2Iyt&Jjs)|z% zRR)7Qx0BLD>4#@mmHz1D^+Ietg(!qwb`#OvKKdnHoylUlu;bY;>^Uxp|H>Tz8~#On z{0<$tQ*0>Cs!~jVzCt&o5r;!{qyVlyia4yx;6iAdS5+7tFMi(Q~VgkD5^CO2xl$h-a@N zri&p)>sD|&R|hNbAzXb9^h(F#&gSAi8{EzDN>3-czHq2 zg#TX;HXM>LI+#zu{;R1g5zi;6hn4EeXZbR^Y>RNR8cf#nVD-!7wTLS_;xQcieFUQY zEAj_qQxx7yYpl-%uqFS7So6MmA5{T@cu&yOZtS};^hf44tFiUDOWa598dsiAG3_+9 zHaYp%;A$VsH{px&8@M-YKbA&x(vG=?j?~Y9VjaZ(or&&oUoe)(LLFg(8mS%uv-mjd zFdnB=Ib45la8c7>r(TWv`vt8b*hP=&Rl!r7Pt=6^m6c+kTJjTh4KZ#mrBF}6Ogs!4 zSR%O{yuD6fDe{oH*VNBLiz7|#19tG~*i9dyPFqmUlrBn3@c3O?Bu#}fyZ13%RUZLw>Eyg(pta?-4`snnw`fA7l@6i*fDS zalfx|3?F9dZMw|&!0u1tA8`Az3ob)9(8lD^*N|u0>E6_2@&&O69G;n|%dF5!gV#G5 z9Y#)FuM|>fr4`njR4mF&aJJ9Eif>mtp$~aO-%HrY?%?g6fW2Oi&ZavtMVV;e305*w zv97(rSbvw^3@-OR^f7374wZ3A}kZS3%L zc``D`zok}^59$XM#fOOGk3)syxUd3|O%tIVlsc`@(ftre3;3X3=Luv&nc`!>B0LTB z7iX!{$;)&p=%nuChnsHkfALezcPyJMlPm>s+V`-0Gu1M!=Rb2N*ze3I<}Kp0+e`+1 zo=QV^+zd^OL)f(`)N9lunvreEUx+`^X+DU!w*VAa^5oArh2}|JQDGY{B_iS|ifYDr zxJK_xS8=4Y0KLi0E-8H$X(Ssi+X+vo`N^qW#|!S}usc`Qln1sm@a z#FzEp`yz3&?!=io4BhF)$eKS(o1j!3BP~N5bXWK;WFgNR1(n*Lg@yQB4>}}SP;N{` z&2MpFb6`Ng6u9Sq?7xoM`Z@87wvlcImDE0F()PucY+G(^18w3iHqpA**3;g_cF!`w zJjT?7Ph^MFhpDyLo#jwz;84$~MQ=jP7DcO6LAn6qwVrey{THIv@4#1G)AwkZ%1&gq zk@8My0&?sEaun`U0zC9qrM&t`^=M;>cces#bPOBKZQ)*WC83V+9x4G0^jZsXo!J^p z44p^~A#VelwG_%(b>UwIf-!s=@JzJ65lUwLHH$VG`ZLSm?aL`gQNx;zsBRKsz)Hf= z02vtJ|H0qF-wFDJ9^`*}d~iDPRKG;bg$Vwwz;->)}Jix&7DlYaD|wEfO9H|S|SamYnJFtGV~gz8@rm_ z!_H+(v9poQtY8{4?dYLYW2kl1B)jVyVGTE>x#*hI6`DgICQgi&n#((tYw8rOtX_)f zPS%4L$fLWm_c@t2nG-C3S>9TzqL$p7pU3W_?~uibBKmfy%2|QG=mV926<{0Zw8N+Y zQ|c{vmNm*|)X$m$QSu2MakVr@Tn(MZ0CZ-$`oevTFW39pdl6SX%Iosf_Dpk&uDPx# z*Jfu6XESF3=ePVAXIJ;Az<Y}8QOYW;z$=>=}pO9orJmW~|rG)}=NIYh1k z97kVSld3_%QV+QOPki0I)ji|gM_jdB`Ltz8XVrClXlSY8>tRz}rHn3J@yV5@@f5*-PBV(LW<3|V50HXUWhF%ir;I-agawIO@J2_~xR zq!6Kre~#~=?}UGtP(WU)HKY!+FHt3FWS?)}X=`acWFE_3VM z;Qe2%3W^_eYg3{Dkv6e`RC7|V9T)K*?@@Kh$(os>6F|3rf 
zi9ZQ!V54uecckYh_Y&7dXHDmq{1y2L`CIcU<{i(C&t0B#EN4sZ$^0FjTS9Yf3SHS; zGqiZjkpg9scc&C8;w{`iWmG{WVQSu90u_Vp>79k+KT4j-|pG|1EDf zkKnrLjCCeDySSQq{JwGGYBh)ai~ZRw+dLs3!ls2+2oDMC6H?2*7J616nXXg=V!9TC zoM|kw`kK;K(IV~=l7;<&j?f-9L*phO?N;_{MaWG09#`8u!kT2~9RnST-Dj%|9qEDG zRbbQh=*QJ}Ky)n>i;Ee;9<1kV_`wZObT@SC&j(hZ8qy|EA6mxiQ5_j8Q0Tfm#Ho1$ z%3L906QOY6ysxVFqPw%}PJaD7PfpvM$vM??>*wzVrPms{AaRaOu?-EM64N+gZ<3|Z zsFdz0iG`?w_Y)e#c8r`Jdd*hM9LJTWKN3atbTt}vsVF&LEGSMErU$zE1Kv>Y8c%ai zFHgWT#8*8qM2uB_)X!1h*!!mL)@k-CAx%Pm3;i0h)bY%A-cr%@6Z@QMMtoOa$(^M! z;zyiopM_=OD(R!#QeCWV(svQp$7N6_6!^{0n?p-ZkD+UaPO3?|`q7U-0h;R07tgnm9pRgca!pwX6y7 zB5{GX{%O7=-aF`iEqAYRr@8leTKb%UG$~zeLZTXKPIT-Fdl7js=6+mc!m@-i34DCR zSa;M95s9InZFkLoa4+b#WDR|b+Er;K&w^giB~+H@ij~DooYQMjiA+Sr?pvTTl#`Oh zg22OkR(5FJiIP-3#>ySwNpq(8n#FFtY-wfbYL@s-oRjHAE96L`6;47$y{|r2PpfI_ zJZ&WWz&fZNl!Mkv9QhPn>W@$-8mYa&-nftGtFO`>iXp$?41S|{6|<^9^}aW9$3gNX zDM}hG;@X8%fljCh-G{pEHJpxKe*_e6`vz78E(Px6{9F~75NI5r0t@_AQAd33KO0C8 zuS)lo`g#=Afn8;qY_03~5E>WpAu=xdcJ%A$Hqo7;J_6kQCS8$A&Q>F>)65PAFwHK2Fs6!n7c*L1|1d!-{#WFHQ!-+S^jJ(q=Yl_}oR#oEyp z3gz>i_GR`u_Omv@TF9DcDQ|9MLPo{aXNNI+5JilJX2(&o6SP)uLmQwrx;873>pwyr z!LJ@w$Eq!$%rppUve{~DWDwP%j8GpH6bs_$CD6C}mFx?>*85=CuLQL97chfAMs^)S z)J2715Ale|fMQ4n@erDwL%>w*0~7cRL=;y2D{`@wP#tWAib9GOhS&NI-I(K8r%9-- z)==YA9+vVQO1~GeR@0Ho)kS2l$S=@Em@BtJ{GBH~kj^37cqFBxchCb7*&m27#$P|-2;gU zVLH$cpvKi4wIK$YI!{m^=!c%hDj?PO>!tJ)&>E3o_uEkWUV}9`rrt!iT0I z#MDF<+7zEFK#8`BRvCZS0~T@)5Z`Y!2CF&(Ec_RNe*EX(xs~@=t#iPNFV=@a9|GAh zR^R8ZDK@9_u}YiRjk6^;btg=V&(+cps~8)UVLs%~0P%W#*e-GS7v0(&vEGDXZhWMY!+BR&iUvp2wYX5;Ty;E;L>*7>ct zl3(zchsPqUmf^JOK-E;#PF54+UyYK zPpnE)x+qp9Kz&6n+mt$vdJ!1XbURMe?uZQM;df6($D zs3}i_TJ?KqTGK!yzEUTlB9sIbnMJ6IbfJDmSFIDW@oi)U)L}MbRg4){V;0PqiqVln z-vV=QD&`+z^nakZG)haB!V1NGS+VlnjJe-d(`68Rd4)a^dA?kfMcUlB{WZ3SAPEqxu zw?>bQULRF8a#{Fap=Cmz+V|OJTANxbnR}Zi@=07jW&$P{QnVFvxabHh@eS}E@*MH3 z^VaYe7Jii0D;B*2S()C+9Ary#)zGVL&u(C{>Gt#;XmIC2Gw(SRR!5KuW-;DDfAj~f zI}|3Xp{7v~J-j*U9^`QX(U@95PhlkB4=n6{Xh>C|TTso(F+ghgQKS0@*Zm3gsnO`K 
zKM_UrUfKj+`S5FG@h)z6Qe4WH!tV=DOlgw5A#rJZ&6t-F8KEB? z`|X=;Udsm4SJq37(5nNzw%#3t|yP=7p zeL|i)HaeC$wmZ%`?l_J)vh12Q-4xC~AgU^h1Fby%{2RH~axA%5@={%0eU-#S%s9Me z9H!nD*_v$E>mTMr56+MA>zOJ5BXJ#&$T_WRBTETA>zi#*< zeV+39+UH$gpQP{4OAY)&ezk0lAPWeEHZuC_i*XyxvdyCE|rJ>vV;?>jQ~W=H0acK_@vC=8SSR-WsB(#=g~ z`}44C(d!c06#Sf=T`;3S!`L?A7p+s-Fk**XRmkzZ^`7>=@NV?p^E`F8ch7M*@?7*5 z2~3fSYum{q49!QGW+0{?NOjkzDEGtxfqlMB_}NG9UaqeBi*pKR9!q=n<=&_LAJ>1J z|Ec?z^ly*STW2-NS(;bfT_W&Y_3&k)QVYE+@k{9^W$#w_v%=xh!;0L8czIlSw(XnQ zZi;5_>Klc-E++SK=C1E2zSjJD{d>{eL*5Gt!A`X;4cidWG;(Rw>6nRe-Qz~b&WkM> zS2H#v(jSs#?nQkPx4YluR?5!K+?5%V)jE53?jdJc&vtKu|AH_`*-LoYhPF!KWux22 zZi%ZIzc=o9%$>;gVZYlY^EEDwHj%f~Flng&oM(c&Fz{q!JoSCA11_ngCK376KXeG= zK&7V>Gnt{8G^#J@23ns~7%{`=b(e6y$z7GxC8tBq1(usHM(k`S6AXql-N({7=%J#7jx{3RH_tiYgx2BHZNoY#z>cWU|zC z-j+FYGOuM%%};jE@KzPR$+22A(VHI2@3w5Qt#q6Xi;O-U_bM(gYJBM5){T5a`n@iw zMU)G|WABe{hpVbHJ#SfV&0H-n(NkH-RdR_6Ob@OvPO3GQ681xmb&fd4P)Aruwj<7d z!K||%(G4o9Cu+6TIOVl;O6)6^m72&e<&w%^^|{^~%Cn*DAaq<_1GUhG642#fi0;}^ zWiF!q2|@{fW$#t@AXg>lnfy@aXlJ~$QGScO{ka2jYvtC?U7g!AuWY_6zrM4V^Nw?v z`>SuOSdn;ab0$nH5}!Odx_87iWGHjvaZeKJ$8QK%O$t4lx~|0pl>Daomp#)0hXU37 z7Jp5Dwy(1AL7U3gaTE$m2C7V`QvTP6SdA3Q`W|jt)omQ8vvTdK~37td~*7^eX zK&#JzL3kvb6P}2rWeyQ}FSUvebvot(YM<|^m*hm$;OkQP^Nk(9~t}kLOo&wsc;|Uy)ZguZi;)U$iotcH94sbtkH^ zXF|T%OGJey&M!DN>2`q%v8x<~n1i|>a~P%6T7ggAtG))l{_Z}`bFSmws)0!PG+EhH z80h8@^AvLf+t|?Dh+iTrh0k+zv&3@8sSHfuja3JTr+kM!XWjc;Eu8!ES2_2&r}|zB zWt8VyHJp`8saZG;;5oKp3TgpU-uB6-w z{jlDMRMNC9L>W||6X*m{f!^~YQC8CEE8ee2- zp+1SNVt%zV$QPcOcUY^jxw0wX@K^NKa)rAlcozi91L-wUeMoMnR{-nM)3n-NJbYB- z>WI^!=j@BD-7SSJ#Vw~yBbgtyB=HXPjNkdLd*66wdy0B;+z&nF{Ds6$ath{$exTcM zBEQyD%^WcQY{|0hu>N3kS{GQ3m{cyF)u@(aOX84zNGpx$j)zhWRHRo)e)&%|U5nR? 
zAr_^{81w-i(}PfH_hYgt68OD2dVTG?vRIClh6s=ROMQdANgfGvANO1?S36e&=kNKa z^6NNHI4M`0Ymw8QpO)J(Ki1n?nnrM@wxMj~&mpZ$<;}$-`xHz{2}}AhZh7Q1`!`PI z?Dl&h;bxC&4J`NF!1`YH+WY(ac*e zTT|?pZF!cdz*Ouvxw!F6FX|-mM7yF4MyKK%s`kG_M>tiCMFy5DJC&K}7OVg=vI(>( zn^7k*JF=T>MHV2|K=HYtasd0u9N6va>TT?~=3b2nn90shdGUEI@^0tZ^SkCh!fr(8@_<7P#_b=Y_x-z4;A#8@kOw}B0Q zuTS>e@wOF;h{wE2{&Qz3e~3I26KTV#j_hmhA|GdNj=2EM@!pYdx7v%^^36**CtZiW zL3JW#DILW#fja&+-W8sE-o}1ErjEScm4QjpPf9nrJH^kU|5YjF>@OAiG2L z{5n|_)1sU7;aV#-8Z(x2g?s*2zS_RR-ahX2&MEnMc`NgN$Sa>WI`4X3le~Vp4f4*r z%KHiNm2!j&4eu2hW?e=Xwzf$4rEuBgV+n}~t74AWGx@iU)YwYV$M|IVk?>ZG3e5K2 z_OlWZ$jb}KPRKv&D-XrvC$gZ$kcUYZRgb%Dsq3hTnz3jv$P}VC5O39%$_%-xv<)~99yqQ!h?853m!&s0CT}N@2OvxLFD6W6b@a)@|7rCF$dnj)=A(2TB2@fsD z&L-k*;|in|x|&E8=#n@qHpH>kszohL(qoF4IwMOipq&%j1|EtoslC^heHju^2R)?k zcW{3KSD=W4O^7VZ>pGJQ3(_L?E7A@f5fhfH^vY$wdy(2M=d z{z~n^tl|$!IjNG67|8Q4_5Tf&*CPL~=7Kv6vZ4%Gr=pJMXcxo4*j@GY_zJV%O_;*-nvZ zNiCDxC%r0gD)w5~zK~DR{v=O<#$ln{bMhYXQL7IecAm05a4~;d_SW2hD-s?t%=_4P zJ`gTekrVWXbRs{-G|N2MdfC3yVRyW;pK#bidfWS2viNXr6TKKyWL4F-atTrJ74=%Y zo4g5_)Vtyj7v_qS_#NtP207n}y-iEP+S@DBxkNQ{m&nZtwUg-- zB5`K;Pu5JwuQBx#Mn^xiH|OK%e?WIITPsH_)OH9Vp3?4Y?+U-$SH?eFXoYI-LTRu% zhTI3$=8~+=Z?te$xA~EIfn_T?Oh1^?co$cgd5YQKS$b2YCW|=*0!-RAxTYjBZ86QGU!bC+V-$BEU|!mxlw}+88y{YJxZL zQFtXD*XeSG{7jySid70~0gq7U8wIpXraW9(4MpXn z%HPUUJo^>TenU5+rTk1vmHv?KOIy)1UJJz7DM1ka#02Qi(r0Aq@5Nry0%;Qdy*w~j zAEh$#9z1QFSiPT;iUYt|2cRV;yVW&S&!*%$qD(=250)d;xwd_niHSJVTm|;N4_<`-YiAqybR3o;lh1K0ahLl$v z$~HL(k;e+D9_rC%WSsw?pLAHdAZ18rrMl>kr%Tsl54aV2qb55?o`99Q2?WtKIT>p3 zNpdf_lpG4oZYoelE+G7FN?)XhK>OKXu`PhIS);5#ePE9wD6$Hi1C+FDk)z1+)HHer z^CSBsdb}rCnp?|N<+t%&&|l8s>hP2P#ZIs0W^mWh3ths6W165U`<}_gN>l_QFqa-f zmq52Q8Q74;&~ui_O+a1PfQ>i?oy)3FX|70|gGHnOBUT(b<*C{%^p+TOmn`TT=BQs# z=lY7?b25-=FHtjGpw2>FY>2u}J%N=OpcY5 z%)tS57*N}z@feH$Z`L-WrpIah@RO>#5jDcvdQp9cmJikHs?avB58Qbes)+|sBlGFy zpz6B~b);pud((lHy+>W4PEoIcefS+(+m|uZTn0H%J~f*z&nyFCd>EZWndx6>A3cD% z%XpY`P*QKojDhC3p*wyE*qa&18Fpaq+e}ONe;(>7HI(9kL}@_2hNAEi=*>=qs_IPW 
z%#J{B(Dg4f)IHQMt3#W(jV@|awI6^b`B96~Txtna|2m*fN&>l42DM9GTLk^^-dKe{ z)II7(tinKG`C6!>)cxvt^%r29;(_m(pnOFYI0`l4i^^T}6B?=Wu#RqE9G0nrpjYq4 zV;k0fGQQ7i%V3puy&$lwAGI%<@89}$b7C*vVP$Y5H%Ddn1&}bm1L6KB9_eHVRUhcL zIne6s^#%ws zioS~(`9Gj~{Q-6FkAw_u<%!_Exr$2fL(DPkC7wg$`vkBxCD9?60v|I6lO%1x(_(Pw zsZc4M11;g5m?!^TyY;VDc}vV|q@!n27wEa`%5!9JiOOrBjt0w113(C8R=#NZ;+V^?wEjR(9 zz*G@Qx}eY-27YTZ^mvZ~A$1k_lX6frTx#$n0K<6}^B`XB9nh$gw3gUa>DV8u)RVZv zX4)9kyvt%29l$P|t8PIZ)C$ZBp`B10qUtLu)q#Ra!--rE_3{Yd*qn$}25FbI=BUEw z0aZ{6?`bgRL=3j^{X_>c6RPIV$zRAez!+8{tD)0z8km94)O5_gbY#}y1dL?HVjmm> zTJSDahu)7hxPuuGk@$-!1IsXec0L^njL{9c9GDVX!)_K+(eySuQ&QaWktfpgdFH z9d@g?!K_eFT?{-%2jvtz;0I+stmKKj9#+x~$jt7*Y3@d^Yc|`XqptJ|ppyDCC783Qr8lHj1K(5*n(Ah99A-Eo(T}=?`u0dITziFU zeWQlpPA6(tQQ4lQMBtPikM5#?-dYt*GmXUga2HklvA}^z*psz zwZKA_0>8{Q#32WPiaDyv*dJTfV(KAaTFxp7m=Gd>?yrRDv~HLPd8Oq7H+v51@h1?& zJ;&Mh8E0EvVD4JL-|W<<5^cz2c*5W5mCSp#Ic7!gn=(v-DaBmWoM{?s!tl4Lp=pe1 zhN-UUIX|ASfU`IPy_NYuKAqsJ^9Q&h+)1_$`;6(syrl;KgS-_}R21;HRe@=*1yoNb zAaCwzv$f(t-_C|dj8JbV-IRRby9NML*WzDnU3EEHevQaL1LAt5*j_9vhKh{Hi;eMh zjMPsWEwu((Ym?YqY$@&)YXF^)gV~!}$b4Pm6!d?aD_;Un`RcqFxyE{J8s zi$EXb2p&wB%oJ>xJ~#^WSxsS`a2qxM34%YcC{Q9$I9}m`XMJprHtpaBuxpt_Mxf^dQ?DR~+6a`s341?{8icv261ei2bTxE~vM3IJH=s3C zooY}H^gTR4CSDsri2-i75qN9TA5rrFBL*@zJ(;0bDoijXP17Veh9a*o9x@-UFJFMx#+L2df8Hroe|5U?nY|iox>J|20}N6 zKZ)!TofXwN;`fkn+cvYvcjSsPhcP+Z2|CYzfSu+5@T1d-+PWFp-&pMk(TVQECUIBU z&kW0q1A;RRXxmowC`^v^p?)Rp=pZi!meYey@H*g&!_n0=={>;bwM4I~zriWkL5ET{ zI6O)M^-BQty#UDMspt(Z0Y1MgJbWqj3lPbblv(m=V9^eWOT@{Tdm1D5gZ+OKh6)*& zKiM1j2i=YmLX?mb_$@#OO8ZOtihHYh=DP{^T2~)eHK4PSTo0U`ot^U!Y)l1p~y&BHG6rgoQ@*>@t9l(9%NIs97&TVI>G8JhzqVeDKMcQ+mcZ<}Y zfcp2#k0law8TZ7d*xhIU&9}T*mjcQeMAId)HoVkP`~%&vet}W`T0YKu#yt!Dt`7MN z^ZdCza@{%ea`f!~qv}22q$s-f?di$0vl*6*Ai<1+2~-qOFkluDg~yD7ia7@q#DI#5 zh?o@oTUH2UO&--ovW_G%}y1K$$C)KG_)3$ZlHg$Wl zZhp2+tXIv@_8mJOw#Ujg=eFG3{F3GqYVK||t?jUOZ?&G&?B>!J!pHd?cUiV`s!L*I zLpfHw?;BHzN0a60dR8+Zm_8@{R(7E8x^Vkaf92w;B~@=!&MzNQ+OcGQv~%>2$o6nN z^oqZ|Z+(7S_Oo=0wl>$5MVjc_-FD|e7QdGy+ItJod3;WY2>lk2l1X|;qn&c 
zhm~CscZj5MNU#g5tn2b6xh0uv@x(kLGYw{N zgUm(2ci7G<+-?|FnI~HJXt}kfqQ!wN$F%yXRa1*~)ji94M#BCt^EH_nO;0v-B3{gc zyRNS7Q~yEZ`N=QS7iUky>;3J*L}z7aUP+&dtE$&Dd$n1p`mu_crCm$PS-&?gIM?6L zHzA+Ne46^MX=me}4bRoxS=*`hgWCP;pKn}53rr|*FefIX}w%@h=-R(c@ z7*Km(S0=~l9>YGcbw+qP`CqRkJjcC^@1^LEX$n(u0sG@nqtt9-AL4Z*oY^BR&o zwDHHfQ)}1nnpgW_{aJ}#soKoJ`9BNId^b8r2d0Mnv2^J@mCLGsYSz2i|EiuZzbSrk z^y$z(fl8-?+bw@kwom$&Wox~27hH!e)>lYPCg%sDSu9xh`&NZaT? z(Z*QM`1P!Azpt!K*{*o2l4+4`!H)h1-9h=+@N`HH0xHPbh);OQ|V5bo|?_>M|hv%9yH>X>s^7yU3owzD-R^o`nzKQmU zLgU}eB0kl4SK}Ft%^K%6ywT9L@vg-C$vZRY{8|3TBIRY%s;+NdQ4?%_V)cWSJt{6J z-+^UyZdtZ868|zXKd`}lIoC2%mb$#@q(mg~YvQit*z~j6aru+*MxW@kX0^?2fx|-Y zMD{BwEB&|hxzfS$i(^~E{e#~)-?&2x1M&lMM`qh)_Q!XyHhFpS*QU!j0?C1?gEH4+ z!@S&iIM5WFOw_8qLk+=H@EGK)MaWkdgx(8290>ad5zT1`@x?l@qWitvIr%ztq)~XY zc1APbobQ2O$gA!XzA?_R{#RLrbBKSe^9|7s1|x6Xh?Kl zv#ZUTnA{LIs7SI*$wW4r79_bxeWquFq5$;7de+#+v zJ$!lYE82e^AMy!>_OiQ^pZ221$dTU!5XlsXf-$Z z2lbt9-tGcLiKt-?elZfwh zTKwVIj_^x?V|;t($7dcULSh9L{H0Czrryo$$}ykmeB*Bxbb}vT^fq?Q(gc7rtN{ ze6KpEVs$v3RpZsc!_k@^56t)f;WYUA_&#NI-8WbS=HW;C309((@T$SNH__|w&6i*` z?TbHIAKygZ0{rUc`X-<$5A&Tuw9#8w?{x|*QSU)kKMcve3#-yTB*I7uw(>{tpGx9Y zv^O@HVaO0u+^5|u+&ze7cpaXLkL2U|XYnRUq80p+-I@&$17JNir7yFOB)BhH>5d-vf) zmFPrVfW^UQiB52P_~LLV{8H#x^qhl(rw8`(Z}rW0XJeOGL_~=hh3oNm+7q9_3w@vY zI-+APLKEzPmF)-3z(EsQ`Y*wknS<|Nhjc?~ zN$Ts=$H;csWNC6oVsYaW4ZZ3=LVn5bJgN4&hCh<2-248|qK}tvtFEr8su|Vnt*Wxh zx5~@PTE`EMJskNlG$t_7*PB>kRmndaFKJj_Kd)g#Q@_m9h2j2dLJK2L$99ywA8&}S zkI#(16#rj*a>+iiK9MECTIan&i|qX5A&K7_-f0-z_;%ug()Tu z(1P&HNF@4k>}nt4WjYSc zt|t32?Y2*$r*EErd+?d?Ly-xQ^CO|iW#N9I?*b?K%blZG?e-7egwN*p$gf6I9+N+( za6D^9XR~5rg|nwWhdtzYUuU+h`lan}Jm&)9Lq&dRz26YVoQeBDa+Y5s8A+3mZv+qZRDiy75R%8rfh6?oWv zINu}JhP5OmneC~X>l^CpcQicGbYu3lz|gYqYbLgSp!IXD%35}9KE3?h z=raG!1)|1gGMV++lUSb`%6^^MBR8=y-}jimd#E=0LFok*>nlT5y(_Md&kR51@8iCo z8<&}ts>d(xmvsMZzg$D^1|ldB!^F4R9|>(B(!@d0JS#Du39ax?b+_YJ^HTcb^l91p z{MSBzU_r1sYZsqm9tG+i7=icuO6ObRWQLp`zJuM7g*@KFE%En#%Kgh1!d~`0ob_C1 z2a#YO%GWXras=Aav8*P#hP5=eXBSfU24STroZoI{tCy>;jehDoKI2O~+)&>1L1qd% 
z{t=>?1b^QL|oh7XUt4%fUYGCF)uaGbMWVNkYzx)-b5AIdzMJE*YOZE_A}1^1JY zH=~!uE{@HK&WKD3_Xq_8(|ofF&oSax=Jp`I$dbZ#x8RErS>ah`jKa=Z;wi)n{qh&& zKF2HbHlnbkvO`!~Gy_Z2$;_BMoE?*Cn|?icW7DKWJ*$bnZ~V4#Y2!YLj}n^`uO$wL z;@{A?e`ARFoEJ8XW)|_>hH<&MrMI-5+p*amPq!&){d@_cA(;oj8u4J#Y&N$tpA z<~z+D$tu>*+)@6U1H|F}mWK()MBY0mvu8&KJm=Bhw|h3yD0thkrg1 zq)x|Xy~;T$ur_dD@DyVH93ANGf6sS+Vb9!gnZc}q9Gl&hKham`3=Vu3>=AAgxivB@ za(_4$`ji=z^1v~EA`ufUbpbT^8uws)M<{wMsyoki}@LVxU+t>~pgkrrb} zm`A{`s|thj#N=V-=c>#V>GM*LCif<0+HuJ~$sJ9Xuz^amrg5wnC{1o{`joW;>zXDt ztxv2&gSen>VdE(O2{ni8IeE`*?dG*Pr}fLteklnAp3Ti|I-zlRa&~@_|AxSePJ(z4 z9RmA@+J_beuLz$OdokX%Y_IYI%0@+(I(z3AWJhI(W?H1bOpnOzi68k0|C@m$1KpiZ z++%#Joge&5@PyvVoXVd5*3MMoZ!gMD$-J3ancbT20M6$Hj%7{nqv8JHe_6MFTX3EK zOlLSTlP_RSd^78D??6g<+I`hG$BFuXAhzXY&H`GfF~5{nn3kQJ9mv}KJBa>tEmp=2 z_{u*GeVCA)fo12M^rX}m$*#$E$qSN?CGSgKl{_@>p4}1dcpX$5b ze?aK0@WY{VLtDaKW1C90#7Da zOke))taP?IPh;87Gt<+%Fg14>E1@UlW)w=Dc7eNy&ow0USm>V6Ex{}Ohxw-Bp}RW& zVBrjRHXh8|-8-4BI|V-cCXtgr!@K=v{G-nq~D7K{bO<$5eJN;Se z?&OP!PwQW)o1dB#Eon7!k0tH8wAtC}ubR^;xw?E1$$b%;6@EK%>_cV%PIcdA#;hAubWHvyR@s-L%}vN%nZKB|0?)Aa`7NS@?ExoVNaVEP z#60PpeF;77ylf1r|5)Za=HuHzMgKr6ZcXId8dm0gli8YSK|JMSSbsA)wJ3ReQ)%Pc zx`xJEd{Zkb+MM3*fi}}xEvdPra((R8;K{!A*}Kw*WK-^x;MDNU(A>Z>|9z~9Ig!=F z>F~g4@7Uw9*JA6UgTqJpnc2(jmz&Lc`%Zi8kV{G6Be^+9a zpTs)+>#>8cXU_Bl;(WBk3uHQL-bbP{w9QXtg~9mzFRbl3GW$a2bnL++h{UlYGlKcT z*U>%?fxEm*TTH`o&CgebZUV zN3#8bqssf%e9&S=^JA;4E50i!#}8$6VQ_9v_Q?DI-|>MbgH3_={k_45!h3}G zjLeBl0U&>brU#b#&MoZ2OyxiM@x-jY+*b`(Ih06o)x;*A<(`QZvnw+xXW?b`sB=AY z)^A`JSnYp>sA_-vzh@rp0oLIk;y%FYldlV}Ank2&9`R3Lrf#uwuhWevSEJk!g;{v~ zPr93QkQKu(XWKLLF$HP%U}hC3=UOrkJD!=K&-0U+s~(q|n0-033txmu=?gON z=YIAbAKn!oTzOpez^bw3J>rSTmf%VL2YgFe2lOUr9!`=Jh(5jJ$HxthCe07 z!8PG2#H;!NdsKkk2}-a=TvFHzIxyK6XXeL`l}h_F%pqRSTDKP(XjcIugj^at zB=D(o50TR^M!Fx)8jusPi62hO&EdG;IgYgwXK<7`AF+b_6XxU(DNJEj`SScTwEuC; zr9F24W#v6tUx*{A42;5JvR@Tq?T5D7(HQDdcSDI%M}C5%hcbUS?KtU|~BFNkjfnqGEl;8(t`!#{1sPpG_Qze{(+* zx#V@?lw_DII}QYV#C{T8dEx`s_ngnl*~xfYY|q_{&YefLJHBugTKszM-CdlG{F!x8 
zOF&*db8u&40lJfQ(gA4K&ivlaC85!=!KF8qttySiKZy3lUwxDFI5QdJu!pT8D#CNY zO~K#s#CbV5J6I9ACiF?@yU<;s8NnL@S^E7<_f9+pRx!7G19SUlyU(~Ev108o_hd>>orL0#^{j`SL{?Cx|7tB|%ar}`u^*Wt`3Ee#%teedA# z09Iy=3jRZUvNzmo3!mUOG6@}H40E)P;%D%#dnD_=j|UA$7sfDW-qRi9{^XAEJ>pDY zuG$Be+T`BK2pZ4&xU<<~;$PqX&Yk!&O@li3XQq29Gf^)%gPeMHOgY~@nW##KvwpDy z8u3h4axLb6CU+BSiQYzcJvDn2*1Ow@1e3^iMwUN`S=W(xGtGm(MDoAop3aS9R&{3X zqud5!t{($>Udp#COk(E!LAcsR)++UeIvs&Ndn*xiPJtRfUpUv_H#{a*AAhjyy|Sv( z5wR8FpMrtFlgu>tb4u~F7)eC_Aw-8bHS}^Q9DYB1CJ_w!MP3e1XGY}~f0H~7+()os zzES8(+@3F)v0epEKn65pEaT`0c9EKnm(0t^8ix}*Yz6k0_gQl^!WmBOPh#fvDCd88 zcrC;WppJEbZ#XXz4=aO&^McdIS;uO|rAR2Rf{ko`4eefnmbZ@8fHR;n_s}}0&|Y1M zNm9YA%(qwq-h%rq&AI67z0nd6VEnwvy4dEqABns;nYsB}K*Q%yjV-y+`A3Ol@nEiN zZWZ#=>zR7ya#yCNr?1Vw6IdO4t!!s`ro5*7^Z4-S0HWJ3V(#dEUkhS5#qmVAf!Hmp z{p*QsHa`@PoE2RX?HOGW?jAZOfL`yOg+JD4=-*vvo}VE}J;;3e^`K)e`u$4A%$Z1T zm*XE^1!{i-wVxK2xko#L;NZ9UYn{Q)e7v2m@h!o6@(Xc5yZHZRZRC32c;+E5$KU4- z-xJJ7CW)3YlX$0nn6WM74u>cG$ZY+y`D3wPg3*~~~H%mndbn3A0bbzKbFhGb4o-=127q&7UYHFs_B zBi0gyDmRx`mK|5}U3h+=C0yl0R-B&V97%kg2Ip_zWZ(PF=Yfwx7e?x$?Msd>3B_t6 z`-M&ooa33{^^~>^E%JhUm9I6{;$NLt;lJ!<@7@T#d=k%-Dpv1( zg3OZjRYPI!L$|rZUyZ%)B>z=J(K?WI6w{rDK+$1XjK71c7kph=A^R;WNRM(y6)FlV zK*>@p)}?GVI5R(r@vu4fcJ5iErf~jXu+b*}JJj+(biYzG_fotTu7MK&j`ipy{O7XF z>>fe%(3R;1<_Yg30^h^T>#t8=fv4bCL>+l8nP~E-Uw8M6)m1*(EKohTVo7N#_IkK9 zIG<=%+wpGc^qx? 
zn%}3_5lwYLs#U6c@(O%h{7~E|7$*%PvP-5 z99~laSNe)sl_Qx)c_n)ZqvkJsejnk}F0(xSOu94eFqVBpI;CGACeZB^# z{qC=gO)PgS&Z}5dmWZET@?rGU$c^Faurc=ZFJ@l3DSupUQug)SaqbZR*TK`nm#|V{ zZDd4bpU9zno(+!;of=H}4|KK`KFjsW&dS8m(sDTm{k}gdQoH+@ss<|~0@I0+a2NLe z$DJd6rS5^qsq@k7zlBdF3faQFNGUD+!vn2@7voPg5kJE}f-eTEg98Fb`_Ch4?>VeT zAME~H__5FqPQMGkun^v1S_7=z24~|Vu!$L;v*6H+;lLLmWA}k84}cSw<{{y%+!4b4#9& z4U6@PIkESn8zQHMI|h?ZAKy=fv+@VyF?DBtjN8-cg0J8wNMG+D|Nj_T6Yhe&0VoH2MEzK)JK+$ks4P^IrCqT4*d zteN~ko8x7)%AMsq#<|Oxfp5`!{^Ri(J2u!QIF_;G!(*a3a?K?72ju(1nUDGn$^Svt zH}3(>KGeO`-QWEM+u|Kq8+W2HJc{k$Bz%~+!JQreGnXLCT+6OIvHT2pke>)X*AX*m zFFXe~5(VK#;xYfp%8y+7XuOG@$lROhmx-YlJd?lNe|_Zok~t;s#5P4MVhdt-lr)w6 zQ1W=}?8vg>F#aQE_U@+II06VdDT_kSH29jXtHiR>3SGdwKxN^mu@>ZQci zDiBky#9f!)L~OunR_u2ruJ`7`mB{>I^qt;L&Uwb4@Rt)|_gy>@Tl?oYm$M(yzKp|7 zzB`C@6lTrERJ_lYI)nZ9Kz)S88c$QR*M&b1o+OSr@eKFSDn( zCkFG83!?9Y7lv9!(y>QMCo*3@Dt>OPZ{&;6Z^T*~K@_`Ji7?rf=t5(eWBw~Y%H2+M z*6Gd@&WFz4@Q`DJeM8f+oqfk#<7H6nuEb>BAFr2c-w|$KV(?VLAI9f9;}=@RjLg&U zqVIj*JD(FFy*05-&kQC4j}h0ifLHE(*7x1yjDwooj0QD|dEB#*-*yzj_}VTaO2i=d z0i?pGkcju<>aWn7*~CxifF0tvd;`9;W0{-GKu0>@pZ_9OmY$i4OfJ2i_}v@QZ9&5o z*jX+nD#t*+PbSXWQKY#zGbU4?yTrddG9)%4{8-?D;8oFnrS)Y$mR(iat|S)yIebR= zZ=x$-9o&S6`CHh5=3)E0J%5P%5Hj5(cy)h>7PsDatp8Xv#gB+Ia6;e=qD~*n+J?cz zS|8xAaW)|5{!+-H37&?Qb1OcHdlsH!6!r1_;X4>>T%*&^e>=A3zwu8U890gP7VZ6` zoleNnf5WM|;``u2Kl`93w1@J#$e`R4`G`2B#LVxNWiA97t2>*1V!kq=ezeM)D(*F+8 zOIu?n`0~D~@>F&a{fT&=ClbeVmhTfh(8mz3wK~XF?mZ_=?o!zhbq*9{D9m>$jrCz0UfI+j9NzxZOcqnr*Dr_=Ohempz!z z1w^@ROI+k7@QP_jr7u%&0taMPXa35(O&mzP`JJxBPQS%}zkgKlgvh+u>G4U ztWK+U`$8qJ139;lu7|G|p91{nCT4*bp&1>9?d&=DWGOu1cI;(`=4$BorO?1(%+PJ5 zUcY5t!>{%x#($^G#`F*A)#-n+v(81*x<7qk`aJv($E1C%vi*~kXJr4O1)d4qPlUl! 
z11E-FiF_Dq5g!wuRnmhvG^`+41|pJt#XMW1|3}uWT<83P zel`FtrZb)svyiyQGJ}0CUORuegIL3|FHwIEk`xO8DW7#G42lp0MY#hpH_yn1x z2|cAHmWxGb0ykmd{ULizws*D{GD8HPOdXJ0S@_C1lL(9V`u7h^4Bi)hIGT@jEZLj=gqp`j#}0_ijLs*%cUiPL@_DFd zuobjwEpd4#FcZ5BU1=5e@viu9ti=AX13Pay{zpC8H}+rOWcc7L#`P`an+{A4{t|p3 z_;=uvKsQ>USpa{1qGSh{z51KTub1Exasb}c@1YB~Wt?BYT9I|o|BYB9KETU6=yu14 z^)x)rml7%WDy-{KY+lpRMP9}O>LvV+56JqmUu1@4+F%1+fG73T^fJ5{BAg$>n7zREjbvwIt1`y(^`O)5n@E-!qJ%ori|_04$rl(UEC0%F}BiA>ZVJ!}Hp;D_v{ ztRI?qEm3tk=0_97rWyPEC|cAK_ju=OJbnAJ-l!({UT|#aTB5riMLg$A@q1s+ZUa|@ z?+D)%?iGGHG%L8A9$QKL>AwE2oxPp$SmQo}Pgk*Os0COY%4(I%SrIXnKED;(-qksc z$OTU!Lw`x_*KquV1?PIC=}++I8pyuFJK*RSxuy7Xyu)1loy@vlMr5Q@XvG7GV|*g( zMII#$zuvFGlBmX>0@ zJOL}_MM%GQqLKZ^XC|I3t3g>atjK49IX`_k9%IB;@c0P<`9z)>6r6QlXXz$hZ>XSh3;7^eU8@8#dlNPWS%+Ubn^ zza6{l@4jbS5k|pxd7Mx5Kk=6|1CaP}vld|C}U2CuM(h!*t{ zZyC!@s}u0)S&UR%kF_-jw>*Oy-is#x4bntUEar!>`sYII7PF8_-{gA*azr~~k8dRE z{tU)y0V!)fF`6ba2QiiXeUBoV+ITd#y_x07`JVHw^mSmx;0oV#c+yN(oDK9{PggAI7?N8QxPDxYxpU7b3w_(F2cC%X6W~ zjc%@Rzgy~?iGB8PcP_EMu5jCe|F=jvf?CvIzrT%iN3mad0}}poAY(0@`z_WVZFWzB zQyu62<{pHNWHvFsMnm6UWyW+x;cl$`|GH12jUGWe52RjqAo-k*<>^i={qx<2IqQV& zzdLJe_V*2fX0)b`UD%=gM&IM`z}BpEJ>EB!=es!f@$QN6#$%8suEgr~2K$dsglDFZ zvxB}5;iKO&li7?`IgHzjnuP2`I ziS*o4Xhi?up*et5$NR>D-}k8faPF_B-A<#I2C^RWP^7!9>?2-5x&H?)Hqn~*Qmz-= zZ}4pUgE5f>|2HurrqU~YLCKLw1Pdwud06<`Ql?6$KT>ujd+fFZHSb|1+=snYuOr_F zeQSM}QQJqb01pH?quCSwOv?M7yDw7ZzE~57Q}YIDGl!D-kwrqqUDv14$L})o?m+Hb zN$V_wx;0_}n9DlI)<}PUxUAO13U&=WaXV{TFQqT%xO=nP?-+F3P2g@Um|5j6fYLwD z4B55d(PbWS5wYdJgnEWa|1_ve7bU)CzHdRudBo9s5I^Mwl;$}QIt__$D`~D__xP{y z7u_37T@LO2m}?WVedE5LoQxTSq^>%c}Jk zp-6WyezgYj1mDlltnRd3iV|K1lB>~IZlZ>wmgm!&d1~YuKN|MGe2_+#Zg?(bHm^- zOTf+t%r$)qcE?bomuZJPm|OUq`kw)kzJRjd4Q2lT3_i-Jd!KRk2_y79{(r{vpBFj6 z3Tpl_Is6K`f2Sl>)U6qGw;N;bZ+iY@P*TchI-PdOP{tTNHI-Ss9bo!g@bF-fw|z=! 
zZgc0kJ&N|y8de~WV$SMu*6!b29M3oM>k8sG-v%mu@ZJGnpcE=|4DD7yC+^Qotqj;!yHw4a(Bsza7sU2#sl3%wa3^N_FqRTT-AT z#M@n{gi`NInWf!Sb2Z7>*W7L?@gw)(UB8356(Bpp`&&>V%?0K-tEM!ap}m!~Z6#Qq z2KPM~9@(3`;*7Hh*SmndL&4ifs99HXEdyf%De(}>O?+_fR`Z@VjEoMnMlZhel&l^1 zx>IvMT6}Nt7x!(Tr?QNKo;;C*j_rVs)G;d8fPR;D&VZ!ZaKP^vO?l2;Qf#B#fAY~v zezjW+-m(I@;5QH&V`RBdmMvg(16cWyTJCgfNxg)TBx=5f>zkmQ>*1D*p_%hR_K)zX zMckc%yzm|9_>%MQc<(pd`I^rvp8A9Q|D>;1gZ2VPp0-Moa}zz*lCjs4x>nLQ5s(t2 zW;OJ@#=TJd0m%J~+^6yTP2RbT_bYa10?OK%C#(7F2UZK>t0}?4KyYU zH;t02HNDi1|54g1MXxyg)|*AyGK{taNT?>qJ;}jOe>c&#W#rYCCpGT>ru>^}r2=hR zp#4^J7TeHbLCJn4g+m-414rIF24IA}r6dr-obq>gb% zweG;%+wdz-K6$uy3C~o3J|EmRO?e9BBs?l7KYgW~^;|PrFG|j8TOaB5WQ-K`lyNIp zVmwjG!FG-u>i3q^6hB%%`_N88Y&XWm!F+e7m0D5fj+CVXC2ULn)s&)~qd7+cDQIABak?OfO{IM(lTT#1eK0-%fearC;am;=O=?kIIvq9juAagTy zXUmLY{bM}Y1!^IgWN+F}ZPu1nRQo9>jb_o97%eA!tG9&&J9MTJh5x%3q%Fe#jRYGH zyPw(pT!d%B#gFQu+Bh7_MV|n7bl8ZB7JFUeD#jJvQ!2eP#F^d~p!NNnS)TT+${@t( zoytMqW#FI!1XS>=q?mi0Hq?2XyJ1>GWl^qva`iDT>cK=7B&#I47T~#R+NLw8>sV|b zeRnAS?m(-xro9NWO^)TfLx=FHHnVn%@D?2`+4}E_mDYMf?-YtOT7;vfA{vD;p)SdP zHq_#7288L81BphByfZ}x7LM6!n`0BlS}?Q`6#Yw|>$A0pZJ~KPnB2*?Qf}q5h5MVJ zjT`>ov9b8XrXpIEW=C;^H1TeYQ?#MmFfO6C@l3PJD+S7uapm}>Up1abz?Tc+G9Ey)L`>##aQ)*X@9Zqr>|z zdLxSCT{Ef?r+g}-D2V!7eIoiITpB%53Z)UoluAe0f4}Xjp-BA}CYAjcesu^j!iVA6 z!=>R=R}JenCL+8=?@~#;LnF%@4<4dL3Dvt94wo@6oa@lxuVp)@Ap+&UW zc#6)|Z=y){vHlV|gbnqxj_m(C)cc|dx}rOx45Apqh5ignMumhKp+KWq_beynDlROP z3Rfk?)@fZFr)nEfxJpnMrM!AZ?OaDMiC=D_uhoxraK|(`Mo1@~BsoOVf#{}j7^9R` z+>aIW(fdXBggNobZR983r{nLUw6cocTLC5cqbQRZP1{0R$ziIWS|~)RN+`GJgpDip zg%F-EB2<=$&CoM(VbP)A(PVxgKEooo;zEwEi^n%b9=Wv0gI7YqHi4rAr4!GsfM&IY zT8U%s56|q5EPNP}$3aN>l117U$72BQzlC!C2Cw}J8CE{))6l1;p<90n7hV8IUJI68 zq^)M)ZZGobg|5&ax%wpZ+J4w9`XDDCLEhbqT)91MC|yC^)>o92B!j4DHS!D#`65+T z4{NlGG8@NJ57;4DONg)HsN^tCDB5Qf#ORFC8p#KSQJs6&gh#`--M8!FXNFbreD#1v zrAM9B7vh3SWho8+>KBz(^v74ET^@2(FXMOm<ixHoW_OG_szvq2M*3toMcP3hdYp1sk^V_a6b%yXYvf~x&}XA8#hpg- z(IM(4xmsuz=iF69wQ*bVQfV{Bao2){bsYNs8-A%nG)wXQ{{j1}`MsvT35P&ogGJXSGYBu{E*fMwwQH$JPlBdMUFh@U*ItEbhn;+|FXmi3_8 
zP}mTaupZbg(HY0`D4wXW=z(~%dPy<){akC!^Zp{@L{X&QB+)lIlUqG@lJ;nCNi2D# zXx0JF^0Y}c<7*c!*PfDWV@$W>xhA|cTF^rd{a#KDOXzj!2bH9i_mgleN^HDcWhyE5 zzSlC6?3^O@{Ny4Gsqa-cp;0{5P_17!7EIbyyTqx9q)z1}s&4qUR2pHTHIjJ5du%+3 z8|hqaX=A{XRW#Z(4otcb|5ppEy-b1?2E|3|pf|hupmYf1fZL1LcYy%)zYrkWoh{Nn z!?E~+sHh}IeTx>0w;FDYGuLv~Kpr|IW9q)n8~877Z-|xVtY6+4TMiWwE$m6^l$}AX zqS8wy5nmK{6V+91)Mgq3De@O37jIBKHgaqR`I0|HQLSf0AFLf~=u>fB$vA35wPktn z4Qe~RLw%(;nYcE4QZ&Q&LN((n#{NJKfDvB(NnR+2YtB7)muSkazuhecLc9fH= zsNHT3CrY3;lk}!`)w|WJrazcm@9D}$FHA?4gf1B(QtTZeS9MX&Du;*1Krtty$f8sM z@+~2S(x{I`zf9jS34S+*)FMK*-s1hzx#3qe^{{B|WjM6l^sO=&Z`3cfqPU4Dr*~bl zhw{@1R1MVvk`s;giSz1A|3&>wUhyb{(Fd;;ZG?EWRvoO&Y8umi)W;?d2vx$OWLeXA z?U&)zp3pckDb{3Tabj_2(I8K2G2ST-EmR3hhEkI?Jhbaw4MoanT8yZ%IJhKDkz`-mL5);D_dm(i#;B0!SyZgIWP{sH z3zS+^M>6|vuB5(Iuj{|sPjb2Hque!~#Vyo!q6z9hje1cA!>(wmuGw|VOV8Os7R5ic zv;K_Em}aSW8tqfA!m4pdp=tL=?IXl$gy^@vt^b7vqvpD*Gw-hct0YP8EPLiNUXFN)%78i1aZBaqVgW|&f7Nty~QM!|OxvZ74B~})BZfo>l z(|}tQ+fl7(ttB3@8;7c~dRyaPBiu9%>E8M!jxDMxtFF>%)CP*$qRxfO7_HWX4Zj&w zy3v(Rxt-GNNnUG7s~Lf{=tg@WA+9HFCGuheGvYB)Yo0Ze#9?K zqm@Q1?q(Fivt6lQ^}LNhI}-{_OE8q1J zZ)zlYV^L$!G;ZZ7ybH@l6ICm{U1bto5*;!O8NU@)tZpXV8U0cXEw$O$v$U$EH@k{i zwxxsYaHi1ARtS31c&lT@AdMn;- z(xFnDW@6Mxqg{yeBqO6%>O0X9qfs{ImC~M3TJ@PwBXp?O?a%Nc8Oi&zoOR}1)X%BwV1ZlOk$v9yRbaay%nGh}DAoHTsN z(UM|xRl2@fO5;b=(zPGB+(Kj7?Dw+SSPPj%o27Ib)h5~Qrd%d<>$+M|G+a`$o>lwHilw&Fm{g0Zy{uo9 z%KAlb5e+o-{$EaRXjK^vwW1#;TdAC)n)aM%mB}V*9gRfMHd&6Oqqq3a`qHYXr!iHj9F?^}*q>1RdQ0CD{!>Ycwa?PWK(u|efBuJC2qzjwuBT3n4 zl@RHJ0*XHBPxufQw6Uu>6G_QtiBpRTWjdM{N3QJAvXSc0{D^GRn!9gT{MNb6k!Y@8 zeW2&fGHXb!{BPtde~o|pHaSTt^lr1#iOQOkq~5Wa3H6qDcoLN4chyYv*bY$;QDvi` zM$J4OSCma-!%|45GVUP0xO=QxDqU0iDJT8XeUtf2E)^dQ74fGM=#J&gd5Dr{p{v`phcvVG(gKX13Mo~wD6~mF z(#R405MCQ8_ZDiP{FR^PwIu^eepIXK8TGGebdurBf^7PnT2C#hmKI`cR9oBHxvpqb z>nTa?>OHTg)I+94TK{Y0+g{4fRf)Yy2utPZuCj@8|m+`|m#V)DQG?3#_Q?v3JCfvv*R{EGdO>NoSJiMEP|^ zz9ho5G!O9}A=BhZ;aZ3ne@PU1gszxXMjX`ZHI-E()a-*fQp@i{(nNsz$hNDVkOzzG zAM$7r0)!)te;dovAv7~&Ji^oXB-^PE4C^M#*_gH8rnM=B`bB4IOOvV0R$`pluwnhJ 
zKJpNx8rYapxirftUMN0jT-?4*&K4$=Rw<1K*l*K1BsHo<^`!2r#)by7UFqFIpYdf$ znuZ9ICxug^O&XJWQYDl=C9AjPq;o^E@TRdQ44N*nTPHJ`Z4$KEs)T>x%^P)U{oRLJ z;Qx*UJM+HPhpMIOqyAPM)f%#dnA~gk?aW%p+Dz?XsYQ?Ugh?7^c~P5LPDX8YU(b2A zL(fuU?^3JkJyuSWwrq3?QPz(3U*k)DIMO$TbjdTCVKn{Sq^m}*s@5jyC^xl;+Fx|c z<8_i&giuM&#__B*jUO4OlXb)6geIvQjZb3N-r!5=fb#nx%G+hFj6=X_D+>ddbaqK z@gn0-LY%Y>c?7F`qIQy_gg(WpC={lRR;cIIb7tWdN`#63i$`HXX@pDTfu77}DTN%5x*3-j25rRY zeTH3~t2~;okgTB|H62Z8uyLyv63*=0xV7FTEzLN-dWYz)+2w^K(FXOW{76j}lN=_hAq$#*CBTU&hkR|+ z+y8K0g=Dv!Cn|h@vNk}o_sfy-YnVO%p4^+WDq}4k=|R@_{7H#xIn#{dR_0-A(Q7w@ z4b7BE%au)7mgxi~sOOh-5Ly4sE4H3Ho5@*cLVuDp(u!o45T=AGvu5PLo!Uk1rn;+b z)g!7`6aR$EFR(>U~Kx5sUTI`x?>R-$a;grYH$pNulu46tY86U`DXOmmRV zDl4(B$Yy2Qv-q#_lICeMX`)Kv#YXSsk6@CYo>3kiEl!YL+*$85Nl{c*y0FRTInJcl z)N#ej!Ki8-d77*sE^oFr;ZY+>-U#wEG>fFX3vI4Nsbn)WOSmYrO0G6gD~OgF%@>;0 zDxxIfD7$4h8~LJPYGI+(#-&QC_o)R{3zb*&Q`C@ulu8stG$uj6iJFT~ z*U|?$u84ZdH&3<=v-a7%f0jP0`|p`Nsnt&#=t*h0N++r&%~yyM1*?bB8UHoApv}CQ zl|=SKk1GoYIy|eRG+~uaDATt_i1@6vt-i%U#CuIo)G7$c(k7XxSJb-V9475~Z6s-0 zeXd>?tuflAGfN@Fh(4+vZS;#e8ub+3L_358PulZjNkfJAHtPlTqe^6{RTi_wS^7&JXiMtv<#s5c9GZA>5g$!<)ku*wwW%h zu^}9p)TTNJU%H}0H8pudDO5W>Eg8rtq5ZP5T3zb7r=*5(!BMxdck_rLx_6H#=vegsq7{_>5ip0pAOUQgbk14dU!HvUb(8z zg-1^hQ4UrnQ5DNi=R%&+8RGQY@F5A zPISoQh-y`%0!As-&Y}_?GELGn{$xB-cRjAPn|B$v)Ehk=#OR_?6T_l}z3;t4Pw6>*Tm3DS#*bP{vO*MO$+Jo_s?a0;qGx1- zQJp38s%;cCQE$^IG=0o;Ht|=rgP~mgqd(DSqZ~qy@Z#yWHU}x1;Za4SU!HA5|2=wU zbkXM1M72aiMQ0?z^rmIh?9tx9N`wXiIkl&FM<4&#&7uJV!bv{XOp3racPNUu){St0eR)$P*Oj&Pv-^NvSN54e%?8CqR ze%sS_raUZn!;|Q{p0IZEIES7PvQ-*=EQMz;k*|9 zBn`tP6}5(NY8Y3EJlV*&n0irHbeO&%IwTJ!(V<=Ro^X>ZVj_doCT}QWO3;fNNms`# zM+OvaAz!UVDHJu_0#|FvO=~W8;sGzM_FvL96t!J>2h@^oJx^(koY?_|3`4N^y5{1f z#aLS1k=!7BNg9>iNF@+jjVhYIxe%bbYBiE+5a#nI-}P$dYn#(T!ilw_MxC&s(JNi9 zhM8SyeBw14!NQ@&z2@QNBO^`E_>81Mjdke+W-ByXtZ^N+xM+>Zf@V{+be6C1r@mGi zy~SvvuBmU$CSo+tIJ(lSFKxa>crk9Op0*s6laMRP&ipvV4V0hJyxsRCd6{KYm=%Q= z#WH*dJBEC>3F3(2?RI!LGYL}n zC8?Pvt6#b=iP1_S8mEtyPrTOH0}MZYYqJ(h=z zv|;&X$^Ii-qx?(cx$1er?kv_vNLMXvj?AZ);mOcvm=Jb^D7!A}qG+i+ 
z5=2X-1xxRd{%gJwvd74;L>>#f(QW+A!?@^?XpLzGp7betV_LG&T%o{_>fu*+g$P5x z@pR$V8v))}5)M^*;X-#+GL25zVrq(QN*{A{)!OUC_~Ht zgEXa;{)$#nTf~{xGSx#B)p!5$OWap_v%DE(*|XKb^3jv0s`)9~%3%46ng_kDT=%?J zg>~gF%ZWUagmmdcqH^LGrjKZ5M;ur<7PU8yq8=ChGP|)*A}VO}NQQ{!|M{Y74Z7Ll zG_uvATC=H@U-Ephm9V;I);y(=&zn-Jhdgf=8~3u3sVBt0?U(qNQDLD$$kVrIvaWk_ zq-okhg2|xjYwK_0WWuv(kf@A)>02pv#SV`Onbu%3nMdc8&iJnKG&xqXhHQzZZD|F7 zJy}V;R4Z$Vmc=yXyh(Il96&yS<^gDaK^`qstu<~uyQcZZSpLQvR8ApJblYg6Y38Eg zCjFV@XT5CpJy9gnVvPohO6ZUzr*FfB&e5}Buw*kS%-am8Icfv!Cw9g9utayqBVA_ z`L}2R*nWIbmg$yM;Iy1XmN)W@@|gd*C7jUhIMbE(bZfHaxcvj6@1GhyP@a> zo#Be~C&fO|*fAa}ON!(?%~I$JgRVI4OtLa*(c^pSXN^)@8=yLh)@d!BdHY$Ngns$- zR2D~DJABUMlctCy76C*Sb(4h^WkbkpNm^Z1E14D|{ENHWxG~$HeoHE`wl%3#+(9(l z%3;7%y7UUnF9cFk#j9f{5M44pC)y&4ru(LA7|k+XC2nP{uGQ5Z9a0bI@Fu zex7B;%B7MSe$?KkZ<%dYEg-Cl=kKN=yZbL<}J;xQNu6A zxRB3rDgCeb79s9S{}vrpO7*F@opDX$!4`K-@OQxcjCU)G=Me<8p)r0(dtj@`J_HJfFVT}5mNZ*lt(fkfHs-$~MDv(TI zoLSVgk(!!hqcLnUuZ=rS{-ZylW|FH-nzk8Q*%yp9>Ri9PLo`u`Ntk8)wjn!sKDGkcA;EQAIKJ;GW3;M5U zT|$nioFT(JQ*0hYND?KH_UHW)vb?zr^}EgO>aK@L{kK(>=1njCQaF=6Md|G+<)VCa z#V}&uqDD$@D=^JhrjECpmTha;Y~D>=N+Zbli)3+)ZH-xtO(9V9RHZkbuXB~HfxNf! 
z-)vywV&YHMqOz3=8N!gsdlvu1YftMhjZ z{x*G7GKA3tuTH9+aVE(=#x>3I#*N|~rBo{KOwy}HmY%YECRHR{5l~=#i8f{pOpjbKWicwuU`V@Ukx{BzZsI?Bss*05&Ing5PXhn^%sBbTNk;b4T zN41(-RJLnt1C`7CchoB?q37i%eZWd7sbjb7<YikymNxms=z-Qk3TK9S zqrOJLHO{1cn=h)@qoxUE*btbvk*M41c=_FHEjXEhri1A>|fKSG@PLNr_7-kEA`vYLl8ZvXdZ&v^3-8c(8= zdW$|*0+Yye)w^bQJW0*wD=bD^fSR=7Py|B7;!#W?A<#If(PAM-_Cj%AdCsU+Bwv^< zNxF$F2x?D7!;yVKb^^7V=9_G!h!Ps774?#IqmOE@b`VXmvZ{WH7H1l$(Jb*HPZL4; z4x*^uOyt@B5znXxy;M?^sw;^k+7pTq#AD4zs~pAAnO;j!ilrQir}`Z+k{;(h`>{Jk zbE59=M~nPWc)oC3;qtBKZ2N2K$q>~(p)d#>*Q=XUl$TH_n&3}bJt9f7~u z;pVu2%dUT`*#~I0|1keu&SEff3sLWHr#(L*%Kaktm21u(M_cpvv5VFNL}yw`ROpQF z5B3cB&EFw#b6{HF^T5mOUOX@G8D~|2Y5sHl+nh6bG?JZD-ekX!t=T|f zHoIS}^iLzo>~jCgQ19r{=$gpOS93VDB7vufm=A`|=+W#s6eVy&-=XyK(j>!u=8KfH)WnNiC7p*A<4b%f(~t0Wdi~ zGZ)F-kbN(+ESa8-6W1F4m^x(D=yGD@s2sc{|!C+BSA;tY`F=(1HGO z?iuU=Jgo3$eoM9~y)?Bkbv66`4NUJy_s$;9?j#9zXgY%ZBNrB~b~hBh&Oeh|z*mhdN=Y(?CN;1baniRc%S&l_}%fA@fIb$qOowdpzH7C zU+z1^{g$0XC*|6()4jva8k4i^m&QIPjrju$7qf5I7mWVz*{f+>VGjE+y_@ZkxjPj} z<`SugL*~is&4n4xionMp^R-ZqNIH6FNw;{<__HN1#imAw zMt%vc3H<78c7MnplFcP&H{M@=UftN*Uc1KZ_-)&`tp{u!v~}6m{kN5FtJ|908s6T2 z$NM{jwU5_MsH>>2tv|1EV&ci9+;vxWV-MrUtk2l2bu zpQ${*Bzs?Ga{9c?G1;>-zob`XuFYPX*_b*z)jU;`e81`83%M!yAUnbTk zW+x^l-bzeJ9Gv*Bv47+ChHD!;H$9d5DRX1)VRpGm=J$0!bFK*fFI*oP7ppJ%A>N|2 zPw7jgL(A?cpH?vt_WpDEpt2K7UyiqpPbygx`#jn^@^k3);QjvFd>h#{rlPQrU4zzS z-=WQWr~9*0O&omAXW#d$+3EaZcBtvePS+O{P9zreGWLFoIGz3d0(S+MhoX_Y*put$ z@R;!4?AxuYOz|yt*Rpd;4|W+`Ti8jB6S;P|mE?E`yFx9`<}&B9_t}Y= zx#*P)8 z6SGI=`se=61>KBud+=0hd~58bl5z2CN{5&AEO#nODvz#OQ+0AxsB&3(vh4D*=4B6; z_K#m4dyw6th6FG2-|UOHdlatBzml7p9iGXgvdMYLmZ`^6b5cL0@~IQk3)B7Cld4m8 zYj*Ga$U;AMh?tAcb)D~Y_6~eMaA@eL@U7un!WEI4=(OnMXz%FXk!#^h--ezG_6wZK zep;iQrM?&34-5OT@ANn9zSKIun!RtPXJ5;{nK>hUT6#WIIGH*;-8#KIbyMn{1Q*qCKo35NnV%QkgRWduj!PgvlAT? 
zlM=%clm0)Bt^qu*t&5&>HHhsZO`RgO+tjx8YTLGLwK3In z!ranpi1!Ac%D#JjC;GMZujOCJ@1ReDcfPrV`H(5a*vSwi{^nXx16jukYonkUxfiOQ z1Ld_i2@klNd+I?Yc%pJtNmqKQ0Z?yy!gfFf?iyJ@DOUl0(J1KC>_F1M!yn|M_!rzF zcn&kfv%(F&uTWXqW*BNHE+vR%#d`<=>xvKfwOkMG9MrRVlBZBi4%E&1?!Vfm+Esa_ zoS}?RugM?W7d&U>G9IHl$-N6ovx+mpEx8Ljf8_tpZ|j(k=Z}U(e`Jf!%g8_AXynXw zEs+1HPA!%ZJ&~3ZM8k4ZbFWOV0PpwSBYac*R{7ubuN-hXU~xb@|INNepQ~Pv&4bLB zOt%f+h34E5GF`9Eg0-6J9Oao@TVC(E2+I=W&T%I~TkJ7($+DDr>RK(C`9rtnBAvqp z^F5&X*9#iO;lfRU35$en(Czi-7IOhYsMuFXh7#L%s3JF%W{97JYGNs=qZBMo#giZ7 zM)4>3YTSK2fkm>}(339*_2b%XoHj+xfdX?YXsC|xB)i|bG9CBwqV1Ah%S+R8!A!B*2r;v3@yuP45x0|EnE1hx!{4_O-e zF?3bvtB`}iaY5w+%zlzjviYV_mLkPboR@A=8_V52o4bCv zSIge2k9I@*%(jx%+z{cNxKUE1`iA2M$rxz-V3=!&G0cZft-^Qayy$fJ&NI1ATqjZn zidvQEcHx)Mo1e(}@E3)};zwxlj_11w4soWK#J$kFVUD^$@uQ4ZOS zV5jyn^EfLX=W$H-i0pk?2drJKU#y+7E?N&-&RH_DuGs219=RIIuhqtEw7!>H zVL0v8-gl3`7&IvOSn#LdSs^1sD~2V7Ee^{Ic@;Ry&*^=@YpHpGF_>Gi<)`gk7f%AH?uvuIQTlZcnFFh5@O5F^328MaJ zaQ$gC;;ET>Yi&FBbw_uQGdzE&-I7}{_hHVi?3LDYnb$G~X0*&Go^d9w)uu}ElTU0{o`V`FJwC~VoV zi@OR94Zol*w+K3u@1Mm*2cZ)5;l6Q3`b*m&e{-L6yIjj1Gws%##o3A3yRyDp z>=`s8E~9G3;Pk|_>#1YX)bysAi!#lb-!m+k-Lif23cHe(UUa(nUeYDWeARDbaIx?L zQM01`qCI%#PZ2F5j)gZ0j|p29Jl#LoYq3;V_{t5VgY-Q0yJwE`citv@UHd3|pS;ug zCmr$5YR-wyyRHfHSItDH^Vz}yF;XgNcx)_Z9^f_6JIi~dx7GX$+IX5cUu+^S7anuX z$P0}r1(geOh|)qa$wI`mf<|4)4B9;0`oeW9Fi>Bk| zcHSPJHNMe4k~z$$LxDqd07Xt+vND>)XqAe zxgq1<%SN9R`Wk$eq(rIXasL;lu(DRWoOhlrJSdm>$S72yOTTx zI@BSaUhZw4!|HE6f(u97Jy@6^&XK|lmke&>K=Wj;VqX1CgAL!r3!=aDT6`|ZTzh&= ze+^B}Ci+<@j!lAI=`Hx_A4oK`M9=W|gml42SjT;*OK1!?f^W;$;cU7;quN#FoNV>D z-9KGs*FZ<-ysNg&x!-d~*qYfo*hb~{$l0B70_;s}H*#*-%6k5x66X9Oc|dDQiQa_*?gifr+aGZ!;$D~l$%6|0H4Py;{3tK2efBT{XHp!IfNe@PC~7Sx5@%w%Ll zs?%27a;_!Ui@Qo+BFj~n?1i%24dgApvDxZ=C|mo<6FgNtOWj3W-JHuE_41eHh2}qS zn4Iy>0Oxf_59f5}ELRuTTvubLUVrsWaqo5S^H6B}u7i5(R!^{JlIJDf_c*6GCpd38 z$I4mSJu;j2CSz$J&l}d7dwTcuZR@w(UkrHfU)_I<-$?&HfwrLBzzY6JKAXK_yfVxm zO&yKhVIw!3lxE&gabJvF*E8*f`VKm>yVY%4Ble7K)rXKA@*Ju-!$=?cg3ID- z3&(khZ_HKaK68nDYrYRMHGBC`z7n^XM$#&j=Q3$^x`3R8n)*xj8oAZ$dTDYGxv!s4 
z0y+e}(0kBuTzg5Zb1|gs4D6*^%ayZt0>*o`_M@4 zq?DF#sZ-(CHRcV_HXl#t@h`*|Vo9hxb~X+&wKPe_BHYYvl z2IwqZ)UQ(``=TlI3NngiNFjig!_@KGYGhoaF)C--UiCVwrhiZan3I)ZI~joo+)4eb zrFd40r_rpmW+OM*b#|WY(p#zTNEa?yYf4Kar?!kPQ2)@Y+%{zi6q>JS?V#dWg;j+P zaW@jCm!p@V%h#6M&wA(uFfIw?D%87nv-MgwR62jKi=>JE5398G-<~!;A@Z8fD=hey&fz;GCIMufcEhf zE{r@Nyii^bghpdO{XTxTJvW;OY$DCTetUq3wvql>D}nr5E%sb*g74KKFIgShitLl` z>yOAx^_aey{6_Ay8V%GZFfTHQt<=XLSDuaMnho`-W27zfrGaD=xr0o4FgePqaH4*S z&EnFuD*8J9HuT=hQm599jip_otGYw4%Z_NsPO`q*NZ>$IkXOE>w@3DIJ#-R#AjYuj zgV{9QL!aogwcfNDnXEBdj-1jWkwM-I)mI-V!j+((^m5!-I+M*M51|D%mfDe%FNu5T z#%)DS^l(~+%%ywiP-H^iK<9aez6P;hO=vsb(;Gl*F$9*z1O-3Jozml35dEV+VT<`g zB#8~>YLa8Djoyh~gLYeet~>JmYl*^MWBvH+gD_|3^<37LrP3;-q~42rgPC1Gl4&Qb zlQy&uJnOP#IvWPt+zmE}qd#G>N7H5+#)0ppnRNq~qkU)FX+Pr9rsM94>+QLF@Y;sc z`Px`AfuE?yu@dw*jc1S9VtSZ;*WS?M`h7(vtM$oRcYcaqk;QN(L?s`%G5Qp3IN*p@ zYy$UGm)L$947E*f?5^)v3A?!)upRs8NPLx{%~>jJNgoCaftax6->fgm*Y+a%zClVL z$KRe7WdrmK(u}o&La;x^FMkW#p-rKn`jMeh2pKA(iMMC7ug*gmM>o`L!>bv$QBlCCBhtub{d;pREA8^pZ5D6QHlB=wscch9c!!=(m}0 z>R$(LGo99@xePcCvg<~=hW5fqSeX1n=OC}VkYvzJq=8Jdgf8Y9+JdCPp5#HLa5ydfe;z`r-krX~8FCj^fnYDoq&k!Z!w7>y>RX(FwVVxRi{_D}zi1y2r)1sC zy6KJZ-Hz}vz;TAnF+k06J#egF)Ss@>H$h=rV~?Q$+5vuuH*y~D;A52{ZJ|`S4X5lM z{UB+MIZ1;8@h-g{R0|J6fA}ZXM+kJY6On03ffnc=U`&1K5Z%cl$SYXyIIuXO!U)yT zSNcdM(O#sM{uQd3i&!SE^NKyzdg3bU_0eo8*a^QG)N9#Pwi+5D|)Kq)to-XeCE#d_&q*dslM9AWh^ zE0f_nOr(#XAv;I^NwVlk%*c0In2aTz=tyYw{-e)?uE{4hl9Z%ovJQ5B3R#Z+Vx>tx zy**ins}y2sdKJR4&#V>J+GpU2zTmXpfST_Bs1wK26kvnF$QJsMPSDeZ zXJZ~{of^=EWDogC=R$9=HITIrP$gca?1%XB zOJ?yTiW@+)uzDKOF3{Kwry`k)m0t9unBq>;P1rS4aAf5GM(5~F0-^FQh znze;u%V*ff571NYs4rysY$i_Qn|P{=*r~xdjn1=a*wZok5{%qo_MA;1Kj=>V8!Hdo zxEj#(saVAVtEYFNB0a?ZXdm_3WCx4FOzeT8a|tM+LhG3&f(3jL80~d^7>glh_%18{ zYCHoo{GfdxuSiSnHET}&Nj^&gCOHBtYO7w0#Ote=O)rQ2HVjIeBgie-^a$Mzb#Mpv zn;kpFSBum&(ga?%AN;C8z_!mLFLDKV>t6bhCg|_U6`I5jVN^N!j{ld)E6TWMy*hJ zK7%Y~4OlN8YC2cwJw25bpu=(IpM}3TpWel(OSukE1z!s-=N9^X7E5$CS$}|W`H4u# z!^WXAN(2qTE}IXf-dXT-rjV2Lp;i=DtRU1+6Tvij5A9kLxrJJa`mpEo$VE~J)+8N@ 
zv>WsmQ2b5Ppr=P(V8{Q}kM039+>1KVXQdi;axipP>tT(rWB=-v=mM;|&2&2{tP?t! zW&(A$=w|vA_rC)_^^P6G*cQdiTd{+8VkNDFH<5~S$fnoOqmgY6#_rw@Yc-mjf&KM{ zO-{qz`N8((Va^uP%S6H)Ehcl=9@vK0ut1G~Xj9DAEcjy`peVZtnEN!^38TFNdqdR| z*#Z3vJ)ys3LvS{=W9{_HbeW#ccxbf0!Ra=ZECqH@1gF(V_K=mP&-Hv34j(H8+T6on z%PW!u_&c?+OO4p4P;O>T$UB{pd2~Eh?p4-}4x$ZV0|Q7RtY$Mh66fDly&j#+6`(#k z&%L5ww4W@2`%H#v%k(~UxW1Nc1b#9OD}K0s0WnTD_+D0Ms$a(m`BHzVH6f*FXPgwb z7#Q;Cci0MZbQ2c3IXv13dR{N6+h_p%$11Qoi^w)M4g$_GGx}X$jn5d2>_AEI z08H@Ne?rMTl?}#BjMu9H#h68M@obOu6Hw3PNo~49-;Aq!(+kk+Ci)FFhFr!D{LadO z!C}#ZxO>1>TEKq0a7wJw&#}hfGgQYI6{!p%6p+9Gb5!-dfeQjZ8y(?|X)%|;K_`Dd@1Z}2aG>(Na7rjKfG8L!h zS*)~iWCWL`cZRiCM18rQ*av3fAU4(l`}aMog%c`>w4yw)l#ApKqN4=(EtLM%>OF>d z`V;K`4z@-ALkBRsmQ8=qqWUu}0I~UDy(1fe-&hQrI0etO1X|OLfUktYf4RwSfJM>` zYhgXq=H2855T`n1n*I$aOcJ;;a}dS;WkGz0M)`i=8U4UFUBU|Kv-R$J8Ty3|#2z05 z4`C^*kKXWKupd6)x8Lf0h?~7)Nr)P|!RtRsm*XsmfoIYV=Zqbed^5cPYxx-&g!SN| zbb$}>7Cvt%mEcid#x9e!VetA3BIc=ppNJy+u?t(l&)7jb(1WmXqltsffq(44DgFVU z*#&ED9cu(XEE0;)y+|X(6Z6Pe+^dKE!HyfGPsMt3V+3}C8B-bx*}bWSRwFwRD-0lc z7`GUB;Wc43ROqu0!I-v$+W0EWepk{G{#$uiu*Uiu){jh}H({mwV?8X)Y=AfLfK7)# zR*l_YYuPpI|L)ie^|>OniSCabu^ubL3C8yWwqI|K{je6h=mSXvEouwNC%s8i=r6B@ z_qY&oBgfup8<;md(Ilv>t)csI+TN#|VH2xEgC4a6@V`#$lHQkX1%|O5S^6XJ^j_hN zo(HdO59iHq#jJ$Tt>Cd-1VUSYC2IrN4@96naMHGdZ`p<(2lr@=J`O8sE8??i*ctcW zQLoaw=y#wrpMX1UMgJ6Uql;dKQu01bqetrxX5+ zg^kr)!4KHP@>nsPZ8nn4l35wrhBjjf`ZM@e57-x+ubE^mtHdT@tf<#!hDWpH$O=qy~q#z$dTC*#9YZ?fPx(mv3 z;hK$Y1Mg}8nFItol$!Y)WINkJVt5PTaKgUFDZG=p^?n%1Iym)*(L3`rs zHiCsC|5thLgV=K^-2lr_6a1=cq%AE$0+2}^tY1Wa6rW@ErC77)Tx2>-}otEV0VqNak{lG(d3!8Zy6!sLY zgq{r6+h_e9m~b5sVQ z&rdj!L%4u*`xExTXyiDq=(E{Voc$xfS{j3_S9h(E{+$QIhV4V_av$gHf7}lJEpn;J zyp3<87uFd2LMpRdts-&{2jtS)Eqs5WGG7@-?A!s)L5g#IXcyK&-^LZ?Ut=vJ9>WO` zL7!=U>RI-NjK@qqV!g17)@WliBdx+WBsEw$>LLf(do7WE;#0JA?VVVkmsM{q3BKER zRzsgd>%(VUNy^h(Y`9AG8C+MLX}{@1l)H4*Ucna|u4iaJbvIv@sNnkjr=RA|X-~B4 z^ayQ%tlPgl#~GlKK9MaauMrn5;9jt|+9JI_Sq9tZkCo=otmKq%kjUyRRvF%Ry!sfY znHPORnf!|^=4&7#pQa4q5=93XYSZDZ-H@v?L0pS`#&%WG+wdje%S7p6>?t2f{wQ`X 
zQ0T^%Xmx2ie~tB)WinWL%nZs>ek=DX?av)S}3*OgRP&kL1|TUn+okc$E@&LN{c=NJ`VFrxNhHMr}vxDu<~6I=6% zp51a4;WU3(xuLY?W(X1FfLcszPZJFNNEi1FJyp1gD19~A&#uT%s2~4IU&q~6lGMu5 zb@oIlPDisOZZ$3DnMcn`;WS1m?p`R|GQCl|tG9qJ6(W@+yS&6FL5W^1k(piM@Qm`~lS>c`}yyfO= zcW8=oT+0(Ks+;s;d`;Nge>?@~Bl915jN8NQ;{CaWT5nHhnk*KF<*KZ9m;0Ef3;P|j zXbr9{^N17KF=aMb8OU`2sHJ^LM$Evw1M=#O&+zqhh{^dVt6}4ZS zTYsu~A=V66Ggy7TDXGbZu~keUrQ_^1;_MQLsu&o!gWa#%1uu%IF!M_IyWuygG}IM=tXw zSHm-bZ_GDyEg-Y`<=h{_DL=?fI*yLw)@tAN>iRTRUf9OfQLm~o{1d({d9So&ufz}R zoE9Kdz+Rst1i|l2Bu_O9XAs_l<=C7{dD*;x=`J$6k>fukss%dQ!fc~DX&~pXKy*{Ds>@+*RdxjN zu$wK{>T-|B9rZnZ3T03Y8R0{`B(^6Lm8+EV*3}wmv6#-(hreuh(UUA8<5?+3*`5&7IO!Bh)&b`Y1I%J+Vps`KxwXY*4OLj zF~>&r0=PPwRu49^AW^hG>S$(Te#k!$MSf%i{m9i2O7VS>zrQCIFzz?3kV1eo|1mf8 z(#_4hI(f(Vbobuj-Op#cx5ulW`M1%_)Xa3(bp=RW1xV~@@5 z=WHrRxHIgHbK>)VyKcCTJ7V*WJN|JUbaSrRdCvU)?sK00o;|LmZl>nJ_qYUJFfx78 zKJQim=HM|QL7|&Mi-s);zaIH6x^m2@=wDIyqLQOVMv7tfzySYjKZpMd|K7gT{8DVm zHwDVtnsi|a+6{1iJ8F;A2Ff1K755uYJ!O@;R^6vKfFCsh#?;R}2F#e1?uPD>jv9H7 z>__q^IJ(*oX79+Z56u6Qt(Pq!XKap`lbGd`GblGb_j2y`oB}xyvkK?bvlp{X%ITD| zHs`+0J8xdz)%+#-ykmi@r2Mb)QtQN5_X-Yr6V^0hS43vSm8j+g*Tp8r9gI5=w?8f} z?m*!wF`41Vg6{?F_Z#oi(@Qkfm40&z^;%kYu#;yX4le|+eKxX(v7D9OqUX6iLWVTk z_{ez8;1ov-^Z8HQIl2t-;Y00>67A{fyq4d>{xT;ydrr0|YgCrQ(!kQd+Sl^XlATpG zyKBz8+|{bbM9J;(-^(;8Tff}K^3TeaEA=eydBGCVzrvj%i-I2e=Xw`2 zxb}*S+t)Ykcj(0O(hi{&5LEi!1hs_^MnkmC1 z8gKLUSa&o6JeD`jb}y$y_F!vKOI}7o#_^23nddB5tW~n2v!v{OIW=xZFOw* zZR2hAbJt~!unfptopC0u`kyk%6Mql?_eOYx$Wxd>&lAnsyELbI~NoZ<-k1yv{-B^RKr;c?Et%;*QirDK zKl8r!`c&$}sduH{U3NI!z{0BK3ET_{arj1WKl{O+REj2Cm zcv{Jf^O=pU_p(^FlCvt8vz4{I&E1pJBP%g;UuyPm-yfa6O!_$Oec0PNuk>e~pGYs# zzMQl*F^f^J3$H9RE@o$e@kMTzURU`-ofD1LG%3(vclF}s$`|h)>xtB<&A+1!9vj{#Tnas&G2s>8WEKpGoj$6f>J@Rn9~tALaqfg z_Wf%9A~ol(YtKAsj$O7KYj*mw)F*$mKaEqjrTV75N!yuTHX|dWilso-;q2=UrA2;-uQFJ_q$*BezEx3y~kA^dOvZ!n{53h9gWx&+p_SQm>n_43SBN< zEx}fELZgArVj2b25X!uY{SebA@^|oepG#6lZnEA^dFa@hGbyud+U}Hn$){3pW-W1P ze`C5apn@sILji-Ez3dc!K`iB2JFtZx#nzhW?oA6kCk8gefat2>WlkN 
zraTC`v;6*ux4Bl?SUWm5ZZ%dhSKxYq+_*#K&emGd^mEI*O)Athmnk1xFnWL3r+^<` zR`EFSm!s~`JTAL+rYWsqviGm~$vrGJo!R7-;i6Yl-}h+p5fMB+?0fXD!c&XJ7yVGE zR#aAqF_8Io@`^LqIGHt5in`j{*JTG;`(%Dg|CpNb$DZ0TL$S2ZYLI2la#&wjuUo5T z)y}S#vnP9{rETh;A5ow0zbo?k`19wFC*QqvBkgv=tGbpNQbeQ>_r6H|!dqibMy%-fc$QzI zzjX_C->fm2^U}xuDV;ndrANkUTV44nrKUx`Wddsj)eSTRbq{?Ql~8znk+res3q(b{ z2t4V%#4t}N#GeAXQ_>5i-AbLB zTI$cbU+2Cm?~A{x^yJ{3l{W(Ku6jE-rGIBGbZE1A5+TF)UjF1od?-hG0A&HK$+m6 zpxA)Qfzw0NqAwJ_5j&~SrzmU4KEL%QPP~T`Cz)8ZhwkS2rEMp2#%FJ~wzmwitgs%< zx(;^yf~+Or8+OdxoVNAP(v;w2zn@3He0(RoJoxbP?J4)Vyvem5GgOL9i9J;~E2dV& z^bj?yQQ@8CH`eXnxN`k6mA4nI7_r3vhRG@zIRU6tQ`~3G>;{(HOlO8E?PAK-KTRwd z`QG{%V^jYTA%nt>gzO4@7N7+mis~1;w`ildA_f11welZf>M3|=PimkktfZWspPieP zEoT3~7`Cz8wT!dwx9+oQ)|FWwtV1#rQy=`Q{Jq~7+s8TY2EDxZ==KBpGAMbsIyqo_ zfyPCe7VaO@I&56X^{BNahE_|cKO(VP)%@ZYB8K=qHVFK4_?F}JLdt@?_SX98kJ3nb zFWB~Fsp|cD2C~z)}22Syr?Ums_ASSy^l^We@JS^KWW~=WK$&cI2cB|*r6m6wC z&zYG$G=rrL$_UGNlv?Re$F#CJeLQWrC8h|!grHu*D*|fxD!#Kr_(BUxxJreU*jSiG zZVo7GO68BBK}nX>St!qnxU9CSIbE`@Sy<*zOF;JQoHNKOw>s#we%iDC{)Ojh} zQ?@2g|JCAWLUJSPc6Wd<#Y^t8)!m4AP~$^M-~K1UBI;*P6QcyQ#hpcvm?CQ*D% z2Vj<6Y=`3I>XP@!wi3Qa!`urw)on2jizk>Jq|xFE<6SfLs%P$Ic6o0L7#zASVngKJ zh+$!AK^y%JcnvpwGX5}LHT)6Gw7B-oLtS(8zuNoRHT%iD9eE7@DvJ{9Vzxaw1+#l) zwa==NHOiWrSvw;l{Y+Yq^t+iKb4I!L=!K-orsC#rrjMo%<`AEm{#Sz^hx}(gM^hnR&W;5)U_QZOMc(!<2$eglKF{nM%BdSwv0zQ~c zo5+IobEv54gG^vCpqib5rfg!%ThiTTXspHfvrJS-@zJ(f`TvVc1^K0gfwZF|<=-RF}r3Hiw!gzi*7;2+QGFRR( z()+dl+rXrNGQKv`b?Jn7MEYe6_iFF`)~l1LruZN6(ON326pPvhi26DAP?&<8ay5FGTgmf+mjDJfc&QgrLA+UC3JtGyKnwkWp*7Ol zs(+M2$|q&K+DmK7E&>xXvI$x_tvML|6M<=z1lsK(CoqB+z}UEhPhAI&Nia^GOQZ_f zqZa|Pq<|Cnw>ozQP~Q+BeCya!)DP>(?Z5bct!^-K?D@!elp_D2+IIn2@t2)~I^E*H z_X?9Xz>|y7@~9^+P8qWLd&vfJm^{W+vVnr6+!a$uA;0Q0INsSXV3 zZwE$Vb@f^30#giptPx;U%tdx#7}ywH@j2Dd7a$avOHtJ3`XW31 zS6?CpJc^sZLr)>c`xq5T7ZHyi*Z1n1!Fu@te4{Ax&$WR+MB{ocFfM+9Pn8OEMg!9J z6$su<@W##q(>RVlui*b51Cc(3-(L&1NoOECZGg~>(T4$HZUcNc2;3VEqf{0-`j(id zQmDU<)MNFM$T79kMmW1H3{}{J339U}f%XGi 
zVi+p0yVFs0F>*IMX%an3_tP!-znOSV0VdiK)dVTPsjnm7vMj{v@!&)%T6Y&G^sJ1`GJQNtF=YT^I>Ww+RCb{~k$b@qUz1N9n( zu`7cyK1%+>*I{7SGO3F@NvCDF-dsPf6<3KX!Bynia1*#y+-hzT*NhA1?$c?s9u2{4 zTfht4j^`;0&E+JFQz;hoD-4*r~p5nhbfQXr+vk?oxi_R(ZA39Q6Ez#*Dp9W_98dS#rof7Ma4F?#2L z_wK{jG0gTJU{*sh6Qh6)_0}hW<8c-Dcn=uq6RbHqc6UkqZc9|UkH*TL09x#MA}SEcp1PRIEocA1+zE{KRFR=_7-6zfQDmz z`hopr01A~uGJtA-#ae!c9kLB8yC#VPCn_GSzw(&fx?sX|!FLDXYGd&kLtxwLVz&LU zpYvg(UI9~FhclozR&O!Ps2MZrz|MLPHs>91TOZ=j>+BS)^nP{}f8WMv76Yek8&-%1 zxbz&@Uo&lheR>Yu_AaPI%j07CM7}3Kjo-?j=CAM<`TzJm{91kn-<>bbr*WIGvo(zR z5LyI0ll`!_f#5Suft~YUKWzt3bO^?-8|#9-+YPUNSXaxjzTPtz=CLAdVL$MkmSWBC z#CTrBx<87YISp1R4*a-7V9%S`MQwzZpk=E&)p+%ZvR0Xg%FkQKZuVBw)FD9hR%=(Z z>Ufe9!1=F`;`9LR2mX6W;kJ-1c!YRyBWii(NZX{bQhlki)E?ClrKR&?us9vn(uerU z{6_8*m|Xv%3&k1O!P)TqMrr}tLv@N8u6|UWDeu9sf2^ES?kOInn|ezP)LLpAQH^;U zOdkjMyz597c$+_H3vM&_fOFv5yZA4BD*uW<%unQ-@)7(yZXUACx3O2gWA;9Qd28qg#>MK8^ zsxZG|=JOapsS0PE#B@dtW*h`Y#l6D9~9g<-;f!VBRnDjd6`RyiH(qb2{6s{+T6fw5PSl&*O1tyWgUMbESJes+I4$^W23!O*{iUn>`j! zRe6-WSWc4@)l&?GwDc1=1s1N0Fjp)fjgjt3%M4EAaC4lOzn9(Y#RZW=L!m6U&S1zK;;8;FGn6gK$ zk6sl5JHQr!>2jC+hJ}lRRVe|>b)9o_|6u*U=8vN~^9H|^kLS;Eaoko~8+&&1iOpl#eOvRfl>} zt*L5C9rdnyR_hMd=sNBf_lzrp3X}z6WvPYKTKbL}Wyv(iG~Tq@bkme%`p5L%*wa|V zm}VGYm@Ewv7YY7CkZ_D&jie#Wv8-T>47=ut(;LJ z)FJ9+wW20#U)glouXayxw{+KacXCg2pK^b8e?g6Gf@i(wr)QU^G_aI* zp3|s&StE~C-l`+O4!J?z(-!U8 zZ}V)^U8Bp;(C`Z5u}6G>P6w$RMQv${eim4Dpf+B0D$|tG=w!hug7Qh;jU4?xnJV3s zMalt128NrYZDEbztF=KVo24`d9EoLIE;pFJ&Hv&p@bnuCZPCr-8Q+`d_(NQAZVo+# z(`hj3CaSUxno}LF%F0S5PC16>{oq-L9wi3PE%(3feeNsnPj0t66tUP)&kD@#4^JVv z6Ka*O$rd>pW3*5?tK=y4)gLOy&Vj#h0dqTzuOW05M@qR;VZ#(dYvT-4FY`}xS+Cw+ zv%JQ5757?+%BAimmvNTyvf-0dOgfBi0O>+aVIbchtL`g2xT1P1c2ui^JNuv}sdLpq zYG1XBT0|wPpW0P@j)-WDc0nrvkLeAV>^b0Vuc92cmJ8tf!k(w{iNbtTcx@Fn2}^~Z zf?1fyXLEfy8}QH1@O*db0bn#aw2skK^A4_E#L51|GBRcI}pl9oyP zq#cGBQ+>1B?Ctf<{N4P+yxsf;)wI)$KE^qy7hNKr#+(cmVuWD+U)lq^I6xoI{sZRa zLACi~@Raj$B9?-+xTx9C8R4Q*1|4Z?%QnQ>?>yf; z_dQ!ZQ#~U+{XCuVH4E$6;wdJ#mB(YY3oEmfeabeRQcIK_*m1uUP93E(tsyYdHoVN; z=lb#B(}%|kH) 
z7Yxk}InowMLr;KBxX+{fC2j}Z@wXmApNh&oUhANSsI}GJSnbDgf2#aTwj+!4RlX^Q zDL<5bsz3aKO6VQ1k@>>I-Us_O54^V-bSgD-7q~cn7QYG=`abYcL*c33#JUi;%ZQ-* zgCX6EoWphY!0TM6F?GD^hV|?PtFlFIDErBuJo_;Fojh$})z@J)eDmabeDKa#?Dr)3 zk?fG;mFYNHexONP2yA3EwYl0!ZGnDF!_+<+7<;57ErAH-CoRWs5eA8u#OvaC>5?J9 zRKh&MT+^IyDrVkiK51@ZUSN7=tZ2Mzm~Y6JN=c){Cqhx74L^iiNAH8z-A{kYBG@Qc zK280pZda?Qx6l`(7p&be%)t#@eX8oGodfH73wz5-!1E75ghJ^oY6rg|1E<+7bYv)p zl@N^;mdm^OY*@nSsBgT&4d+5RaPts#90%hyiM3+Cw4s_+U7(g%UnoN`7N_KCa%((w zJvkBH!U%b_yhlDK-$q}GeA%cJR%$4H(dFc|;;R-_Yv9@Yse`d*yQ(G8wPYYVW2^#y zRX{cEP&yk8-FW^Fyou$)E8(b^EA=t}FbN8fyA(Y;4Rm$OdNEf!cXp ztR?mnX7R_k54gu#;3Kxy-?5h1ud&)fH5aQeUp_70kp0jj>9q1u@mBxANmo!CtNp9x zYExJ|vJw_@n|9(pa82ONzCkCe5<*vDjqpK;5UYwl@Em#wFZiqGyc0Vx58{bPL_ zdKdG0Z2HgG$(UsLB&vKlelfR`oIy0akey_$Q75ntbsIR35ue7Q6B{bMzz#ePzOp}f zuhFbA;+pzw5UavkYhBeuSfZh-ul9*8L4Dp1WDH)>mRv19LR=zcN`Z!0!%@i~eHVk_ zCx=MC#W`Y(P?5WfzHDvu4D>)7&Q54Ma1yLk{1qQq9Y5^A-AWYtpRDu5&@f~9sNmU&{%!Y>m8t~>iXi);b69a$qr>bg zDkd%3EtX3x+#8|2v{7m(m6M)IGYv-L9b;3|2V;H1HgS@$i7yOqwmkP8Ea~D}Z}=Er zkvDsQ%I0yhuN*8_MK6{=a-4D=e32xK!X2D`<>WHx`Bl|D!g3H+Eew4dWhYpXZZ+W zCHlaP<0Hji=mqr7uwQy5&Jvr6f|wu#@!wHBQ&hjDja1hvzUTlm)id7xudA-BBl--v zT~}OFE3|ij>V4DK5Lz@^h+Y^L8+6t0gzql@{edk5 z>UkeEyc1gpk=$eCw}$Bi-C;g@ihGv33wox?iOLVTCq}22@H+IOV;|r(;OznDqtJh(RG|12Y5oU^z57bV`7a*DN%L+$CKRj=(|N&N8_DsLb%@!J?zrhZy_oIf^rcLHJ^Lt|jV=LoQ z;~aDvlf{>aiZ_9;dIp^Hg8DmTh8t>Sa0>L)7Q>c)f`3sRJtNyQk!fJ^q_a?D(XOIu ztR(X5+tBZD5j^FQ$^-eWXM=kws`n>51D(AcBl2J7+3Zv7{p~5Xv$nRjJLre-C-+%y z>s&3TY3|kBAGtSk#$-><5^RDJ&$Xd9<%6#CU^Y7Z`URPT3j0m)>f{?8#ueBZQz>F} zNKW9Uz-l3%!!yI`2aNN|F`YoG&vw!(u@1kDJ&?QMEN<-C3JcuaRnb|(vDvZNImXq` zo$1M!AIlw;_S$1qCCJ<~Aw~=n=ZXysKBmd0Wu`r*fAM!UL|PAw%?%#05*o}_7qXGP z+=HsI#>kn~0W0vZHW>I&9CB3gUy$V0 zInNz;9rrC)1=m97FGpR+`ut+~Px8j2H&ebn#lFiPhxtEjv)bBWH5{~8uvb7^;*zH zhR##PU3?rroi1kM5!a89U%K`?+Bx1iJ~(7Yc_2H>9NQfk&O7e8vH{%Jn_3;zDUGKi z_~k-vk%`l#*U}0@hSA%6%v9Lul5RuobvH@7sMGE9)Jhy#TL@Zm2buhf9q z)QajQM2QpR$?|#mklY2`NlwT`5Tg`P{{@cJM*FO7WG%tcuZh^<3Ta1sAb*+0jYXy_ 
zlbOY%EUNS{Kv6QX>1u1}CQ4o2n@95U~QoE)7?WVa7SYkoV zgzUpKL}+DIifI2gPLg~?2z!*xh-LP{Q+a`Yk(<=B>Q^;fD-A5EIWk`>wFBCJ+FETE zzBX#hwc&{OblB=)IEl9@7Nw5Hqej<3zS3ayBP}AV5N3(}q_0wULtUe8EM+QVYG>+W z@;99^9ycyCW*Q0^wo65%KH^>>0TyEqH<%tqh1(`nk}Bw~wnnS1d1(ej_lwl3@F<6> zOVl;$b(NyiXfM3`7}@xitS6g@{uwX9>Xh-7iT&?Jje&yv@OS(>74c>Se1;CFI|)V& zQY_-sqIv~nj*FnCOavAZub0Qaivj_mdM2~79Aub}AltW(%|pM4qAUu?gCDXCe#`@Z z?XQi&e;Q?lSOs7KRnR@O50HwN$Y(ysu6v1ncjy1eMMI#vLoq^etO^jQNx({Ka4Odd z*~-u8kJv$2B@{!Cns(Aq zO1bm;Ok|u(BJ)uQ6_*cyQJlsO--Hb66?PBz@Bnvz8`r;vcdp{S|9~|9ediO#hGB## zaG$Ekvsb_;G{WDN^vbB!t&f_!MErk4j8iXghzG;6^w7H^FWm#UL>qYBz8Jl5JrAG# zAI4@g#%2cc@@vqUc09Vvj%5>&OC5pk8UryJquC<7<{?YHmTdsPe;$5n1UjmQU_^ca z5$ z8tq2GUN+#FDggVcgJ*7qoj4R-0G8mj2AJ0kR2h6mM*pq;1zm)5z#RVDxB0Iwfj5v% zAK-w=K;3@e-6ufV4j`gig}%5$QPnX5+3qGdfh%M8#i3S_*BQp$f|d9KuMa>B9ss$z zglzX=jLbZsyV9sUn0IG%_p;B-xu#x5%8z0mwT>_>z z?{B9Z)Kh)L>bMRJ`T%BgFWUnT{t0rb(bykFu)3-O8T<$6&nzH_JHeMfg(v+0r06kT zzc3%v3f=!j<|t5d^cp=7wOx9!(;(DABo<|7f@B5j*7X)6g880 z!alSUBDML*3jPDcwkmMj(x{V-MGt5@sx;rjKl)qwW+OlTk2oIx|0r|>?iC9ZiUGrV zkNaJNkr)V6;x5|_Twpp@^CaBK|A_hu@FueF?PTKKRFb+w1t?J5b%BM&-DPnWcU#=u zS=?C`S=`;-tt~BeiF;-;lK-3T?|VMxNl4OUD!S!ydt_MjX|OT+MB)+Wy8-SNP{+>w>7L2j2 z_;j6rCeE%eJk&whAcq`)1|uiX;v$)SqF`PoWg^4f$dlysHS--lWo-EXBDV!?<~X2=6zHZxeb^3_B6Sj|8eOxN!CX zZ$1Mp+7BM69Y#!ZY;`eW(&3w;;BV8AyV?N$=q6nKW?cVz_@{N~qYY@Qu^3*Ai&0%zZ$QZ0QbZ09#z*$SNdYP7>CT<25dCrgkYvSHNZS9*{Cs&xhGZv|>1sBi=V z?PJCFvf*L8=!d;H$_n)QDfD(YvSAsxLs7WDdf-E@!=EJJ{(TMp>lD0cF&7E17MS@c zaWAijGj#i_z~WkCBu)V;AO{)HmSDGb#>%g)J>c~GAfLC~y9mFnTAF{966>M|13gYcq%qQd{mqU5CA@U$r zcDsK!JVhEtT?BmPHbj=%B44@uAFG@J)Btb%1Fk3^dF?*j6huB}p2ze-toIzE7^ReuYD9kbZoq22q5Gw?z!B}JVvn<}uqZ5->{F@Z{Jx^yh#Km} z1EsfQyOlkHx9UfQ%XPEEuIdF5Gj%((UDQ2Q*~)CiM|o@c5NV8{1v86$N-YK|YBD0m zR^L9K1S;K6Xf=0|eaLj@ni2y=XGCAoHV)wxQHF$+tyNxAc92h%bQO<+2J&~o2k_Yb zgm%diwhgOcee_V@-%wq@!N$S@?!q_tG1I7z@x1^FO$PQ1GnpC8DCo6hwma6f&w0vu z%qelMuxqUGrbxq>3TZ{{^3u{nB_+j6iW?WtDSBE^mNz)NFl%zjQRZq0FM6S_P$SnK z(NBoEo9wCCy3xgkqiW7d-I{tbb#=Txc$H+A=q%>YKlrWL$KDF_sDi~g`oa`TPxpK` 
z&pnaMW2X5Z^B;-RREb(9c#!T^bXa2b1R`>ZcDizkVyR-KTq12QJkIU#j&}U8zjIyn zY;wu$D{NNhF7hZH!H$RGd5B=N_=EhDn$)aQak59!#_~;2Jl`xj!rRRJLjCTMxvDun zSXY^TuXtLfD!)+9l+`Y^6sMG=lpZLZQ<_@Zr^H({u#hRJU(~ahUtGOlQm*j(Z=X~0 zOjO&LZncD|e}^?t_tf`Io?l&8w@=gW&EM7)Rp}a?7d0^=NqNRs>S#bV0_VBCXPaq7 z$@QP&A8iUU?0X5fe5driu$rWe`b21J{p0ZUQSR8L3Euef*mIE;!MTz~?0B-P?*m)G ztoL-XRyBS%B%ANpTDTiijp<()1Gs4m6dg5b8io2d&AH&*u&ugN~54MS^VH`A}?(`Ko8k#V4*t)b8`+3>n-O7XOU$b8k$ zMY+RrYJboD)Z)#~ce6@zN=~KKG~N*Cp*j9v$#j@nVjl*Ysx-*+so#L3=J8sv8vO;M#QvBI1qm^c1@fk!4l6z zp3^>&@VUdTZI)%0Uz|%k@0{(-^U5m>7JD3dlb*nuc^^e`Mfad?p;vTw!ukdWX@;rG zfa$BMt)X}(_{Pm-tiBoKIJepH-WCQ@O$|$km2YisE;amOXklt%&9tAeO|ZyJbqom= z7t2JY=ZdoO`{vHe(tm6HW#K37Tgxj`_D8xn@p-+qHM&Kw3lT@l(mT~1*wo&tdK-S@ zwW(vm%QZ^XAraxVl{YL2GGB7_a8@&P%MblAEcbI!ye)yB5!^mHJZ5@$cG&ZXKjNK9 z-xK=99Ey4zwJ%~=aCgZRrnPgo@qB4{X%kZ$$35pgYfTenUg1LB2;N`Beeo9MbXS4|;sHL7ZHnPsY0c6;29*OfAgG@1o%Ui! zPpUD0nq;jaRig>n93}{h2!5v?rEDZ$C+R547v2@=L|=GC)B)!co7_Iok?vgUjC37x z)o>L#{;_YfFGk$P0G8H8tKPQKy3jPHEVpoc{vSX8$dzR8|MK(0t+!`iaUX|O%uo!g zwik@1^U()&YvOuVn^;fNG_m>YrqeQ)Bzl8=(!N43d(aVG+O|MY=Cr1`RJPD^rm%0( zsfu&1fr2X`j`-zOS0^=y(M9%(o)~`sP^~GtgpiTi6RKEo3fs<|Z5e79UB1!K$3BI; zNJscuc$T`CQ+nPa@m#q=wMH{pdtaL#^d5TF4`n~aGX#C0QrLy)z)xfMd1IYJ?El!O zp|8@NBb=?A^Bvvor)>&*BYU{*fVr`8ks;SG#c;1|PSJ}zfA)-TqdqC#&40E1gQ;Y! 
z(3AkKTzb=W+aS43=r!qDB9%p)w?bWK8uf%!#6e8n?U znDY&J(Wjukcusi+Q5V=`qPaL;USHK)T~BpN{+sl%I8OALXbnWSnbZ1Dv+aBm&k@Hz zHiuPeJ7#-p|L!n3UfSK(5=%=`7zoxRpwQO<4t7MjeM4M{5!Iz&nJ1N1E;hD3bU?F_lC?HA-%j*|F!wduMZz5T1Hx2cCM z$UBMY&dTVmQAQsolWX<(?$lGc(ZqNl`7WZ%;eh1$cF`<8mL9SdyL zt^Zm^TbtNj4#F8{A86TOx@hccs8Zg#WK~h~qKu-ff@gWvvnPDn^{qqkHabn$pemhq zBI!+JV)(HLas0W|0X3uQoT%Bi%7pOWRcVrNfr>45J~19Dk1)=%^>HP;Mb0Hw9{5i0 z>^$a`?7q$)Gbpx4EHy{jHcPXrCXHf9B`1yw*7jvonhRbVsXe#$TxNZ{pUdwh^kR{Mc(% zc-6^Ti)v-09*g={Qy@JeYR_*)rP@M_J&d)i6C4el5sorzw)v20x8sK&0ZHr95+b(@^v8%Mc33lTBx<~>a* z+4lV5qEo2L&>y-o28bws2{STd|pZQ zq7?-%f7Z_Hm>Zfq|A#&Mac;AUb?nqoU&7}kGNxG15C0OSPxPhM$Z*%%T;prfWPKME z3%=A7?y0-nOc~ypPT1x-rr9@I_89FIn+!T@4f2U_a?pO=OI<<8)!<8^+w_eha>66R z8GTLth0t+935v&}2YwN?#hGb;>e%QpkrT<=?o{U`hrl(_bCBx7_TjG*mO^7+B;Jge z?rqTw!5pkXto#9lmv^1LNIiEqbNp_dYi3N7%$eq)#>VAUN~#v+6!ghomsj}1l*4B0 zvPb9sQ!Zd++GjDh;|@eV37e{G7$r}%rY2+zsufqGeR7M4$w4y|OC%z`lH6d+H*c{f zIp#ZPD`lKhzNmCz*$2}~&v9a_GD2GtG+xssXm5yCA0P20VnC!Ta&lxycx)(NBb2oz zHqv}=v3tMwF7Q&NoK`jwAL)OkIJSVsz5CZ47Sws=g zBci)`rLGlrk*%wBon^6UYDL@9n?+>>y8Kyr?Q*aGaO5QB{K#Eb#*nR**6_~J{%~*D zc>Vn7g-JEjC)HeCdvHdds+*$ALK_C%QtT5Rpy#`;+6OrzoQLf;^ZJUPCBC8|CCNs& z>!p9AWS+dSe64(<>XY_<*u?N1k^7?`#E4_YMefr*)jm>u6z=1CLd$mmqh{pP->x6F z63ZRSSF7Ifhg(G5r`xj=I0Lk@`e9{s7Yg~Kgwf&_;-SJ!{sGoaZS~|h!yOVZ(bCMj zDsGl6EX@AdKeuJhkF0H3nOOtB@6K*fG~IbwbW7VOtVYOj?Sjz1BiAQ-(>~U$kxA6- zkTN`$7akFML^DQu-QR{=9;@vq~q#N3ITsM`}Hl)o3g1Aez75#>Klb#dLaF_try?beg_O|Ee7cJeee zjQ+^pg=WnpC~^)FPL=$WilzHRH=*$Ml^N>0iJBaQr?#`cwXTsVOD$S7F{U7DmHEV)eXm^EbBHJWvOs&Z1oB5#T=TuSRx|mB5lS8+w)Z%Wu znQUh|(YwX|mnpC8YSGDpIYkF6hS}FZ@BgK*3saSMPBc|IWs0Bj%D`lP;<#XwgXx~+XyRPuEOt(Jhmo^=IrM907k7YvM|fEr zDw!m1BU&w33YC{T+()*Q9!65mZ`RSK*64|}LT8>JSMuXWc6t`?Tbu8){0^3p+!yId zRE~NBKF6Q>^YKm7|EztdPDRZoX-$&sar0uHhZkuHMSbx>VhH!0JnNWjYFE0Ypj-a# zqBujUP3-*c80gsJDx%ChrF5#A3^D6tA}2*Xi_VVe6mu>zO@AzOaL9SBP#q+l!#_@^ zdndRuoTTHRqp3Z?`o_H2!rFc~8TSG2E~*aWqw_->sEcH^MaQ}3-lI0F;c&_R!m=Vm`CIdP%SPjc@>s)G+ad}} z9$9_$AnoN~V+f)D5_LK*Dn2V_OVq5$sEF2K@6=7CwFT$=DoC%kqQ2qoR@XAtm}J^) 
z6*==(kgE4nI*M!bH5c#craFJf8xmhUdv-<@U4F`g^67Hr9j z&3*o(EJu`e=5y-jH$UE*_i}qg)dkI=MX^Lq>V76o%J{9$yjpuwmn5!CJYRKVYEbh0 zsCyx$>Spq1f@{oJ&m~)~K~=WD476MGHp8xx_@d#ZV=TM9Ke?F#xpfDwuKFV84_zDWM1@+| zi9Jewa)sJ7CYNE3vAv;f>4n0HMGYz%*zbAg)4kas|9$>h(RM{@h%~Z!%#7$|k(x+J zRNJW45u0@rLc)VSDXikz#7WNO^LvV%+Z<~gX^u&@J(g9Lt+o};E}mDU(YK$u%9d~z z-X%e__?tLbY!%iJcEb8?m_Ly%@kNpYT@u?cQ?&|h$?1GD_sfsrIV-c?eBJSReAb8Z z@4nH5lIu^7_VpBX2-PKCuGX#Q)oR<5w!}V7=#jpu=DTWp5^m_TwXamOrTd{_@~iKf z>x^ZDG0U(LEKE!p7dX^9p^SBW5W z>X4n9{)!e7hrrES%aT+TuiVW#p4;YIebxf|JLdtH&>i9_@iy^2Wajx}h-hIW(Jav) zqAXDz@eYwts3M}E>>ES#$mgzg_BhL7LqeIa=$C?zd6#~K=KPtZ{HFg_<7YK1&9>yv z@hx%Rr+X@9M*UsYUF~GGB~^FDr^TD94zJmxc4+#&*a)3T`$C-{|0!J0(=cZD4{L2x zd&Bp#_r;pxgS;vB^Mp_vw6l(x9~qqHSShhy#W=h9}@rfPjk84`Uqbe{GU7;8dN65qnS^o+69 zHZ8BnFYR1HlvOj^#I0{NhpaF^UPwZR%`oe%Pmo8qrsxLL!262SaXZ zn+6?H2Pu6LBk>z&@jXSIybE&7$!IL-YUn>Ce;URs1N+o}C?8 zIKdeulFR0Cd)!r+D9yh3U()i@=cKex7#_1T{*P2o_2KDtlMX~p30oV)R~CpnKo6E? z29k&D=Z(c>7fWm9Y@g^HuRk-HiXu_%&$G+D)-}`l%yG!M**(JhjNIlEu*G1W7V>5i zKLu8yQ#4k5PSjuc00`mPgb`>P6BF&5=Y8aEvg*^&? zDJcJWKCfNg;-cY>P*FeCVqqh4nJ-egF?LtVsI}0iniWxZtxDKck}v*WYTH!KUH$g zN_9(3R0yM+q+cKQHDsywi|UEIxBR<2S2|kA=P}G>-xbsf3iht_d~B$ShyV712yNSGRp#WGoi6K04%6;gpdE7_ZQ|wZJ|Tj4qCP&XtA$1C7?8< z&)os|k0H)w_BPf=mOJL5=0vmDTwSqi!PBm^YUNm&9NHOYNZr%s!1o5B9@H-?U z)vvS?oh<5Wlt27h_{^xLk;g*jYlz^#!={BYnruagBo4|;huL=ANM;$LxJl$0@1Gu? 
z`<}DJNx4;2E`5dFz$I|&ffq~W|B8A<{|dW{k4m0MW`J)SCh09&AUq=)B>4xG$odHf z5dT8cX*qJM6S&J@LtWy+{0ZC|<|ddoC2TchA{k`VE`XIa3^Dh0+z$3GdlYJ$wV?~# zk6)XcL)T~DNI^7({pPu4)aC7)-Z zr_7V$TZlTRjhN+?C;aHAe#kb>hGyn;%-`xE*8Cn7GM6xoz$7!%-9RtM4|w&i9OpW8V7hyl4Jmp!O5^D~Ml32PA2d>Z0eO zO|mnJs`9SVaA|$1R?0~Gqq@>=ti|7mN1^JX6+DpB(3S3wJX!*;4wnH$(sMQjSWOjh7NpYaNQg0s~CmD}#1 z9&dZWL$p9ELP1Pv#4alTZ~*?GxXVg)XI7&Y9=}- zoGYFr`y`V~S4aj*`${KD`bl0$UW>!MoU|u2IgUjQ`|vRO+<=ji;lnxLyg#F2-qWj4sb{sXxsT`D3trnsvI{At9-?y30`L+pvI)GoP%-%IUj@8* zI9j`b=%=W^sK5B2)GOU8@ksWAr)LmP6hD>pmJ|z{3nz+x7mXKGK_6N8w*yQRV0n5o z?dcGv7gLX31ibMRIuXYU#nrB$f2U=Pls(OM2Q#_^-nW!@1u@=9V4KVplnY#fWZ@%W zmaxAt0}Q>Y!l?oeQ36EEPw;#;VtWs4MSv0C4xEo+P)d++D}mw6g$HiVHbvfZ5m5D? zz+5U|wlVF&!Hhr!uu-&_KIv=XyG_MYKgliReeV}fJ?}B{OFHOX@Mc7;UFlZ%xb6?v%i$&>An&J`sH@1qtT0kruwFjO}1ejwkx zft$rV@uh*!a@UuN>XP^Art}Hl7`htjH?KuJWiWe#tvPOlHNt&(JOrbu zhHyA?i6@0+V0DEHcM1L^-ayTN2>%b%HF^kK4bR_}8w#Z6Q#KNJCnxZ2d!Zolm?3e0 zILXXl^vo@~7Fb?&fLH87T_XvyvG;F8Pfoi2aZYgTvVArVsVKG#^1SuA*uR9$of{lV`!UNnQ=;)w#e}l_qGS1{+|~I9o%58`tpOPVWa<=LR;p?SKs`V8G=ZG~c# zxSjB@upxh|uiRA#b(9qz8F`ru@)fcT1T~~Na!$TkR#$RFR8_P?;PLOHKO&4)P zpA~gT2hqahr2st2L!jOsxa4Y-nSd%a>)p?w;y*(a+4^OBa}1xFhUijP32e(wzh^Rkve4R*SC2 z#UG1)8{JReNmh?u;xgI#TFzO!I)>ZzsAku}GSt zf;Nv*(e+S$ax6EL`2_y#N#AyAiD!Yco1=;2h<%8yx3$0=Wr{F-FL#!CO8zaf=Wong zmi?tHHn?}qPIc$j8CTO>w^!Z$@q;zzi9w#*rVS>IuM;oI`LTF$iNjv#HQKWstpBBI zM#TOqeQNK`oSu{#@>n%U-kkTvF~P9Ef-t?Yo^y@yTy(fB3a5t|BwQvvsyZ5~h|Gvu z5E&dkLeqjsq1KXF^esBu+t1U28q4%%KVi;j=kEG-f;*Bo^6TI9ZJ%fyXj?=d^WU%g0BLO}&_5u01cr{({1m_s^U38y~Esk zZ{(S%(c$xg)8(ksL9TE%@v!tS>{@D(r#0D>i3ej|$-BU}ij?5)WXi{hm+!-Q*YKr~bx;S<;+%)*-6TgtM{}pv`N3*2Y%QH# ze4waPVa(41KgND7ElQ1;+cc{An1=tRE=fIJ{dB|#$zblIr_`S5I?jaqKiZTzqrOE{ zjP^OWkN$hg^u)V0nq*{Ddzi30I7axIQTXTcmXJ#8V&i1fNy|sG$k@mD-u9Adr);O! 
zM%_ooZH2yeaFXJNXbiuC)%#3j2t9zxb|%}JIZ~;7u7sZ~Tq9|wn4_2@bqZS%Km1AT z51L|r0l&Ew-NmP6Y6Fd1#t5K-yM(vWf1I(R*LHg}?)s=9I?ye1*F&$>cMfqpu}F-f z@`SQCrJqY?7hTF9ofDTkOWd*k_!hr4=u)*f?o{fxxPGdof*<}f^nPkS3xx`2+Z_4F zy9I5@n_%@6$~VPMOS_fwThg+a1)-1R9?@9&LG@|zSMOods*1;k-4$=kbcQL8y}Y^V zCAx#*qx3mpQK8SYCe3~IRfSTtn2V>oQjc7REd9(k9UA&i)Jp0s8YjIbn=Rc84&p=6 zcvM2Y0W6S>_4=INhU6If5(~9#_A2Um=y*#|6L!4s36(*ndpf)OyNet?%Qdsg9AWy^ z@VKm1nX>#-*>A=2{0`Z5jhfiL&BrxcR((Nat=RCSmf>HO4`hp^$BDL3_1{i)EX;rR z^V=HBTK*7Jy>1(PChBe^Kca19r-+zPkv1#rVk8^9m4DyXq&&Jjt|HA4X4cx(i9K{|T)|6empQstk3w9?jNqk80RbiFr zMGb^{0jhE^QOp&#A17kNXqFlPmDnd_8FdwN!vbhJr+V9a9=f(UhT67UUs@#QWrl<0 zwaXWkttdTKoLw*>cbd6o)RTt7dJW>AsGo;Uiya@eLT5znT)T7(9~vTVGPl>KZTah* z54k{vkav${Q|`BJvFjqLZI&QA-^6saTu|PjLTFfC;V(N| zY|5YF&W+txJ0$f@@E7UL(EG9Z@ktACXh3S`1KVKObU*R*8WcY8{``+cLJj&h3o&U1-o z%IhmDl%G_g>Nucy9*PnWJz#;CtHtTqn)Fz*)YF9=O)tT`EtUHln7nqt^Bv}uV&ypq zh{Bfs*34I|k>V&l)ywO1KX*-r?@cxhGQKswG%T%{Y1ml6b7S+YdQYAVEYgYM{Ch1$9uoJ(!>%D0)Gu@%xZX}EZ^e4Og4 ztgmvq{(SUn-2}}Pb*TC`XuMo)CC2;Q}lB3n)9kF#fJ))sO+)@Jcunsb%9Xa z4E1eihz0^FeT>LLr5Z7+2Uch0nE$6U61sxCk9FO1dIO^S3QA7xA(xW%sCLwKWc1p( ze|HDFR@={__UU3mriwr?iTWlADB6RitK? 
zY=JmI9K>r(uJv3X%iS@y_KwliDf$Z~@hPzONM`y8IOSz^5BVMG9}2m0sl1o6t>&8& zEOWsJ;ZR|oKa9PIHEJ&X1)2-H=!H}{xssj+zQP99$V>nWW;tqMTm@pczyCaRZoYxr zGm?G9gkl!@5^7_7a=1t6UgE0fn&BMpsBdSj&8=>W5jAW_TT9IQOi@OEh1XFcXszaxJeNi{4}(d_O;@+@@Yu*@J+fCA-`!8 z)F%|Zl+#rTd9W~0*RL+eW9P6%^bT(=cP&(MJx(T*&8ca=oxW)1lm8>)CK5pt(H5{w zM@d`C>&u-|qx7PTkPQRFzkzs{NuQ@gOf z8wvd2Sg0I0aBb_EKbX$UUswm7LC^R|j_gAwcwZrIyvGr5f8!iSkLPG#xVMX!$E5ML z3Jj9IvhAWR#2)c*RjDRl{Zuta*+y|o`9+hjnXeScSIWwzuO-(dgC%8xAZW(T!%A?5 zKZ5(nG=Wma8Mc61z&1q;XFk+sj&Pm9eDHA9{3P!Sq9t9huE+ytz6W1Vg!8vxtvnle z*!AFHsJNS08E$1n>~Lld9YGiSZh;xSlx{`u^z{J>veb9T*U6WQx!o4(I(3a|iB;oQ z?=bHm&jwGt?+5dY38yJ)1O1MD=2sI}1>FUwQAhK$FkSLkvH>jUd16w0PWo1MSgMzt z7H3M1Ne)OxOH#zcg;xY!u-@w(^_9^&la>S0HiU@^}J8WI{8FQA&VOnD4Uk_{k`^+nb zWkSHj|H8~-rZC5tm&^htj=6%FcpLTw*T%m9DuYv5ITWUk0_!^xu}~KurYhVkY$-e| zTp{WvmP_hMMvCu=>Wb~C!2Cd*A)Y0=0414ZQM4#scoAAU8G?FXTfK(D%6q6qtp&1} z20}ZJTMca?o<9LKYJLOL?q4<$yoxPc1-lS4NgtwlgTTK!z%E5B^d5A?Zlcn~Wp*E4 zIlv=wupwX>jlx>~ALu!~fi@DyH31s93>fqGV9Ib@Q!ss4t~*#M?}4RWjObn*Q1uKi z6O4yQep|30K7c)=fntD$Xb6A!5Ik~?pb?@*X9Q&sSIme0k4?ZAenQ^$H$fEi1#*cE zL^yF0x+?AXX{c9t6uA6gUKnunTByt22BJF{`dYj@qCvhAPVn9#8B6tKQ4Ii^t4!n{gVAxrxSfzs}PzC&nMDW^%LSbqq^v0%xNw6Kv zg0oPox(@}df1&vH2(L~8ojng2kB;D4a-a>x1CK|IURVbdbQ)L_ z9l^xd1cdxqFjKDJEH0z|(0Oo~4&&?smDL*IT4ZQZ9lTLMukRy}{((9{A!y%3pr?O> z0$Ufh-vV*v z75IM}cn$$hOHQS77zfoY0)0S%cVj{y2=UsCGrt9{#}*vpA+F;Mj(rVB+yiDqKD@aT z{gs2Q5Wk|q#t?u76S$&LywN!ODQJ(I&B zNAOwT^B>`F$MJqQI2oI8#T&rp*ob&!PqZhn7TDKm^z%?e>xP1{F$OAUEm41`11v>= zDwfttZk85(4<(064o*5)CMxKWRR?mvG5ReM@x!X%i8R8iK>vuqH*nx8zQb0sFt#4T zVvd5#wHK|k46K7AXw$&E!(aZ-@q@+VD~KclU=FdD1@EVg+(;Qxqm|BON93U zy*vfDBB`+WBS745MN91D?L+&V0;l5^?EeKU?+9AxD&oumm9{sPt+yXam1n{G=!a{S zV3XrP<5(hGaWq=02JEXQ)Gp^jBk)hy-vVSLCW1NBANJJ@XV4N`V|*hCZ4?EbPBJta z6S1Y>lQ`^C2dfUPd?-;{!;Dt_vsWMc^9&wa~y8iSuiU(a{}sG6z9Rt}|XYL(61mF#_BX21Rt91O^@qK5gD+`^{dU3jE5=(R zDB3kg%lAbK48?O}{IV*d_KC2lB3S!-c&umW#Z#3dtq64y^V!|`O@;M0CB!;!A(W*bd#p;RNQD;1N1#6`jUbTkj z2rwiE;9WoD8pgn{tf@TzO|W+j+I1wfO(%edxeAJ1%fOXc1FPx2 
zwD}nH%8W{m*nEuaY2aQ3^eQLAR|I-Iuyw=M9z79ISVavKjP~yE0IgvIz0rsDD{Ztf zykcEg#4pH!)P#+s;23qVN@)WRS{+*|`bP@8tAbalu$e^IR$xp86jxL6h(SMUU@`Id zTY$YML!Sm$T3~?Z6zEq8Y)J>((pUOy4caDP^-|bHfN`e4uQm848WpV>XhjizA;IU; z%HJz+w!L6aePL^;mxb%-f%m;I?z-TLI$`SwyK4;VtBWhBfoo`ptEmM`51e}yv|2KH zq#S-J5B@0&zT+w4#BX7dKPr1SFy;bB4Ezh6tsi601Iw_&dz8W3e8u)1-z~uJvtdi` z@XPO&Ugi)k5o4!}}PF z-8g(F7SA%=2l6m3%HgpK@F>Gdm_tR`K#!`>b|P$nK9ixPL4QI^ir_7!cmy^LtU`_F zz$fC$aT4fj9=;j4L!w$AHV&Wgu?GlVU@w9F5ZFtgH%PSZdu(s<_=Ic!fOdL`md~&3 zou4?Xz(_SrSiq+Q{0*=2`n{EB<-jkz@I`iT zZY{W8W2OHoz;j@pS%#|#T%jM|kf8Uy_=X?Xatp0igjUN(EB-{^72!D#)>DX2{$man zRsJm>R{Ra4kZly;90fRMHO6*SrJW?hW}-0i z1C|p|sue?DC!OWUD@)1yI`OP0@nARB?o#V;K~2{G+^HW>kL?C;8oxe=%v7M3A_$?v%tTA z7xBZd$t(B7!{>p027Vc^|A18odiB5W{r8H+l?C*)U9b-(@F)EHZ(Ias8zk-uDcBM! zJ#GU259r2Lh35^tPQXg*J8G!g|!>tF-q`Ufh`}cAMjDu z$}!@B7m?tc0#_3F7Z{b^N-MKsES3LntH8Gd-~aD9FgN~xuL7SsDtpFNX`>WgyKyxX zJ}*YHz!D}CTq%!_Z}5%~WOEI1F}bKr@Xj#k%xW7 z!bbFXMB@92l{2(>y!vl;l>i$KZ1LD%2-+mjKVCdMxQf7a1bRyZ8`9wD!SI72@WSC3 z@$vs__5W-qFmDXlg0Rvv2Ie|}J`Py20Iy}R78ch=;aypJ$U_UbKu>ijm0J+ixN_R(j|6)}e$8SNr6Uf2MIFFUd zXWo6jhu@eV!K>w;3N;^83J0(97w~{3Tmv?YJ;FAF7h2{26H$TzP>n&GVZJxazXoa) zBY+>tgv##rhg7V!XW*O90 zXVNxb2;G?;NvG2HeOrB1eO|hZo8+&}r86Da)rk175&j|+3hojA2-=DNk*Z`JC09jL z#WkcWq_ZU(Pr%;Oh3%LHn{MYK*eqzR3X=4om~t3=PBG~;BUIIIf%nVGFR!dP%WGSrM^SZ;hcsR zfhHMHHW}2fi0xhWu67H_x;!8~nTM{S?t{FeKpuDnZJ;Mtk60`nuD-1rC+#h|BfcPC zt?I740}Yv>@=MZk;VxlcAdd{fZ^Rb9fLlU$cX1D)n`f@Y{iK9)BC z9Ql1v9(1w6j0085dU4xOsUw~_MpvP~(c9R4(CQ9iF9QRxi^H7`D%w+^0a)sv%};_d zZxw+ET9IG)ouTyE6ndju_`ktFAA&WW#u%LjG}~eJ4e|(Oh&Y^L9n5ED8vC4kiOAhB z=)6+g0p4QNr~Msqz03Ue#1rB#K}%tUU@KxdIjE{|0ji0Q1!@5aMX@ztt=C7L^X2|q zTnfjrdOTXO(M(q$d(L3f(_^7XHiK#aeZ0QV3OzzPy#u|MJ=Hvs9zFdRZzZQDTf35d zO9WNL7GXUi&_;g<&dctqM9M*uaN$#-MYc!fQ+$?okZcm`glU4d;#HCqVG^$b_=pl_ zK2rzyfIPYdwaPol+lTrcLBGt#y^Dr0GeO!=8F}=>rlU3EJ%W~pioc^Rc&8EpAy>Y&@O6%cpsHX+wZByV zP4Ma0vi-5mLVS@yFDzwOvQG4c9?Ym0AJM@PfDw9F`6h z$#=stS^|H!k@piTqTz@VorH#aYbg1ZLmlY}_VtN1KryHR&>hFX2b_QigTvn#YoLF5 
zYW_@Ucf8~0Vl0XY84(1|jR*BU7Qy3bpl^H=YxJs!2W-QNt2yFR4#c9iB09AhZG999 zDkf-6RK@GlfX_Txy65Gi!=9%9Yk1G5b~ zelri4qV|Y0Dv>K`jn_?3Te}DDT|&%f#-TdKZK$Vo$M0`K4`CgDIWY@Jw0s~(dBklZ zM$iYfM|8mU%|INX1pNNKyhwjt#Bi%~PP8H)dgaN?Mfz`ycrD$-rv=)qyDuAPGpBdF zH-?<=GqO|sKiHKt>3hkZ=RXqmhlX;Ju$N$fkP-KmmC8Cu6U3|F5lhA4;``7}Y)C`_ z?WX}pVJJHkShf4~I4EZe=owTYd5#j$^`QwB!cK)69EF;fU)_5=q31F_zYO1 zTTl``AP_;Zw5Q-P`1?)J4g_y7^wmeOeVHJjY;M3$Wz)TZ+j>H8Vv=x2*~C)pW$vv1 zHE$+Rh%cZN>marXoPz4YkHD$56*Ly43!X!TOU}Uw9^tq8Rj9{&>w4|WXxTPv4`XM?ij>4Qs51;x$oGrxIAcv1=K?t zVZVK`w_R9w=3;iEgr`oyJXHak(PDO`!;JI4eXK|1br7&>^$|@DMm$-M*-0(D9|R=i z65Ll`Vnt%dTs0Bvg_c;2bwizuftZB^VgaqO=8D3-z>V|yhO2s183}&?Bx`4k$0>*k zZG(QsJ**Tf;CML&ccG3+eftC-#G6JSjC^gJn0_R z?w|2_K4y}Ed8-|-CiA?NvF{Y<=$L?69nCL+%1kQi|9;`+@JAAj;KywIZqVF+g1gpt zMAkl{B?EOuMxmX=z}H)l(UhXb*OBwS2FjaO@;^lhw&wZxiw%<)(k{O^<_^2W&NBUPYdX$Pyv46^ZS<3jTtxdh<(O+{8jnMP}Z*r zRQW>GU~VF~gS<>L{!YZm`oYSsp(@HO%rgU{MdNpIE-o3ZXhsiIz>mD*H-}>US*V0{ zhmOrg==c<)XDgr#6p!rDYOXm~429vbxOX;(`eA+GV~4TZQA5NHyy$xUvr;u|KpzOc|mSrF7bv6Z%ASk*VVznnW0ZZUxhr? 
zF4iSkG(F%S1Wrdm@g$SY8#Z0!vpH&MR>V+J)P{u04)(I1li zQlac$X|Y5m2@yXLrV2*zZz8967Fofz%rYpPZ(ts?FS&iF0)K%2n)m??|4+hF;REQn z3_}=6=tUEfr?H<8{7m^cLej1iD6qKbNqlUV~dcTn+U!2 zhRhD!ZKePt-hw&+^+%((%*%OcughzO!g76b43GpX$@5U#kD;zmv+3^K2L3c*6Uk)x zMb+LQTX5shdZEgYCfcSNtFo$GCt(DC@DKQ_ae8(jQ%-NDW3cz>&?WD|tbjUp7Y<{b zPzs-lCP06Cs=T9OCTeF)#Oi<+z97~EA?BfVzHG7)G;BwcF7h&U+?U2AaD3ir%r$Qd zkBBEpOJpD9wG|x{jJ%%QEv+R95?v?S^J;TznMk?_+36Fcot#f~_tl}z^c7|bTL*qT z6OrAj$c24_GTmVADDv_r>0Q3nsMQ?qy#bbPedi3vPkUovSR!n z(_mAUX^rKQ>m7STbXoCMdpU}#@~+ye8mFqSO&gZ{I_8k>o@$$b@V#)vTPB*?n4TF2 zm{`jUXM)#6E~Hbr1Kdw`4af1O3&KVB#0_POlu_EvVRl_zUCS_SXt?&RLN8YEzK{m{ zbTea!uBcmHzrtp`X`SraPepUf_)UZz#TzAGqzmQ!71{E2a*1-5+OJs{bU5gbpcR_8 zs%nbKlE*|2+magTzU7Fqx3P_~$sPUOW2jE-dtR}ik@&o%pR|@VNa7UMBzF71vs;+j z^bP7CC}$r=1&~#wmhA8C_DnvRd@AWed|Wi8dlQr*tHGb=+vxt=5dsy6tJXOC zC|4XQqK~i>c>|#K*B+H(yih)zAzUKtDT^F^Rm)YAmB~<@9ws=)&G4zc zMrXF|q(y1Tws>vhTuaGy%qQL+Q9pSdwKeG9;MkC!!6UR$LC;l7Q3sb3>_z2vJ$IZL zirN8peD#43--UVrI_7ue$=g83b5Z3i_$oLvN5GbNin-Mzx&b}Uw}o0jPV{c|Ty+cG zU0fTTC62zRq4?R})xODg+8S@|V_9f+nkJjPMx!y#+{|{<-G(_wgveZ)4dJQ;UCQkA z3)MzdtC7~YN_kv8s5J7mhjI%%H$2$OwoSDu$4uxZw z{iN-q^`vDG5X4SH4MVDdGsK&3*iGJPoLNXINzdLiVL(LN zpbwH7y#CaD=VXh>I02QDEykvn7dF1@gEt*)$pyU4ghY5%*hN%LoFW-5Jq$hMJ&G=> zo|@A^r9r<3&D5Mz@fA-bPX!g+E2^4%pgq&_x3QYxN5u_;!o1G5+2tl_c7fo8v_g3_ z$Q;r^S66S;kzrj!KWd+;e^=a=V4cN#z^ds_WSUp(>Edn%Wr@9>e%@x}UsNdF5%v&^ z+GjnWptcX0phw`5Okx)@)9Iz)68uY^^*;A_+){U<>sM!gM_YSyTVLxSO9L}!Bn?#! 
zpDRWf{xWVh2iezpBH2v>S~g8v5-~2pUG-{u-5OCDWz`*NldH6h>!&9)jU-?FF;t8* z1ANvW<@GAA7`oOXdGZLR@e*~<`ml}=U6h29V{3njZzKJeh5w1XN1=X zPu26ooZ8OdT7$VlByop*72q;49`PKBZxHGo^2xs5Tb>g47cl4kb*!+**ydS2nOYjZSL`d#Dmz+My?lK|chd;_ zDen{Zq~K5apb%~J%tS-gRq2T}7S>>@Nz(cy?}(WhW>IB{@A@mq0(i))hMpAH>w6K_%+*7OMJtxE{M93C5fS~o3p zjW$#rBhL{X=DS&&Zv}bO6AfQC+?DR`429~eWTtNiMg}U6A*MDCT6T>whx>_@#5&wR zdoy`Zo?qZQL46}>D6J=Y(%lu#!HzGs309TmvZ=qZwqaSt%8C|-mBw_-OUG>TIs1nA zO?F26XM{AaN>cTd%ygofAe~B?pL{mI4=vd`i#f@Qoi zrV90o`=mY6`qJz(O)wcvc^20Gz+FfU;2gw4Nx7m(BMNC6Hd(hycQ&kQXhd+bW|m@| zq+D=>w~@{E9U${PAKi=HSKVUIVozW1FmgOK#W$B81YDaMGpgsTAIjU)*(A&r?gA6W z&R4OSN9pu^zla+5n zMn`OpsYq~F(Wb_ywo2JqjC?A z=T~=I^v)>yFQysy-hUq}uYFJuKPyZXPnYCL2goldTuP1lwtA-ecU6w!i)^H1r?4A; zBHN0(>dtifY}dhRb()IJtF52y8h1H)n>oaLCj25DtW;^v2CdUR)o#^33aU_dQ!bW$ z5hn^~@`J(smD4k+ljJe-$p4jf9&l0>%iEqavpKTtvg9B+2NeXAprA-lQ4~o=lw<}K zMZ^G#Vh*5)*()N7q6mnB0Rsj=0TUo93JPpalf(DCQ}_J$-uvzO&FsvXbNX~wzg=Bj zRb3T)o~%37!OKFmp@-R3R1oQdH`(Dx^XO^O1<)cGJrVz>4fr~(WG!7A9Ur}oT&ZV; zGJ|jXd(}N(Q$$pxQ598X)umHPN0mNM+P7>(#mCj#{0qVv@mlZ0nK=!e_$& zt%COjmxS(&3?X-32D?=jdzyL2`_`u1k~TFXka=eI=Q*SDy5tw-Ps+PK_w1Y%S=}-k zr*3hU$9F^uLs$BT)i$VkuKMNbu{D9(34zkklhNxE*LWwU{*it(b6j?Voa=M$%t_7J zoi!kHd3s%H)0A%BCp<$FzsK&5UK{y|J!a|Qg`pnoLmEO(mf_^XeUnTS=Z9}5w)d&* z2pd4=k>$}qbWp54-kf)_Kkog=E;J*7V2fZ;Ai^x$tM;~!ccGoBRJ8yc|`QA!NPg{}h z%M4~dnpK{an!PYa7q;}`ljqpDm=n~40oE)2x80#GI z_9KHqtF)=?>OY0Z^n-ofy*oT-CW!OQj=0AnIjnlNl2`19@K^X7t|j;G321%K3)LZ8 ztspxr@DD|)NZ}tcL4FlcC zBrrC(3Z2iH(bchGi4o4bz7x}4%Baa2o%?QtjITEbd0hFsn~{$Xqc@i?!b_EnL}NOi;?X&AjU`c?EKv{4=LZ#;xG<{@mQ zAuMQrqv!aXHU1p3mG;47w~+juDeMTyz)M~)MS%VhtmJ0GGlyu)cMxxRF2^mj z;~sYIE?_^=g=BF!lT4pE=&qyi)_Bapcd{X|*88IkyNvrDAb0W_qLBZFZaf=F&0**3 zk7%Mcu{-i%%9@Q2-?e0ixq>})m!Yk?hSFDabxX9bUnWk#SFjap>tb(MFw$Ca5F!lLFeG#rnMnEp8UyTC@Jd3z{|8O#FXVpl$gO zjg@pP^2Ly4QkFVdBWrn=?@wLQ`Y+3^tnBgvsKF9?2z#1*j+8fe4gIeyvXA4Vcn`ir zH=#eeih5kfaV`Ejx1%>u1nZ4>@cl)Z+33Me#hP^!hzrYzldGi zbIDJ&z`2VE()V!O&Hn}HmFJMr_Db|R7ZP{+Y%+A6gr2Yn%TEc}fY;)qHWwYlIq2RR 
zL$yX|rCOjhGYfb*7D9QX$aiT!@4rGZd1m~LMxz?rxV#5*xvw31v0muQ`r{*W8pjay zRL5g=Z;wVKo%YMxD~}BypJZ7Lara;NS$s+BUPWhi5BkNc(4dV4$-{||d5rvo;P?ogyZcE>@qdaS>P;26vHnPD5&zz~*}lOYja*`wr`iMR1In=wYX# z0lo&5E(6J1&^hh_C-T`jl(Z|$JLxFxt>$j^xO{hHsg^!ez5-cLKwdYp-5)?-`7KzG zuhO?X=NHb(_brcWYS4F1{{TLU;d}>=wy)`Q^x4LOtG*dUV4s zAg`(R;7ZrF;L4_a>O@QB)psr&<|_OfZ^F~|u4Ej1%g}9fxfX0qXFi#l)Xq<4Hkq8X z4#*F39NPNx@Yfmx-G){0CS5$xEK4KVu2lfzO8e{7tNutKe2IB@g*9 zzs{T``@kBw(>ktrn`0fb(0gEO4aZu(%a{6DtTEzS_cOAE)4Q039%hWc%oWNVu$S6Z zP*W$_9}S^=BYHp{wvMNUI(&u?;5$5T1*JTRzpOM8FYx^>X4+%SiSooOgxc+(Oc(k? zerxibY(rnkf4O-Q5$gY(q(2s5fPV7h)*)X>9i^Z#l$?LOT-TfuZ2l4F90=u41a%>!9)5G$`7#M#@AZ!7r={szvzfm_Djl2y?=>B-qNLVfn&$Mp?7To%VqC}k_l#o#>Ao<|@gO=9N{MM4yLeHwIgpjXH8wd&tyu#M2HIdFVCg>4rp&F!}OzNY>Ft-Eq{UDObv?%MVq{k|>bZXC*u-ow~N< z`kwIbLGZJ2$=P*Ea!#GZx3fW_P&NoE9nV$rgEntL?L3grzuGPjLE{Xy%<|%C@&!Er zQuczGgGuQi$T*fyX*o4Gmb^-yqQa6m-`?bv@+Orpp*(O8C1=2DIHl_wSV1|$wy?Ml z3G5pXxD~{y|K*MRDaYsF;VTZ4F}5SS>)6eiFTloLUYnBqP`vTIB>&t9Nq{^<#YY zLz?Rd!jzpze!GrX9oYcC0RP|8p1){SRnlkkAI8f-Rv<0$)4G&Vb1yXtrW9w-&l;TC zB;|pORhjM6vVCK)L%s^+x8=gru0qYq>cXw++-iU zb}fDzv&q+VkQ_bpSzSydlg}ki+?nJ36W@&@-_JyN8s}s>d*RNPCmv@#{7w8q*5w;P z*|&HZy-7y0XV6u7dgN4zu=%QFUJNCB5o3grG05@#R>aGCkeGa<8 ziR`)@N<^zpWa79W{uwcn(kWvQK3g}%|BBCoLw*lZG{=3*xO$E~r0=s+GTVC$KcaQ` zrhMl7gFoSGtoG)CwFRu19DGjtdR`2 zqh}_@$CD#$1<{M{BYVpLM!3SPz_>0+PH=kA`b)SR}Ln##K8sCE#m|~EPCzePSx}|UF zq3&cbzYpEgC@Aq`ya{spyKsyOG!wUzkEj_d(C*$0-=Dskl>T_v?I-Wy3E6{lGIO8E z{U-Oh+^$+|hipY~$vz?8qdpAeTPf!yB;jY}TS{f&%wf03i;A$D!%$G4Cx zCW=052AQc#A|H{B{MpC?a?qa;X-H1``@|5Z@zd4LlO=8QbJ(k=8q> ze?x!qu;w?lZq~MMyOOrSl1`l_K-|Em0~qEAI` z#ibQbRNP!Kk7G#1r{#B+pI4q*?k~NibVkLSwUyzw64QN4vwTIjwLGr<-cFml&FT4C z&)2&>(dn#q6I&E8sULKL(eyy1^2^d|N|%(qR`zcBi0ZL{0pu1>i(Zhp&U>5p5W78J za&~(Q$ZXgotuCW!c6H8u`HLFfUG#NPS>c?9lN$8TJv;Nq6vs0q(k&3I`GcC>QCUKy@4Aex5eM^Z1fhOuN;-0n!bh{!J|{J_Ek8?IlVm(#-EJNjQk!x8d`w8@)ZB5 zx*DvnPW25{b1Sc}___So^4y9)%ik$~wfwd6H_AtqA1+%`wyQK!GV<7WN4u9*RWAbdd6jiJ6rzIuB_w!t_OSk(WAD@Jsk!f=Wn_zzgy}n@oC|QYG;(MId);m(b6Gh 
z>&sJW?j*MGy6{brZSmiU?lCp7BYu^q2YjhxN_pDN#KmfoUsaGO+E@H=@!dsx3Vz5t zGkbs9erIp=#$ZM5HPr*E#t;Q}U(Ne<4+Td?^E_+Ge7q%fbb9lQugD`mo(!M^Q-*qT z@R*uST(XyA>xh*1ROH-nJh(gXs{e$#%9=KKT&$^jtFlw&sg(;VU$0zTIid3W%2t&t zD$*-XDj#09x8&2Kj~$y;F~PqFY<`)tJa0@hU;7E2^SaIGnb&()j~<R7yVRk=X&3HsqyrjoZ9>; z4ND6bH_C5ZQG8`lqXx6HbJ9n9=f~CupQ+ta<*#_9;-|`iHQ~BvgO@}ejF&j~5hM1h zv=`GJN!yotFvUy0W}oknvkeW9A6@t_iHY&aM7vxZ*%4|N{LcSk-Q-$gCf8h7b0ayp zC)UiYxvFM1Hue38VXG@=RkSK^MRct5j-7O*|B=(nzNn2xpLgy~{UUF2vy%3^yA*Y6 z(qn6nLEUa?A8t7qe}cAY-J@ImU5R;>Qa-6{VMS5Z_Nx6gU4v&tTSd2qx`u{D7RTns zmPfyd^>O}5nV!)iXIF!(8-3NdW0U(DZ!XShbXLJb*=z33mm#D4!+^WtFp+%vJ~EFGkmej|JNY z*4FK+J+XF8&7_(SYF?_nsjjN7p+8tRqi$L4)|vz4mu^C&fbPS@hj*R;K+d7#k=`Tn#?@r#0o>fWmPp4iIW zYx-lS{<@}@|4yQo-WG8D0|J$ybBTf7+;c@rhs=NFE-1LPQS+u5&8IiJtMO-rU+2%s zZjsi)vnu>gom11G`u*xNYggAj6zCbgE_StN4Dkm)^Zn|(i>zN;XjK9zHF&(BHCdLhW<4x75w@X9gArItTA34%+fyzu^zT_PQW8|w0EvudZ+9jbe|_JNv>wO0q8CVKA){%`zqL%&7CiHCjZ z887F&*28)iu@QYvS1X7S+u52gB!kE=Zf1lhUwH@%gBcE@<+2@w~!AdBZYC`qJX3 zhi@hi+WmnW0$T$qp>5$Q(Yg5YJx^xdz0s=ZYq9)98M)){NI95#clw~rGqNI?mt*C$8Z`H+?U;#Q1&4~;T^$s2mvUsN82==;BRW3vcqkG$=%3<0$$uI# zd_(?*!409yBGa&6-4SUUekfS$zq&4l{PJMc1C@uFua{OFt|+gVUNNNnnz8{EFV>bu zFG>A7yKzD5;x)||wz;xhRlCf#jatoYa&N=YxyNVT%07{e-YK3DvDw6!&J7L;oL<+W z`e=EVvPa8$SN5*02zuhbc^^oBIlFQG?*(rc_9*(ZVTT3}=UkEbRQlIxW77JkzU|BL z-sU+0JL%NuVtRHto&sa)9`=t2{T+SO^SbW`dhz#+Q0Dxsvdn8TW~9B4GTT?@4SO5= zVt8n`!Qb-D*se%^co8^ywQfW0C-|*w4!j#`9QiDAEb?@uU*xgyvqW&28aU5?bKM`c zt7>1WeXO=??Zq`+sy9_msmLp@C|zGtb!>a-pz7X{KB>!cD+`}$GQCB+)&tvI-FkbA zrcECz8q;82&f3fyGJ0k#z?=5nlnI{Mk$!>wHI1w0m5(burKGOJsaR7pF62p6_ztC? 
znU$5jG^-@@tIT#;3$n&%Rb;Ns%pw~1@a%W99?IyL+R14ido|Q5@H5^Y{~(XEuDg!HT?=x!rTJa~5Vln7JzT z6wkZiSY6|qpQ?gYN2{mQPVi45BgcejQT#~aN#`f;DBqdv0a%)nk={RRdG2TV9U8RF zAD;VJmXom}b%c)?@Wc%`)=1moP@Bk4(MUYc`HbCPdwfr_ z+vR6+8c%ZudwL}15?f^)d6l1J&&ZXrM$swZ+k!v(KdGBSj)7f7FrUge-Hjc*(!bR2 z^f=pAjmHMSR6sDf#IQ7Ztu$*uQXC!wvbB@cIK_0F?>3e(wJdx;S;a$NCg9V`l;W5#-W1Zr4tXiJK zB7IFX7c1-;i7z~_uy5_EwBn2jnZ2^cW^G}Y#=^8e$irSqZ06P8_1=k$5<2@u_SzQWC!|z%h->!h@A&>*mZMlY&+}Kj*P}VQDlQyMNBb+*PsKa3%3c+ z52cZx{{7$|!GWRILdD^$!jFa@3(p7-WFJXpxDTJxSi_`87ZVMrAoev`!Al}n<0U>C zkAN2O+sIdVed-zMGcq=0Ov!jIeJ}FXX!2_hP0!ERpK)JiII}}mG;;={s#E&5)EOzm zeACbeeCGV&9OoVCE%e^v+~OIQsK9@rN3=2$iwq;C;9s$|>@&I_z2d`IVwxt(*ekF- zUcoAMe`19v16x>%vx3!W*TjYK4cPWCBS(5)avh(GO?EZw$sUPsv2LFcAHvG`Hf;7) z@pI6Oe}y*S8?-OWupQQs5&0QbgnL-IpULX-4s3nOfqM<>(GkfQHnJODs9h$kYl~T# zp1`WLj8*Ha=yOUEJ&3(<(9@4yjE}Ng=N4?4W7z|9V{+e0jZ@_9f>-`P+INb#Cu^b* zdyc+$cCx4C3)b};Sko`Tn%mIX!ycmh(UV-mTIFi=6f?0y3`8H2jy7JJ@}p>-{y}@T zkJav5NzMN2XtOp_&-tv}uE$PwF2`l8X>VXfdMSSE1F&~CL*p<6Thzx%o2zu>#ptNQ zNiA|2`Y%Ol=!5=mEUWGFS)=zyv)Bavqjm?kLkBeyjrC>hShyHF$+d8a`RskTmmML? zi4-#zt@a4)Bd2j)7c>{`&>jv(vv?hCm`Zu)QEoq|)e{ZDU})AoX+_fBt>>rxQd>V z9dkOZo=nuCbFl9Z!v5bKt8jaAYAd7m2yA&XvB%w>wDT#O@e<0qkDiuwV;PnW>8;;E zqp=-LjG{OEj+W#$ygSBIrfh*1Q~q^4b0PXg#ke^hTJ_+i-TYn9e`s%ZDjL9I`b^PU zr2Un?SG%~ipIZ?t6#YZ_%yZGE%HooPrB6F}W9SSO&p`ILg5<9)=!fp;tYs-}j5e|% z--gi}XY#y3SmhNvpf@z_19fE|F$?OMTyX}}9zeZhIq67UWCJm)ge=uj^d&XPCq}7@ zEV+%qa!WM5UASHmJ`}NF6c&pKXlce!+EB{s$8U;_B>iwNjvkyD#4jf&pL9yHKC%#Y zqrTEt%Wfx)xmwW`9N$Syp$D-1I)LwP?v!qT_iQZ-{&N{T6JN z*JAsfioR$2F`h>h%PiNBN%> zDMq&3`_XUS0v-l|u#TXs3;P*6qtQK%y->yM|LBh%Wi~ij!q|Kj%gFkqwL}(-k1137 zYekqj2FTgVBN+La#c!_kkU8_%mq`hrL+ul?cslAMP8 zBXEllnmt)IgvbWqSr#xiI)Xm6fc|Y52ePXuf`wwgNQd5lcF5Ldhwv`!%etmr?WxIe z;^i8B6Ru@X(V<=WHdbUsQ~U#QD?L{hn0CBm5k4;YO?&Tk*x1P7dRg>z$hsq&UI=}? 
zW%`zdGn1B2#@0I%U5UI3uHu-^aR~?;pS1HD()%*1WN9<{ccg9F*VTi=Y>;iJgX}|Q z6KX!Up_3uOHzqUnI4@vWRWN zauJ30zhEi&jX5S24)`4wy&viSH^J#EyjJr58dzS#=eM!Lt^`{;UgpeOT(=forYvwT z!p+3rbUeZ5WuQd}S^Q0Zzt|tGC?$EU}v?CQ= z5=r7s;zo+@P)UC|ApSgf!wyfmCx?0cR-#3=@cv2!fSSaI@onT%n86E-(wd#^GN`;t6P zJkN)f<6HXSO-hnR*$YZEYxV%?HI$u5jD)i-f$M!q{4nHdXeMiW5oJC|Jh8*rKJH}{ z)S}Jag$=DM^XDSC>z8664-2lEf zK(Q-1+khTe$;`W&akGxPYyqoRU@y5GnjVC1+t{J~3S3JW=J;U#7`zRnFM{+-lnCSJbKf}l z&aa6f;JY+2m#ZrhkAvJP%ohce@Fe?RZw9g7GnbSy(q2IBDgbRO;CO!~rqfFa>UtR? zs0G-bhK*t^y*eL`-@~Ily*ue=FC6)LBqv#V<)d?$F(Z3!F4xOvZ5AV;J)Unta7m@4d6JpF==b3@e(tgl<6H)xe^29gb%>zO3cgR9} z@r(S6pk>4f9r5brNYnB~Y!)3C~IWM{oYZ4~b$7aq18 z3J(J7$M6olJuwMSvewRS#&8|8-)~qmhBHg|r|cg+Un2Q5qAtsjgqDzb{;(%PI~K=t z>G#!%F3dBhlXK!9k9<&jC$`56oNdnaphJEi_duzZjL*`#_=!l;fZD7<7Te-0L(SS-Z`1NT}?jO z?-PE{!h{#w;cnzp*<>#Cl+d5gGdE`=ySJp(-#Ozw@5Rr@6J~Vc7<;bEu+tol7Za!B zYxef{b!r(Qr+PN9A{vFCYG3DeXJ9-n_965+CGj#*KU<LnAJM=u;xqxVjixWR!!EBw_$F72&o+ZpJ-JHvvkCFebj{9ScSb1E;Uag(ZiR`s+ z1(I~~@Eqr3@6UJ%Zz4MDtBDA`_-NtHS7 z-ocA^3lSKWG2+)bT^+@|`2@LsmS-9svoAPrdxON{80)*-yV`T9()M)sQcf%tnw%e_5uhSLqq9ZQVDO8vHHjIYGGk`c3oqZ88Le9vNU4H}e1@s06E z6XQMez|JYeys1pQ<9P{x@h{lDJ>I*|`vKQc5oWtIc+dsNq>HhT=d&`~?_e!FK4{q_zmeGR_zOZ@lvIXwRl&+$a4D1k5SBHsX` zBR)Uzq2~hU1Gv=5*!fQF>zQO_O3-|!HF zp(he{Q}!pnmY4}AdX14ZFA>GI9s@^%@%1hA_?-PtN3c#NT4Los3Ex@m=|udF4tUG1 zr_cY6-54LA*oaSlcOvcH?AgRR?+Rv{ogis6v+Fd^_edKvp;-@Rszq?XhneF?aCAeO zd=%c-2Ax<4-rJ8^wHp*|O`N1hkk_-ByYHZmvxvs~T>MwYspN>gFM{4b&QczESbJbe?)L0QNy`{8--5Qp_+=BCS#<}0E8B=}!0^1>=SNf*M= z+OitUL_%4IyyY;Hoeb~q#TAN$IT23Zk&+aTVJ-EYN89#+nH9{@0pguZqy$NB5Ar*8 zgWLZo{3V7V6*i(bhQhhGA>Vz+byJzKSJJX6$Rf|eJ*P2i^h1-2t_+1!Rm`f#1o}$dT|h*cfjNc|*H`D?*P&R>U_uTYOig6cE4qvXp3QyNp6C z@&C%6oPBTBU70s$e3afV{rARMg&@G7-bZY!PB2M2F`5jI37Ggo&5giom zNp$cQvAlQ|lJqoUTC^w6(HBmM6DB9lSI#~n%XB~=v%vX|{yUABG&is-VqN@OEON8) zKJmxW;uGSZutR%YVgT~l?cCYS+tyn|mZ;unf){vKcxQWk#E4o;Kd(nW&@%BAdskbc zhnf-V8q0|Vi8WBiZY*Cc7eATi*c$ueV{>z4EU~E$hwox%dymkK!H)uM0=M|TC7;AS 
zb+`J%fnniI#AWH1nw36=mMtq9DYp_ws=YS?=Q}ro_nL34@38kG?@DJgQiumGvk9(oXEYdD9=R$qA#!eH8vYIryK6dQRp^j7 zN0xY^kN@S|<-N)`Hf19j2ZyJ8hTZE=?+h}SO(4qGNuH%d@%WUzBv-_n5kX;JEG0e^ z&z8@S`r9Bse!(bx3xD? zG~#7n#;%mB=-+Gjj5iYz!@iEDMB7Hr4Br}hKiD)lH?Yha_1^1J#-~-MZ^&4Zk(zODdQtkEv@=q7`|kC= zL@eeqX4ZS7N69VFo&0Du!AYU&&=cW?WCmXOa!%qRd?z|5J{VR~660z?1L#@GgEI|}+ z#qmDOOn7~~S3H%hh>OU}w<>ya^e9=VmVlUH*u=MluL}RnqUds z5M#0Qr>swXE`3KvcIL+!w`a`AXr3`W{o=GPskOe<-tkT^&s65~_VH%1 zO5%gHiEJQFRR*3~hhr<_4Cv(?>-Y;qh2O=hX_==H-dLlF`|&N2$}WKporxt>?0p<6&V`38 zVW#{5ydB5hvyWoyVw-ugA3DV0B*(>jkR7lBpR8H2RxXP%xjBG;QcDQ9{13t5vhOVV#lYnEDy=D)}r zWPX_#Ya4k0PSP^GDpC@?i5;;wG2*;%^RJz@zKJQ*Q#+*HkTxpqtJG$x*APGHdS7RH z@jK^o&-(a)Sbw4oyc~IkQTsw^I_#kWMa*9fyzJ2hpe4z4s;&!tTLe@F4Uq#tY$Z%6P+xfZutcGjMALF*4HslZe&KSV{N0jvWh+% zK%es&YuDw>wu+V$<4NBmeZ0(_&r#0ac$D~Mefez$xdN6l3+_OZ=3}kX9o{CL+ld@K zn8|aggH}b_N%k{-*`Ko>-opy`A6D-X)`8l^FeE9ZOk!@4$FhUApfPf@e1#P~JxIxl z;@Bs-17sF@0_nD9Fc;s64&_y(@y$s89}|1=2UcB2Sd|?^(mTXF_9L_OMs%)AJ$JJr zxt4X=XlhiBB=9Nrj5m=4)}SNV&78RpUBNM)T*S(57W8|S)#n`U`oL4^ah#*l#@~hkcDTTZvk=ae)GOeG`cj#W|8v{?UPsH53HqLK;!Ws z`m9&b;CuiLdm*20WB*l})70t4?y3>Q@;S>HLr&Me_<`=G)l<QEx_?ECpo+XzZu6Ci)3F zdK|U?1T0Y#$ZI^B|Cc&f;4#MRS<*$!tFem-*PB>HYn(q8ol zdYu>99rqrsc@8P}9`tAn_)S)Zi|Hxtkxof`ShnSe!Y}C5}+J^L2!wl;7Ilp>9cYe?b>y!_{FB#Rg5X9aGRkefcecs>VkVQn9KvtpQvOPwoX?~3}+Hs_BYms}`@xCf)eYl$y z*=?+o9){*m@{)deDbISA9($duRi3*~>1Fg?+I^%*`(`y+gyrNu?$s`$=jpq*=*=(T zczfZ0RZvRyfkJ4c{bjxA2U!W*b7)^tE;_{zK*!r?EA^Ds-2EOS`7^YNZ}a*S4ZzyRBzVv+8^yGjy>f8UZgmPAxi8%Kcn)dzrR`M|{}Sq>-LijE z?lJnU4d|5(vm37-TrVqFcV4o4Xvba)`lt}jEXzmz&Y&C+q#YrOOfGAP?G2L6!)$Si z1Dyc(u4PVFnXN^;+!TLWmRjvG(Yx74^0`L#6j>VGmtun3Q3bF59v$@8NN$^%PkyBS z`}p=LJm(i$qg_{FDAkYFYcA5>zBAzbitv9P+Q-pQa5%s9Wu+$zU*}{iJJZTm=)H8v z0@REV(iWO%Pe{SPTc`Ng+7}fo z(52AYcC5$-pxsNddhASEM)a;I?ux+wF+DD9+Sq>sRLYvCnD^Raw2|j+f*Wa{()#36 zg=!sU=X>Yh*J@9aBHMq>T&MWRKY>5Za53}=$}MAAU__U*k~TDPk+Gc5LF=!5Q3tqM zc^~#B+aX)LS`|hIb~bZ09%OKR?X8)@p2ACbzl38F=e5^sRMI-9ceBrpNj_r?=g$C{ zr$hU`&^8Ai=>s$3l-f~~2A#Wr&;f8&q3=|#9gyt3)8Vz+uY3}HHUxaDS7m|i&Q$}r 
zR=uX|9NHVCoh{9h_B8FJ)DAbrER>yAdy>=XN!gimd7|RpYoA^fs7m8}F}{A#v+-lNpA}mG$-0VFKsbIjb515R*kkm=eat;G;0(LyKkXj-153pv zaJ>nXHHw*W7BwD??)zq17o`URNeWlO;}o^N5>5Zle3F&CZ4!~~Xwzw+wl#G>jgfsm zz15Zacjo@~(5;wv$MY395wCt09mK0(=W{+iC#^i3>^=HRT>1c`K^_hj;4p@+TymId z5Q1MjwB#7ywSzN=rYBClmAB(Z@V1-!tcRz5%Ejv~oza&luWy((>OYZ?%hAKf~9qjoT`@tSo2x018Lq^Qq%;U%C3+1{vg_*IL=VSUwT)B>Q$TP@KO?gf~ zkb3|--Np324;kV@O49yltT#MsBv!rA+);qtvNX{RjGYU={Fdhwf|;!#Vt?XV=G!tx z?`f2ghCOE#D~Vp9=LYmwZE1N=>fIi0J(;^(Vqa^E|itUF$T-%m3LWr{K6XKUJ&4TgrU}~cOC%~ek2vOp%1HxW%Qk}?xB9g z$y&$ZfU*WRgu}LB_RGhfcLWT+3lAh-J9PVik?{*{ejiTh<5>wTMP>BQTBNWH&j-u^ z%H{PT$M10C7m?Dm($K81gV*~Y?^o*JLx2A{Bdm<)$FZWEMEexkK^YEm7-=IJrTLsu zhJ?Iiq=lcT#}~;N{R^o3C%pDuO8AEQyhDBS=;u~Yz6+yTUMs!0|2TTA3FnKE_~hRs z?w<-4j^~#YdOm|IJ99-p=AXWdoCGrcNuWr%N`9o&SE*|?H9f?W-k`2oaIO?^)PeUh zu+$MWr14bQShWW`!4-;8k;=0wc$Q*qXr<&yN}OKq`HkZ!Bc&x{B?DyX%r2;#MU9$s zZ#k6x8VVjp9#KY$MwC+U@L0Ce5V^BTZ=`*FqL z~`9KkVbI z=BYaRUip^(;+ia4kjr(4sLyUFTSL9`x#DwV`4>U#PD(or?fzo!Rm9yLd{b7KhLoeI zDq(u7nA*vcs0Y+<24!=pue_Wx89mzF`7L)Uzr*je`b$dQ0`8UftBMgLZmkwuJdTD? 
zLtZqJ&?HZ_0x#WoX(nt;i;JN`2TIq@R>dSpgL3k{iqlHPNjU(`R9oe=I7Xe7iz3R{ zY6^d7&E3kD(TK7YiJ$`|Dq~m*aB2PVe@;OKg%4T$!w#YkBk#iJJN3HvfR}JLh*1Ye{&s zs1~kduParqJwcQfh1H|VuNDK%rO-%nvC&aJVpSY9ph-Bjz2X)H#oaHiI18eh(NSe- z%$iS>c}H2a1Fcmh?I^F2Uh+y*-WZ*;m+MO<>g&%UAE|D7uAZ)Sjy@^hjcP9B3t?4B z>R0`n@|FdtB-vAL=?-@)|BqI;@)5LH5!&CbZ<1AQ29kf6O4hhkn{1_I=+INe8H84y z(QkSePBb8 z(N3|gL=oke(QbFkFC;oCKacV^xtu^4Hw1`7iRu>rNcd8QD&0wdX6~~QDL$xjB1sOV zXQ?b5!m8qB2tlcw6}K|}EmR7fhCuxnC$MM{MlIcK-_`5&f9gLWTzzIOvQl*3vMY(M zipQWx4K}((AIpDat9EOh5N;S$jwhi`@fkG6GI-Zm5Dw+rD_=*Wq3~zkhZ?)$LdJuX zS4i1}l)Xs&N3q2eT}-qQ!mNeT>!{B4BU5@M(b1))d>YNKQCY3*lN+f_<>_Te)Newf z8(+aVqPyCz|HoktL#so z_4@BW-D7$bqrUK_@9v+AJzXcn8fxo58Ga3gs;#aPh7G6oU!UFUq~p?jqIgK6jHsb^ zi&~?xsW>5$1oVrcQ)Agis*O2OPPjDG33-}7Z7kV;tAR@kJzt+i9ph@o`-LlcscM`W zVw6wTMUtUYW7g$?LZL>K?lI)L*BIioiZPxjyoo!N@{;6as5c(y&i*C*YT3Xv8;cWK z>;UOFt#^e+^BJ~mSjOLtnr=HKZ7FVyjZ8&NF=`2I_RXj)KYSfVb>pap%lsr3ZEU+U znNd`8r|2bQ*t}@i6P`p%9jb?@t@t(8^Ln?l`o)kUOzRrMv)yUPG<><5yn2+|ukKUC zUDbnzCu_S9WuwKOW993K>TS2btz_M8he{Ae6>UdU)mS&$nLHs%>M$N>d|D-{bd_ND z8@IQ5hoDy#`H;`^Fy#gX(bjOh?|ggir+a3yp}g44TwGY+nEZaG4lq0~yW z`B>#iB6JVqN-l4*=jof~Z2fm@Vl^SOzVuk8G5Y# zl1;4zns01w)qmq^%HODS^|Pug(W>o+{Qn#UHuiMJaH?^nIoOb6ZLn2=_?!@ATurUg z;eNAr*?WB-*Hb~f(m17y=6Zf<}kFb_H5Rz$E3}=#_epBi+(QuaPcFwSRcBd z+__k1jZ^8r;lTZ5_qb>EoAFYszuI7PiRx}GG6~!`?f*Xxjq|wZ7aok~3h&~tCPfGlJ$|$%8icn#7{Dlq>&;ZpI~TQN^e zYE;{}b~&E@yW`#+@#avHcWI(?Hb1yHSDCuIer&k+=&WAuEMl~>HIKEto-($sFn(xqv943w zG@|uxT+T3UV?s0#Z*X63eN|U?1{T-S+-FDq`c!DKZ_+G?cj?f4YrNCAi+#44Kqyw! 
zG|jc*sru&5#V!vtsnM=9E@xL8dTo|AEbCeBGt@gSeML>R%I!s))wIqO7cq3&oa?^Z zIrWNkXPSRq+8Q2BW)`LE55uC#kGfiGJ^R(2kwtsA9wrZ}X7*Q)dErmJqo-#uUTr%; z*XTJq#1(`y^^%q6atK+DtsUwGcf_c!E_brl83!}Ip%RVXso#tmYNN@Gs)5>UrRlSt zpl>dhv;Vr@&Ka-PJ*tWOr)Qb`>DJ#JDaOO~Zf7*|G**NxVZ9z>DobP0&}L&oEwSfV zt$1kisn$={8p#m3tU+b%*t`$tEVN7&={^D4f>Mh*q-D^xtRgPFp)0 zq7C)NXC;BEE>;8UBbO5xPf%+_3-zohZ8MoZi8`8fg&UhOY|Of=V%Z|Z^-Nk37c^+ALR|QCY5j!5!n8b=}#%zPAjCLiGP~KwV>p^`|?gY*y1RcHSLl7D3la z5u&Z{T?DB2^=EueXKif$Csz_y>=(mN{b(@M=x&=OH0F%U880*%+S;U^i@W1Wb+zZY z@1luY7xj?KwS`9aPb0xank&r-G3t9E!p4|8;%pw%IrXy8XTKY#F*Ld;Gv4VU$yPBg zFBJw2Px^1@a{o(e(`T2<+8nCqX!O{#ReR$P)-Lz6ez$UTzi|pZ$DU^Mx7#D4m*x!@ z5jJnt>a%-|Va(p^?>DL0P^Y`}i>PeZsiiKr(|`A~&9(LGGM6&O z1H}RAQDR6}Ir`O*ZK!u^sb^{)*5UTLaV%j_9In28#$DX{>A!I!q0#*l0@UB?1;eL{ zP1UO&bH+=J>Uy`U>#tCq)DAo2_Kql{cDVg21gJgr*wHFP$aU8qZrOUW_@SukqRvI0 zzKgy#e%&(N|2Dtdufm0>s6TfdA#W(7oQ+1c#W3@qNK=j7Khes1+tA{2aZyoMx%d6& zJ$8+u)~$hipN;$frMBv!KkFUi7<$*?*2KOU?e%oK$9@%dj8-<*fnZdJw{zR zxsPo?;p2xJws<*O1U&tzgw$}N4WLT z^Q}(S%er1At5@_=%XCg<8bVz(=o&j~yuv7JW8X%L=<43(mZy76Zjpo``NidVqMd2_ zjowC0lS6DC{@)Q|yg_v^?qxNv$C93<+KUUCTxRp8N&RY-4r`(D5<8>&496})U2bU7 zpm9^Ru^xfeOE$9#>o&)m6e(_NbG0;nhCaQER#pRjGI`RFxz7wLuyU1f8%=%;qtd?CIosor|p+U{aN^fKP9{;)o= zI=Z#|PaFxQHU>=Y*SmZcR1eWwZFB#ut#-Fz-$tzAS>@QDjSNGcjedPL6x-dpt{#1o zecd&e$-0IzcO@t9ZFfxD8ci~y<`o+=_AadI8LGF@!W|8Uc-2b&1ExDMU7lep>`o7h9{VqQ0N|E(z4V9p)jk~G8^qZdKu6T8yo~Xk#i*}W9K^KQc zTcetd2EEG%Nqu88uyJxb?~X9_h9doF42v>0dPQHYG~Bg;&boS*`m2OcjeKz;<4(d~ z{k$)1i5~jXyE}W>e62eSw}z_vcb%=rr+bZ+rurJr4Rb=Qtxr@7^=9&M@UQx@pgVPy z4y%!*0c*J{U+7LrBBFr!s;<-(Do5q(UX^X-YJ}SQ%|@wFLO3&OxcC!YHHTQ8RF0Le zvJ7YHF)P`X(%qR!vz1vd%m(J_P7DdAEwPnp=->Jo7hT4MOj;AN41xd4;|!s~nqESk zu231Swb;*RliPGohl@j3m!o^#*;qVA&$1S&r5bfM?lju07wX5HYb~}_tlo9VZZ7M& ztitjZlFe6|X&b|uRcu8pG}e!M;|Zd*&D4fbaTrN4s-rtTT$_gVr21O@Y_xD^OCd|t zv)RD(AvQ)uN%3~|xA_9vSQ9$bMr)_btqn)+TF6DT5U5(HMj91Df}zj-WdB`mtn2mJ z#+E3c)|$_UYdtrLxxPoDn65CdM%hYSKQr+|X;)pOYRn0%s<-%`tvZAs@hs8KxSgx_ zlOw!e>u1=MURtF~uWhI@U8KAX 
zs*~JL7?j7F{1)ViW%ID=V9(PC7OxU-*Z7GheK6Hpd05JuQo96{4^6%+39b;v6-!H~ zk~gTZARNeJPdYyNfQlOO<&+q-@ zR-N>V{cd Date: Mon, 26 Jan 2026 11:27:30 -0800 Subject: [PATCH 5/9] parakeet works --- examples/models/parakeet/2086-149220-0033.wav | Bin 237964 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 examples/models/parakeet/2086-149220-0033.wav diff --git a/examples/models/parakeet/2086-149220-0033.wav b/examples/models/parakeet/2086-149220-0033.wav deleted file mode 100644 index bd4276beead8e7c34ad0d58cc683034b7ded004d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 237964 zcmYg&1)Nmn_x`{+|g56 z@xLdyXA0Fb3l=&v&@+kJMjdOYmLb#=t}XO2QwfwuS+t2h&?`ShTexcu{qi@r&wcRt z{_j)V3HOiIZq0y4hO*oO&j!7rFFsHn^VQLqAp;|gIZB8YBIwsbA1j@i=_zC+(l1L5 zGvry~9_nM({0uUMbOdUT|5W;+OS)DySySjcs0B2?=B2|&XTkrjeVUsNkLLRC7w4YP z9hAiVRyv&Yiv!P`p*s$`a?{75*|ior9CY77zs*{s)}S?oz8nAhZutLKcvi2~YIStr zE{u2ls2IXW?`15u^9h=zY|CGxg-4ar$YFGTp%$X0C*uW8AVPhNQ-_ z0Ybu>!|yHqIr{%qWp(IHKZFg zq#ZegKWGwK1FrZ08r>nh0hx@uFdBdZ;1Rz-A&_Aw1mFM?fFs;#4qf3MBP)Cd_xWfE z)nU{DVQ2wmQQsA6n|o#paD_(brEx2SJs*u6qYo0!f}zoR>Dxmyfo1vWfTf`nQ0NcY z8m@svAQRGoMS}U*x?$yfh3l|QOaIBAt)3+bZbb={k%X{~<1VulsWA20@GA`CzuFBhswE)R+%g9(<2$W_=(pY$uFl z#-*O_#?lcT;z1xVk&dYUkZK~E0a}6WC@K{v*yxB3m5!ydz+_D5H=4e|qYQJ$ zuu~k2S6Bqw04(U%XfZdS9lVS2@;B~={~$@s5g)z>Sp(gH|9=1%&^ZqN#W&-E5fb2ej>2~p5t59pgmyKNaud~6$CG}CJ8}m8>2v9jB|_{7=Zoisht)&kEPO$ z8hEpT=9xydD>NPH4t5qpZ8lJSGqvxf8b(2;E5;~#hBhD*XaM(hRL)QG)=@cJ3G@s3 zW5g(fgRfAVHS~Y)0oNEm{uqzYy^+p!G`=W0fFa-!C}MwXpw>|a2T;ts1`H$S-~${R zj!l3cpbXOF10x9I20dfM%z)4@kP}7>aKngz?idNc2cv_v2^~YX%$n?>fq+|_ z)-@2}3^5wGlMiqkKJ53wq&WA6umXw0monBkYG7Qltbrvw1wz0;UTWzUcRPteoit*O zGcYn=s3da=vx1Q%!;BF2U(f^i3@t(%%x1WT3?VbtF?5fb0)4}B;pb2nXV4pK2|B>N z@H4(<)(WYlB^b*U`%mj!&>ImLl{|z*Ki+tgzgaag|&}su8A|g3siEeypq8;<^)6l z>x?_b72^;0xi^&KH2`K1_MnVy)--gFXIzqzhTd@C-mTVg&fX1U!wkX*E zqUUf6h*L0H=2$)t#{%e!TSQAN8=SEX1J~SMc($m`7R{cNwHwwkqb#gVa1b;W{>FXg zb^M|%u!D1;4a^99GNbc<(zEA?Kg6|*$F~rvx19ZUG><6G_ 
ziSXy4cG=HjRA>+U$nt`H1Vg?Ibzv!Z@_(Zspa>BQ5X0I4n=u}74ZSip!g^)f=4UvE zOt}=K%#yx^f^cab4`U8xAaj;pc-GA7&>W^F9G?RLESE4EU{jC=`#GSISsT6q zt>BLu3~@NuL`lBJH`hcd)Pr5|%7+;p92bshxyM_v7g~ju&>C9`dv|W1d%!dP!x{I* z*XR+P0>%jbr*&o>U<31ItS}dX%fMJzWBRE_U@M5f~4SWaK$Xeatlij*a4DJim`_p7%AHa?s9a%PkG#|J=Eeq zzKip)e$hVK!5zMjZ?qe_q`Pd5KyLUhGb37n4FCm<7e+F!;6I>ej^i*Z*faOSr6FHU zGZ2$woxoldJP&RM7QvN>>cXpgwn(sESVm|E=X{6#IYxv(j0%0SByY)%=Y;1NJ@gr_ zfhW)|-3a|LBVmk?3`b^fiVY!vi0LRn3NA@0&o~` zj_3e7MHyfc8pbRc|C(l^DA`6*lPEs-P#H)O9EO;|O`|b~)@m^%0gesWzJW)K77|Cq zZKXEB8t{9JXGjjZ4*OY_9rVC#$FgH4<2zyG;0c7rGR1EgRnRgdhCi0lEslp|!_QFv zmc-fmAUAv=AAXARVMH@4^VnGim?;nf zDdE7%6r*D%W%L7$m@O;?$iW}m3iDK$*?{m{whlBg#&5+mY}<@%_PLBp_-l>>+4ed6 zhG*=r@y&5yIO@kUvn;1`d~x&1yXqZ0Q3N1oKfN29oD|U2BQ%< zYG4#A)^P3`d1kDOkyk_p5G!N$&)g69h1oN2G4sNz!<>vY2ztYMm)rLLcaD~kHG%%L z&pY%j2FW&qee zdO<{hwy<)7_Bk?!jxieM2AuI9^F7+YfqSq&_6@vW!C5?bNkp>9cQYP=4@Llb1!jR6 zW^$aP9K0iNVI-_22uYOpOP~y24Ec@(I#2%Z8Y^Mg4^SCiMWOZZnweWd3C_d=*=%R% zh1X@gX2U2j79f;a1iy%j7+s9QFdv5Rh4F`{kOJflX+s-0XN_~Ng|jR`BQSwq;1B1( z7I20PHokf9q&b8--sRxE7OaKQGe@832{A5@7N}uHCt!rQ9yvJ&wa!e+c)&Sq1yQM= z${~{lkHev3qmf61xiMQY;{n&;31r}b zI%F*nv3RHlU>37P4QSRtvj*-sE`eP_5?GzV%iv(o0&fai#z+hyL^9qm9%e-#KP)X^ zm;E^FjxC5+mhggLEaovFiL#clo|Z>|k@0Q`*e|@s#r_y)`QWp` zP_F;8#{s4U0q1hWr?a(3priv#bE}(E%HX(CT0jJ zh0zC1vi6aA<$Wh)y^zOe>&1Qm`zh!SH5vb~Z;jS25@`^qHHO#*S~rIffao1N1QO{l zo-7(%8l+6Km1-qY+lm%Nw&K^~DPq+T*Z8S~QK+X#)*v(!6Pl?`B(;RBH0+G`5^VGo zdl1M!fz2>BMk7$bx`d{IPgo>o7iJk?6Ze5TjGM3dj5(ey1?T1v^5R152S3a!67~zg z6)XbyfmB;b#$cZ?kHM~iWw2%f&D}=znG=~Mqv;9=WOl$>kz;y}oS=8$D(p-6oFlz( zIc7Xq8uZ8rfK_pPhHD?`AN#l1nd2-lund0-w!>Zo+H%uVupGwV3q6C^1ruWL4KX02 z61W7u(N}oA1KWZwV8a~a;tq19D%HnsGJAKF_fuPTtwyV){boODGCCwd%z`~CqAjdY zVo9^`ND-lPtcfA(Tb_#hVLuEM!(TBs0e2i}an=GB&)x>IMs#WlAsXmnTLLa{1)YPX zU`5;$@984ji**t2oFZ-j(%`AWOb=|Sgni%$Bg6dJlK~C9|ArEnBdi8`!7s**yO;-S z5EjjT9P)+65jP;#;5-#}(oqt=0XxF6G#c!9g7cWwxnIT%?sCkir#k5GmM4Z4LQW_T z8{(V2+dIom@89W-y3L5be{sr1ma#J116->rH z8aBhbBtSGi><`cnJT2sj7{~jcRl%2NbOpABggC2=$cOz^I2wZ&2~u5~kU@H8?7Qg> 
z`)k&Ki_lj=yUP(m3L#9TS)omicYpy%)u32lfIG zQ9zf_4Mv8#Cj|LYxL31b%=&cxiAf&_>oW=p>3H3k3VFYt*NE^} z;AL=L_#GFF1B`-epcAMB3a~Se7U1C`=^pUKehQHok8yaMFoJS*jIN`z?e*FV(@`EU$!_`*`m> zNN8|UFBplB^eqYCK=2RPj?uzVBzAV~p_YIdAP*V=BYTe#jFVAaC`zT@ls8&E5?E71J)2ly)tvby3rCO;Ujt15DTc9jhGBE zYk=kz9kM>WEdiM!-w(dUIdBfX!d$>#EO%g=pF$FdtHaU-7H?(Bct-$;3zDXR5suiw zKAc4XE8$HKtSm8qhgL}Fi6re}6KrbS^AoYp&&>(fZJCZ;+ ziz8&!(Q|jGpK2-ybiiWu2OSM7uxI!9mZmDx+cGB6wX=)}+%VI`4NGl}jQ z2&1q8a0e_NmI%GUf)KS}bqxgJogP>l_z(7a3#&jXY)T{+Y!2}jtcfE*L}_Rp9s{~< zCQhuNGMK%G(24f~TWMZk3m_D0VqgUFa8z!uU!Mg*S=yu#u+HpChNt6Gj$;n`6i zSOmMkzjAH{*6b#3uhL4i0%{Sn!#)VSJMum_TBw~Ul8->1x|K>mBj^bc0%8KJrI3%~ zH88LQPJvvQD|jUdzQr|59qR^2f>8$jB4PvXVAxNf^R|1D%sNwHNo!~IeZ|>p>O=M4~3V6rtpWpc*V?FA9!)djH6O$2r~kX@ymZ~ z1F&i^PAp+DE#!aU1ybldi!i2!WQLJ}Az_IE;SDh(@897ZkqNUfTIYT+R&WXIANB|d z!0w?bXqcr9pDBf~gZGQEuZ2hs{Dt?HYeRK`i8|sk&X;I}ibkr16_iGtcY&^uQ?U~k z$_Rg`2j9rG@SbTk;iQ&+(I=w72D*b%h&#QZ8bHxat(2a@GF)`8j#@zU3*JJ6g*6Zk zV9`Prn@o4gv>RF;wGH2n`Ps=93aO@*n!+5M@LIj5h12)C^duV~Pp@;;4eYAyl zDUgll6%ww&0mz}l2G9n$8`&>l89GHxuon1SpfLjJSSunL#8ZxOS^F3#+JsN!tTE3J z^8zl}uE1>~%{i0UE77*1F9h##=S^wAzH@T7WWfm z_h3&H`=wY@z?-oBK~q?BAj&`{7a0^t2fh%y7f$lE)uctN72rkrU1hKybO710?ZIl$ z3$Gy%Y1UH>SP~AjgY`jJ2f#Vj$ee>l?ghHR$avQR7S1bkSSr{F{lPnh-)Mm>AT?+} zN7Ay9{%WXK#GQx_fWmNm5~Q9RNJnjiTq@_KSvW{S;V6b>6GJjatc}$kd=Byswvg`$ z$5U7(@Tvno1o(|4K1d~Ih@~gcE~0*DEHD=&iy$0wFUU%w51s=?42y)9!9EJV zQw?8_8DX~_%mJ)pEybAx_-Zga(2MUd8-s}uTVjT=JkAV*b%8dZfN_8x!Jy2mh|_Su zF9WUcl|U$>NUWK02PkbNHUpypqd0(R{M!+nBZr7}59c|-z_1{W;~^=mF!{{^UV*^p z#gaCYX!Ph6`~W|V@x!NLY%RouWppQ&BpFL>K$BQqLw3*t7)2jy7ZDb+LGW*oDqx}lAs6Tgt4|;gwZmQ;y<%k0J*19WTpC^zQ9HN>@c{4` z_Qv2dw2TOV*R8x_5;{`kmm{>NqZ8f9C3Gf)+C$VAN9Vk22PVWCDV|W8PWoUg#=4nf-S7zb z4Qb39Sb{AeJ^}Ob$^#q(j|YUnYXTb$|6zrHYsg<>?NA%S4v^DC&+F(8d~1bP9{QA$ z2QHyiL>YO~Vl7|05;|^>XDy^7kFG9gm$ggudsVwezO#VJT%#kON}~m|Q%rTMX#|{E z=KVa58lYi#C0GaIHmpcI)H~`nPz}_vhen0Cp@P~er+G+J&P3%92Vm{O@fv&{^8cwM zIYiCakAvq$%!4&FnhO$%WVY~7A9);<)q0COGax-DI0hcMWg5ZI37fVBfwb@ 
zz%R}&(izYJeDG07x-Oyv7y(ww==>(ty+UPC?>e=@=(?bt)&AA~34Km$IQ|J;o!0); zj%r7=W7=^#PSEjJ==fVZ89Gi=y$jUZ`B3k9A#7FANUI2wIFK{scmVO6k5C1iRnTm( z2g5eyAT7Zo!Qvr2T($Yg`kQ%UuEA~2&CcF_NGK=cLBf(3d zb{lFXo6a-n9C$??t`}|>t)oxq1}n=9D#0z{9<+~NtTy4lQ>ndNI>0r2bQO9Cy@WnO z@6e~0(4DvmcX|sw=@(a>iKRLcKjji*bq#$w(^U?&)Q%9;naXsh?`)a@Y$<_yx$8fzl$)zDE#`URJP!GN$bI)@kIGduwbfZ9?jQxbYwN(js& z1YRTT5P4Mw-x&5= z?AgFXm_ITN*v|n!A_B&IVQIV?#ah-#GC?#AMq_*BcR0`j5DIRCC&~#q^60I1e6`QOf*xnb;2u2(JpREC`{pkj{Y4 zywG(XVH7-jF~n`)Nz}sk1>(CaA)YM=p|mW7TVw~Ze&P2e;Fn0;^v8L8j%krK#mWgQ zCRht|Wf{$;Iuuv>hzVh*$z+*WBS0#*@)C&m;JXk#F-{TlK<@kv9kdIb!ykdqaDl$O4+fdKvK% z;vYmx&?F+~a5N4N4Ij^IH?)X8@W=50@5W(u$E!qSQ@Bs$b>T@dKX5l&SZ0Xtv*<2% z>5!wsY7e}CF=8#lYY|uv{-q``h>Rsx4fw+d_z#&I^osc)I|lrL`x?j_*OIn4lB)`7 z9w-AtmC?bG9drvWLj(vv2)?W&6kaFvT-45xSA{P zd*R~(;SL91qwGaOC!)fl|FB;g>Jd>Y%Ndy`L{W&`Qiy%PLkVPSocDmNv6_PHBM3vi zg#N-np&$Js>x;YKB-S>svRG@--Ytnjs>n^Uh8PK0kMP?X;cNcIU6e-)h}3Ww`4V7- zS3~^n5G)8>%$ej{Yez&0Sa;*C9FEKQZ7ALm1c&j?EXNYyGma?`S0L{UFTgB^C%o#& zAWxi2oZW%A3@eE)#NhDLZAn_~>C=vQj^hTz6ImoD#1DumI4(qtfRzMW5$9>Z!W{cz zWrW-mOA!cz|j zMoa@AjO-Wm4n0E3(IjQ;G$X=Kp(|h?=*HU|;82bBB8@^d&DBk8gwZ#U3{*Pz5a%-5 znbpA8sLMHH-mL}Gg)>!<9ajBlm)VhXmACvXXWHPqfjIWyh~Y2?>_sBNL#!H066qC+ zA9{sQ*qvfW#E~3JAZ|oN*+=Lr+!l%|@!Ok9_6-*{X%?poe+tPnUO}3OFS}Bctm(kSRuS8>=gD0e+Z|Ae4$ckC0dUVv&H`6 zL~*A0g7~)hzWAy5iTIJYQCulLA&wJ!iK(J0Gzd3@Bf@Us6X6|Ujj&93O1NJbLUM|s zorx>jH` zN?mena!uk72{Yq8wgs_T^tq_tB4l{BL{(RgT+dDD3D4*q!ai^|9 zSf$RE3j$aDBmAwtzkMJ1ru&k8C%sR4bG%93(cTZdX+F2l6i5krgInYq@&UQKJSOP! zkM}+1dBiogb&!2}^LLHj`f+vpYwoM=Q8l#k+ln<6c@^t+Ai#nlY6%F8(qeAN$%Xe(~0)FoFiGf^vL92y37;NUq2xGLtRvZ6LQgO=rY44{0mL8M7 zDpi;Kcf2>|uZR~+_vkihL*=#pMP9Gl;{MF_xvSdssN3MF@W|dr{WXED@)K%*GPE~! 
zU+M=N`WTk$({v-m6zylZd*DlNFL!n83HzbuV~rQ;9kr1)F;y8AW6GA5{8-ezu+7c& z1>*`v75r3i^5(vx9i;~=@2y|zc*`Fx)mt0mXJuS%=jyz+$D-c(ePa4%^pSgJbhmdN z(s6&=6&X_UWScIcqrQ{!hUWwOl7^8r%PXI*sHj+7CDp#s(9*KjRp5(L28ja=LG$j& zM`By!mnN5`9m(9CeZEab+dgfMW%f*qNSbTAH!|JygtT6LGU)Vm@Sb(Ix?Xe*c6D++ z?HcQT-DCDe20oWJYO|!n`sK#urXNk6OqUH8bTh?MsyCSIf6X(&71MgOWo}cuhU>Kp zs@qh$%W6yFinkZOf3vyZT0y^?XK#L4xTxqv(Tt)#g;xsJ7TjHQxU!FZrV?STPn_EJ zX4f}w%NVp^h<)(*f!+EY?6s(SP1k*0wsz9m&P?B%@LbdxeNu3W#(a=Go*~wA@~~xca;5@2jkpFO_#L^Onphxmn^U-B%`+ ze^Y*@Tvyhm=<$M&in43-JV*7LVjs--Aa_LHWdrXU(lE5o(Afi;diChmquV{*tzBPi z|7yn537aD3i0!?PHpkZ7TXw1F#p0;S>5a2pNByPpkJ>t2PjkP>^)Z3C-pQ|}CuOhB zS=}M4W80j}v=g@5BeIQ)#Usj6xr4Gs9ivUwrfAQouLPgN0AsRi3SAUp22LtL~+`%34Rw zztt4R;ng9FsZ>~))_@nyaz#GwfGe_pm z>pgamd)R_u{|uG~Zn{0H&w*}tb=}b|txI|KX4^U4BfgzY$(0`!FDYtM;w$r14{Q0% z7cKmwd&BUIsYk?%Hb=_M?49i^I(^dlTJDn_HfDdB;*NRPP!asvGsJV=JIFsga6J$a z92Xen&+=FM?+;E;&I*qj#i)*P$Kt<;n;83X)JE%3Q*EpA8hyIFybo4W7nIi~xp+zvT| z(sFI%%v*#?-%E~+rcat?IG%9zb=SCFaDU-72RCRx>Rp!mqu;STnxH1lOnEG+qwUTJ zv*}I!9!U~MYB9>4fz96C?rE;et$*9|n+G&M-13Ga&?-7-w{CQ7w*S`he9Mz9j^>k1 z!G@x`l$t{or%Il^x$4HItNN>5ul2jpxge>~SoA{Sdjf}QyThsPsf8X(`9`${n>${+9m+aZ`^US|0*{&H)7iyPR&MeiSegmTfDH&indm@5>`E9=ve>rTnW8 zehv=Cmg%%h0X6b zd|C5A<)>xCig)MtyK1{=xp4U6@M|0L#}>X;^l8!Fl8t2@%a#;RsyL#a%B`L7>s|8b z?}z1%STp!q54lra=i53SPX5{Yn{{W@4XL88{&MfLy)Ne$9;i+AzGwV8`pXzg{PN5h zoqp{!t=$vt`sLo;&EKuAvp;)o)N{U_^_{DpE&rtStJ0M6-^-^|?ymPZ=Lh$xW2M_8 z{b`=ut-aUwJ>GMBr>2Zo;tp8f(+^Xk+=pAHx6E)n>Db=9yzyYu700Jui&kL#C?YL# zsI`Z=yYXr154oH7r1OK;tkz48gZ9Mcl={Z1V0lJaP08;?^9!E3{{7`IF8^`0<;MA& zdyAhhf2t~}=9TI*B}}@2!7<6Yd_vH$bO(Dw>hUVuKuf< z{#EG}Yf85l)f8CrU(V~4H$Lyx{7p9#O9q#puUJ<(v0`^|e8Cqr*)f|34Vcn?)C+yG z`^60&(>J1hN!HM8G3g<*XnY{rA6e?zkzaK7_|-$@sZBq-P6*FLpG~+gSl=={oEG0bN;k1q=QS2vi=sY@T4#A) zH(ojIwYWC69&Jr-eY0g|(`Sv=#@PCS)f>v&mrgAHqwuMl%L{rHe3k!QL3+ukiXPQd zt9w<9ENRHQx9C*hfsPx-rjAML+okuE!RH25cClp6O}mkFA!3lJDeB(XFN7~Ewq0zx zTvu|l?quUz&bc~|tu|#`mapTAE;(%s2}vnEJ3iC(TF&UCg2P)$Z|DZeQyvupHHMjMcW3Iijp?ghJ<>|_Y zE3cLvFL|i=Y|+%>`6c@DCo7w(u2=V~_LVh4 
zFeYhq($s{yY`jlyTwQ24`hsPvxE|jG3n37O-vow zX?AXE`bAp$y>5IUIG|-^)$*dmyifD5R@Af%_iYSb4mjloL+{vjiBl3yDM{Ib+nsM) zn%S82al~jf;Er|Aaee2o*dKS8++F-7@)f1CJVZXMS@c6p>n%RZd{eZpmvBHW4@9^# z9rrovTm7w{wfY_FT0UuLuFb8TRz0qKPx1Rjmg3bVwz9{|_f@`8?W#Upl~?vnQA_^E zWe;dSwSRwzY49IC_w{*raF+q@j?SdpvQJiG^KnW^Jr9Y#2?aau4k)H6+M45s`%BaznfRN zNBP$VmkL`g3t|nn4`LR_ok?5OHrS>py;tJnQC34+^^!lq^RFYX<=)mq-nZo$>W9ix z@@A!{c)Pw_e?@=NaLgzfPw6^}QCs};zq&tKPgD*nKV0=t&FS)0WtW-<8)H*{%(|BOaMt@-f2S`>i_3fZGZGCLElnQIlH{3yt?9rnjOvK-Ku}KFxfIc=H6&agd^&%q=?L>%pcOOCLM|M zM!sq|qU`p}a7Zn$+sC;l1)Ah2iV@)Soo^hnLEUnM0o>e)kd|c_&l9x+fEA^E3topWkYt@1Bc|~gBqEVX3L9k>$fN?y|qrO^Ks{!&Zk^P@5o@5c15^LXs1orByqlE zlk$Xng(gkVdMIxO#(5K+|7+>ge4}Yo<1_VTwQ^1OnpsuTE8Z)+r*uil(2|v#J= z;@O&q8UoIxa8HIFB~8wnpAC|6n*RrYSz}U{|cO!L`FT zFW~TR2rO6L6^4m#X+71cY7Av(?xD;@i~6CwBCy~8uy3@hviYlq9u2b_vg@C&dAzEs z^6AP?%O{pjE`Fh~Ab)+)Zl}SvI%7)8ziHofJ27ZrpZ4t&vJa$w9baI(k#IJCo7pPd zuT4-J{2Ltot40>iE>5kx(^1nJ>&+I^t>a_&CJjl8OBxp|+R~GLNJ&Y$VA~KqIjSr= zD(XdDhW{t0!=BW%ufDi(K=b^@_AS5qutzsZHHlHikIcE&u94lMMn~4X@X(9`G?5KF~7%Ljs9QMCy^Z@;w>K-HQj5{7vk%}IPI{a2D=9S^)-2KdY*F6 zbnS7Saew8x;W_WQ=!x{s_V)Mo^8Dm_)p@*CY+dYF>=@A6-}!{+s(e!XOXo9WSd*f6 zN9RU=6um0y=ZL0=QBj7d)e*xZzK`%)^DPU^35Lx=dv%F&BDlfdD}s3%0b;v8T4lDuy#^-N%x%LX~SVdgy|vE9^<=4x8XTmxv*Y1F09x3tK-z?R6o6H zaHl#=`6zfQusTraf7M^+pA|?7*2-fvhcHAc*4=9;HjFmb8}+6ijOUF#OfjZcjj6_$ z40H8Q>lR6MLR+mx8KumXw*|)pGlEwFe*`)OpQW?Ifhz$axI54x@QVLuUx7EldxvMV z`$N}i*K@ATu0!tKzBR#%a;aJ;F45;1%8l#Ish0KTBuj(kh-HN3HOp}8Kx>?Jh_$z+ z#W>zDKtDp-uUXY*`AG1d;NQVG`9iQR__F+t{EB=|{#2eK_g0RmyXaq4tP}6ib<^kR zHN(TkF^1>#f9Tih`|2i2Y0@U~icl{kio=LGzE%gT6P4Fxi~LxyByd;oHhGV1RQ4-h zsOz*r!VAKa!so(8;jo|+pAq+o5mK_WOd6^SO1DXgQlZ#hd`Ebe{uRd!^=-9GJ*2Ks zSF1zSLZwtGRGw2_P_8L%xrc01jwlB8b9FLV@B@^+%M{bZIPnf~nYdQmB3=|HN~fd< zT^C)Bu1Q)Vsp6NSP8=vy(%S^9wcc8cX3_pqUs7kQUDYefLFI&UUb&!LQEHV&(pkD1 zsa{srC{q-h@|}FUyg7Iy&_A%l|E5p$MtDnoUVl#Dm*Ac1SYey^lrGKioxYF0)bNk- zfZ=(=FGihdi}6#_b@Q*LC}WOMHdyq-q)B3-_NqFMW@J$-mB-YPYNqN^8Xs!P-s z<*-tue5u?=Ffs~U0g1n6%(aSQnb`W5>FFH31{dn 
zi}$t8TBJ5y8=xg??X`GqzdA+jul7*`ieH(iexq7RRzB?^VVh7Pj22%O9}*2>rH~@t zF51Lq%B{8)Cx}bLd14pwAK`z(^THjpQ}{dC$m`TE-Vv&ztTlFRE#xP^(q7Z%YwNY2 zX!of=>q(<(6fB}e^a&n85e#Bbs1u5W79maSOYIL3yNg+(RV){N5}u|^0sjBYo20v= zwDY8=EcFAzOv=%YpwBE}fv|)=tArgwsSqU^#Zt-uZ>C)EG|CN^&>JR)wNI(%(b_<5 zr1pgNwDzR7fHJy^wTHDy+P&ISlwbaib`a7i3yysEU~07k{kzE@wGSxQyq|ilqc;B5 z&XI1`lRO^RhHCw_an#D=l+9U2CGg(BIqe{|@);f5v^S`a`?N=?-LL50k5s#XGT?X@ z3Gd@$UylD3u1x<1yB&=Y?-vzO*87O|wf2ejrS>!JxiyD&((rGa(rAxtiZGON^Ia&< zJ(6bpm~gi+Mi?dBDU7DOQS^@AS?yV^A01=0yS0hhw9w4P)9mii25ZA;&Tmp4{Z;Kf z?SI-2G#|X*UrKv{2Websp*_iA!b`&Y!akv#j_dSMNDCdruA-Z=>VpYwvXDo5SVx+e zLALw6@TTyw@DXLpe-Zwo`)>(bN!}A_4?v?9x6#ZWp<}3)sCg-8ey3_xiP0%0Fk2I_^++{ebFV-EG&WVG>FKIXAS?y1CuDV=Z zqmEa55t}xuUum0##bT6nUHV&hm;Nbzn*M#AomgX;__a{0-LCyi7B)dmRj(>PD@PSs znL{?VSsNml#R~BQ$tkH)xwKOnDg7>{h%XXW4{0xH-L>oLHuVkq{I1rhT?nNS!q>vx zVySqWv{c$9?UtUD?6h0ZLNjlr*^SiP>KXNj`mcILtx!$c1k$pR5LhEj7Jn8sF;xnP zUlV%wkTp~i&a$-uX*9e=9l)e?0e;cO+nA0-eD9wlr}7FN=~EL%pl`?!{_)u~t20`&mN`9pQH`j;wb zy~snoM*FB7Tg+g}O43E$JlDa;i&iSOxTL%Lyy?jz|9>8P|-n4raKx|Pf~NxofeFC{baa=q)mGsN z+T8!idSApk>ra;D=ADM`#6{{VIWago@Oa>_z}JC^fkT19;8l5!LTdxTs+(#^Fy)wU zGmECX3@3Hpi4OHWxm(DWcM5(HY%hN%&r}wwuV{;fRia(`Rv$FHWt?UF$}m_zU;0Y8 zp{6OvgB^ksf(L>lTLRDn;Qp7XvUTBGMDj!ybJ;qSyTNtG|Kp^xy90GT@~3S#v1!kWR~d}iQd3g+6RaD+xv9Bo&J6Dc5RMOpnWd#F&`o_L5a+v{_Q}@zzxV&-SNxj-n}e^)i{$CSje*YthF~Bd1eW=F zd!O+ndiJ{yyQ-bPIzMy`_5A7i+HH5f={}|WYJD;B>y&QEJClYbS0wa{X=AbKn&c|~ z1i79uOuZ^SXsofmY&(&3B1yINj(pivrn@BUQSJ&%^j5eP_XW>f zujC!9?t5M}qt0D(j_Zkm~K4rXmMtnfOUiXc-M7T=6zC`UR+%L|M z4(Z34ms&5GZ!>h2`U-zjglp5*lK*&%X8w&(tVYq!*l+&0z)yh{!DoWi{*QbCZ-I9j z<+Tm&KiwwpOWw_%qv(`Q*N3Jrh80@{u%cE>JsZ|d4BEBHT&)E)=ln4@ujFaF@MFa&Ah+8Gpjo8ese$^ zD5mLl3uXQp?mFi<`!9{xo9}L&WdFORQ*cZ~$G9(IcgEfmR~voO+};#px!b&qBGSJt zgU#{62w#flaqpeJu7R_f+4#NbC1X2Nr1_fuTkQw+JMli&wJ7{ z%6+Fh&D+nH?cE*9OT2CG?C`bfU0YgzYI&sjx0ajsMfQ7J1~s2(`l9J@^AUTs{nzHX zO>Z@ibiZl+tCQUC?QVNB9?8gWyC!A6^$t^4Q#Wm|KTB+59O93xwN@W!KH>?ggRL3y z*AxFpc_HieHfz$}wGFlYYpgT1w+=O}R=;w0vaf9Xv%XK$S%<@w@B35yCsMV2AG;~K 
zC~8O4wdlOWmP~ub#`v$yW2AA)VfTLfC-x_uZN0zB*Cf4Ro-|4OUZ~YSZ_cu|waze) z)F0O>gV(%))|D+C?Qb}r^!((py3e(KVE?J5#6G~8;YxQdcQ{&pY4Nx8Y8lX+-h8Sh z&bvx%)SI+FoL@AyZ}vKF1RmDUHS|)hwf@iF*|IHqN>owAqp|S`OX61ZPVJ(QEON1DfS0w?`5I=4|^Ne0>hHbXLHNi)FvHGc{Z~- zy;It<^f@UrVw)muQB_7U(605H*2icc(|L}OT=%n7vo-v84)qgGTE}ol5F|TRIclz zoeS>rf8fpc%=G03D-_BFipPXs)Wyn1^)F$&xLBxCA65zjlYF0fCb_%2);J5COI&-M zilf+GZeMC&(0rxg*ZOV^gByf~$Le(r+ne6B?{pq@ZnS^g?6P0;i2hGKhdh%#I~^Mu zRyOR?4yLDf8kqA((#E*ollG;)kkL7FXWAzz?<7BvFe`GpxW`xJzR#B>PP6PZjS=F5 zjdEYmTG_EdL}trMswGp?}yo z*W@yOZd_>k+C0?yY{X;MJmYNXN9|X2foi7Mp`+$eO62MCC&4zsGr=}W1g)_6Ib9WOgZ*oQRzS{GHjulDx(>GgSaGaBA)-fORI z?%wcbom#)Ld5WXJQQ7i$)-3Z||pcWt3Zh7`g zw$ClKQBv|6VaX z_7Xemn+!H%oOxu_5}V03B>H=6J5#A)h57Bss>uG9GQCH%2{q~?+FT)$xHnZCE~HY{ z{gixF?Izxk9@M=d>BXDc42l3B5t?ZA5vA=^oWYDhTYsv5zJHm2wr{KFHrGYRclMX< zzu7;qx3^c&4(F7ncN*5zFRr&Wyx#b6le5XuWNzNx>~8MaGN7fRWuc>oGv68Cy1n_w zrZ@bnqn}P&mV6-USL?di?9?eauLxGM?Y#64+UEcpCG z0vi-bjg@V|jq(7ci~M|0qO8R@dgEY~`I7ko^RwpJ<_qTdh_9oU#^)sbX^>#Tk=m|^;76jYLZv@u`PWhMkuleJG$0@Imt2@;dC~(4s<+ge}+87OuNb;zYG=zJJbDH@<#cH{Db_G{G>cjULn6NKQFhFPX(6;_XVG)e?IfRSTELy zxzZHrY00W9(v8=DpnuR%Yxv9%VW`%3GQ4l-Z2ZM|#kke@qH%$-kFn6O)iB#I+0d2N zg`M=PbkVwe>6mntR%J!v58^g)mZ;HN5_^PKX}z*Q7)xvR?`e&@P)pDp6zLA5_3(q* z2+d9_qnGH-t>@II)rZuvl$jbuIga+Ks^V@=y-7vnFZB)E(+)bpJ4|7vEOzRr{$O)I`ddoL4?oUQy;N?

2 z5v|B}soztLr)h1IOKY};lof0*z9n84%~A|yVdm33B6U4=Q*^KC{?xVTV)PMuO(*Gd z^yBq+>-*{r`U`abxb92cR^5Ev0G%rRCe4vF@ndl?&HA$N24z(a)BKE-#d=SDP>ogh zE6bIKl@Ut3QXu~&ACiBOzmtEG|0ca%kT1)va#!VXWi^d%r}C##MGV|s9ic8$>q$~` zwGEWFI6-gJndmJ^yxko`8IZyB24FJD^bD<4zoLk6A*~+UkTn0IxqU$ry-)3}W~u4a z&IEO)`hxl{&G&*DNBr;w%{hTGG~WoKcptrK_ot|ccSswgpQS=6N%w$mn{F@7YnyJD z?yRm(*Q{&MmFTYP4(e9vM(N^oHPU72cWJ#eRH_hn(R+K>g)_o_%6AoOyD5TjQATj9 zdR>{W+@<6wdGhCk$n~@e`Hbc$DoIL)(niTr+A2epr<9MBJ<2bX2goNqXR1TgN7Pm7 zPW2S6@)df$I9j_+dxqwF=4skY%8Vpn zACuPi5JFy5J<7l24{YjP>U^5{R>}m#Yj@F`ihC*lkw|zPAigGkNIA$TX_&M?`c}Fj z1*B%lBi%-`e^NJFH(mFPZjo-OZmw>eE>-7|YNVUeKhh`CWXVFaJ}GV%@1pk>uaX2; z5O&fj7y3J8hF&3kccc733$5S3q8z~zb(T6!9YgOV_9I#%(UcjTMw*^Rd9g>e zsZ{qK`t74F5k^W@F_K#9D{Rwh)bZL7!rf^#sK%0n?x!~-k80_JmqEgdw8F0wQpI)R zA#sOTPB}-LR4F!yQ>87^Aju?6mqtmcl;!P6S(0_)UD8{`R8Da(Wlgq7+r)QCV$(zu zy{mb@cn@VSri$sb#@|eN2#KnkTR^?@FfZ9>nr7cj)lpVs8nn}@7zG0X)NnNMz7h<)p>Sw}i?LBp& zHeXHBo}~=JT3P|^*HS6pca>H{E|S4~$}8m%x6aVk$+GIux+#m5C)6!!iaJ@X6?{T> zdYkuaX^r%!&`MtNZAm1Xw(816ulTQI(=FDGl#Eg@-BZ$Vsjn0%o)fo7rSz8Si_*K| z1C#+RqxE%XX}0Jf`Kgp6nNM%EJ}>0ce0OQT2}dcfxJA7oyGaAf3DFA$4a!L76 z*(rBdCMaLa^OYUS1|^*==w0=I{H5GhnIW%~1$kC*N8lNMx_?^mQMHgVoU7E&wI9S- z{damvKf*ZC{IB`4^?HPf_9sVJ>Z}!!_e9 z?GZX_9VweQlCnuBgthdq%PZ(D)#oTHyH`9Xtspe4*PWJ}q{&37mvmH&qyKx-NqN^9 zv=?zriBs+B6=8%nE7&bKjrjF<*%4e4e2U&c>gI0~Xde*i9oq_jULeB%j;D)vhHsp2 ztuN2p*?osI)79dh>^8It_Fndsmi(qynuB3_6J z#(EO|NtvE~BWG*2C$%_vU+S*(&FLknr;;Ct9~q;MuCtCbtX6LewpCVA9%*f0v3IZg zXP43QvG1n;*T8(GT>QW=#yHoIWBi|adc@~ZonmycKgEos9lc)kW=)i7mHvS4w641} zK`jk5`F{7F^Yrjc@cd2xBzuwjihH5E+~swiao%*_=`C~*b7i=ST<^MUPFJg`^}&`` zn|)1#8aLPZtA4CJUlCdU1ik05qBx_lF3)*2|3XpT(dJ8HtfhzH57X?}^yK9kTiaB3 zy4d|<-*H19965aO^zP3SnQzHi+_qQd*5ta_ZPt#)TJ0H6PNP=+d7Y)@_txKAYwdrw zjI}@R`p%!I(EoX6xEAwRN?dwv>ZsILQzxe-X563EIeTZu;nc3lL*p+-9@Wn!+gcm+ z_^n=_^PpX6KGQsn-b9uhcRGLY3<*Ta8NoAtw=dq`(?7%as&~An!SzS$Yb_TVKC7Nk zHlt|z&3E%RUQfBQ;$q50@A*5pfrT6VWeg z=$bK4jQ?v=^3+f6H1<2+_Q~`?X(tmVMLuXeFU<2VaLP?ZT&(S3GQjY{S_(phNa=q1ZNBw8jO8MyGyYfH38obc( 
zZ^tt&r*cmZJ>@!i!TtFo*1Ry3EMtGK%{!adWw(*2jar@y~&M?_u9toC1b zZrAmCkBNP3_2H|P_Yev6* zllX_QQ5+>M6*nm+Xt1{-|G8bARQs@^+=~6zcN+yi#$Y?Z3TF zSDl)BdhhAaPv3R=+Uag*+Mbzqs>87-&dScxwCR1@+`enb#mQ%;emnV-F$0GW9-Po~ zWtZ=I=l9L+^uFz?u-dz|`Fi#4^6KKtMI9?THvby9C>r&<47J7$*4yID8Qt11>lo-1 z=uvpv7rifc+LBqHa5LgA{TwMzY7kPCwBTZ8q4tbC($~W)`JW0tq%PJCH$7$g$MCv- zj=ov)tG$DV{SN<1|BtjkyVkwQQQcTwn_lfMf4lhof>n8UUSD@P^}_J8-OtQ9-RE@h z)Q;1xQ=U_Ao%H>wKl=EeKUd7LcI>#Y@05Yx-&y>C?ZK$Klg4ZqJiF(#juUgUyO}yR zBy2alpg6p*xBOjYD_V5pY(Z?**4DSxCk#^}eu-V5xH8?{rd#`c?c6#3PCI*!?tNRA z9&LV4dD%A0{E=`x__lAmd%Npr-&5LK(nxWu_J(j(ny4E``>Z*w&3LNzGM_9U4y5UatC}tiJI7k#rSMaV$%8be&yX10+C%xDj{P7k77e zcXv--+}-8H-AN!$jJP1dcUd2wf5|@_$g*K)rn{@FyKmjPRpImT4^L9Azf4VT`}Ee6 zlTS}16;JYivf$B!2WRgzzt`*mC;0K{D0UZ)N&Xsl;PtHFov`v1+Qq9x+q; zC46%owoc9~kt=2O%J}lD`OmFCYJ5-sayE6ryRVqX+#pc)#6vdqHfjh@mb+h!q83EK-XI{m87Kk+)>X1_b|^C?kHb{tKm*@EVl>Pl5<+8 zul$knZREGAUyFSxtCZ&t<4T+a;5sNL~C1{yRfqTaN$SiZRMfK5}qDc zqe7*sPb!Zs?-ReKWVc9-#Z85)n}~tLTxqdTOS**}(pu#(P=HIaRq9Uc)&v=98gA>4 z8q1k;Oq6acX_i`WTioqk!<@0s!Oj(~#hw-36`og)3kB}%0hu$>j{WHUrA=yu)Hk03 zKI=X``nc+Y@b=fs=1)&OOh}sb`MWicyk`lC4XHclpEup#b*b6j+q72=vhr*SFC zVy%D6i$nv~kNrT`p`S4%(@G8Fzq|MGO^9XM))rex&k}*9*T+{W>n^h?wsnYI|A=@Z zMtBbu&MA0pJMK;qSfw)MX4AEow68SJH1pVBR6XK|I*L3@4W@PzywY2lCXWy=c*9-A z9U1m;M<@G|!qSEBZR@S0^M~ZF%4(Iq_(zGa%Rc{1?eQV`?Y)$PDba66z3?RMdcHk1 zGkYj^kIwOphbieLB#?h z#Ng8b`vZChDt_yI#u&XmU;SSA5A*HpBLGeAO&${Vx)$1MZVREV*G zf7Rg2!FK~4e&5XN3@w!oSHcBbzm zPwfTygqld~6vucgxkotT>>69Hb%Fh%`K_e?R-l{)qkd@^ib7 z!{3d17nM5Z$A~PW(LY1bva)32D{}sPMKGu$1%VF{j`CHxa-MlEy=$lI zynCd#yKq_QNz>YX`U{2<#@>eJItyEw93;mJeq00Z1n+8Yl<-69sYa7O$+2Xn8ZS5G z*STidX5Fw=+RLoQ+}S~IB`>QH z%0X$k_(+Hqn;<6N1M%R+U@Ux89!ih+(Via;gY8Z3oIlsoI{j?^ZOP|}sS8tgf3ET) zFM}>f@Z3-Ww3+7pf$c*74a*OE5;{Dnu5Wq6Wp)cSgD|OYl?vogHq1EFw_Z@AaBtN4 zm{+k|WAdX96?+-_EWlu{uIot8Rd-9fglPUYNApkkV?wmJLR^e^WwPMlW$@Wna?606 z)|PI{7uCn)etHwL0VqjddL~tp+^m*Y7RqzwZSpE$tu}A~f8ad!O)3E6t+OZ4S;F=- z_uZd%>AQYX-~Ri2;q#eqZPLo-#M!s<|4?O&@%|TrjbYQn--b^L9U4$zy05LybS1AV 
z8>CN=AZo)V88%rq2G$C{6MZITe@tTW@=;YHP6e;>y`n!&S5se!Jh#hp(%s&p;lA_b z#ktZ+c_>cbGi8G`SE$8L=4x;qxM%pkmEt|wrQRY_s7bVk?#IM3q4W)M1I~5NmFaRZ zsVUfcjQCnm_%trryTEhM_08VNnwNVtt7}HL-(7xo{8cF6UpnzCO#K^~-&z(AhBgfP$Oy0noRLOfR*%BMx6(35-U@o`Iz zBZX_Nx`L#dYsHjd4r?@{&1 zlEhi!GW9RpT=yQ?8{d7s0gj-f!NYGa~(Lxq)GZx7=-Y42xiUeGx2-<%QI`B|^BkL6V@T<&_}eJspXvZ(i( zarz^M-bTroZM0ixv zrD>FIEOUz}BX8!VtG24603M$Zl33f z>xCn$u!L=QL7)7IdE4`T=a02baA@3TyeEa_%0y~0yHEQ-_d&nju+@;L@2$ht*%!L+Rz(OZZSax3ATx4fr~`?+hO zYrCtid$z~wZOM-l=8Aiyb#ht6sfL5GX;3!EZ^2sJA}$xtiFUDyClxH-)XkyaqFM3GohC+AXEJauN7SE|`-grCVYT@taUg zD8-vN8@%&x7vt&)OCIlR=Q`mI^bX-N`8DEp`HZ@Y96)C=gEU992XqNwYLC~)>Cfo& zx)Is}&3~FJ*r)KCF4_X^DcuZxlwrByq+z|Gl3}>MjV@EuoSj9lB)6)!j&rs*lFQ)U@^6G!;yG!qTwb}T&}u~>X&n{tu*v*WK4yCineME2Ib-$!FVq>8?KZ_%|MDJzqTW@c!4kAYN#fDN#xrNdK zyvWbQRN#%9flAwe#vcPFA4|O;2ZG~x8H}YqP^Kv2H;n|tt&k{(Xu~e@8TkiHvisl_ zH6{zdX(EW(>NlmPau_Vmb5c9WDE$`iBewEOtRd}?GNlB0x%^SCh&V#Bq6e0C3_1M? z;6Cjnz7Qc~Ct%~R$Y>yM|9XK;{}80 ztp>gwkGA$!UdhAds)9WiPyyfu^L8rFzo%eG#ChDxH3?=q{Lt!VMZSw10#3^ z`Guq@UtDnrwUycdc55A~EL9aPt^}UzA+iry1&mY&ct}HmA^Sqry@;{71lHL_EvHJ# zN8sx}mA8mHY*AJ!+Yx8D3?%*>A`w}@w<`h9?F~Hp4mgcY@W-OSmRtZ^J__Dg3L%0Q zTmo!W1GuwsVC9t}gUA3f7Tm^?WC-a)>hOClu*AlY17KU@!IParb|w?ZN@NTW_yXc7 zaSH6lC19de2V0Fq9>hm5SZ81y!r;Zuqdx?BpWI5$Kp$j^=fR!UNwuWvQoPhf`YKJ6 z)8uGyYX4K(fJeKY@De@A$7Dlr*TU&m^fdYuol1YEAJTj1ne=S<`hj#L{SLi9i|UF< zz;--C67JmuV}DBRp@yrZ`W>;SZc3n%j+oy>ITFl^Q;2@_hb457dLZ(4PAWvEaW~At zB*X;gDk(}NywXKz+O@=$P9t*UB?8eCG3apRg z6heFiLe(#@#4XBJ@Pto++jt-Ml9gDs4xV%w;vg+C z3g?MLM2gL@qh9FY#pGJ@2zebC_#e_k8Ylx|1A+JoMLf(%@v!)hu=#6@O`QclrN9P8A=>s>9xLnRgHk2wfjD05EcSxGK8`5keZ)80N=Z^v`H1Y5 z$HCKGRR<7mu<1x@9s2wr?Zfm!)bSznoyo)3SLP;u8^M%evgku}H(I5RP<5$fSVAF2 zy)L3(>%phYRgNNdSX#-KPlJEnNcKmB^o4Xm+9z$54kFI>1~JqKxs$vJOzapu-(w{X zr{jFJ8~CwWvLm?_ksuKfjwAk%=JISBUs!H zFl-lsQ#u~LVg!CpK^$lau^bG_EwJqWz|_4!90p@|BRs<_a5dY&8;3zXDIa5X7}nY5 zFEjKaau!M{De?rc*zY4sHWqf$L28WLv)0ml>A4gwj{)aSgEuUGB25<%tWR<^AQotYV;>+GUbJ$h78_!1>!Qe)gP1vh@?8@ zqliM9Xs2Tq9l|QYl&6F+KAxy6IUTOCP?ZdT|xfVBH4@>W)pC(`+`}DNHP_T 
zHrHehGJ19#dynN=v&L7W)36#R`;1-BHev0|ai#|%AP4B`^fUM|ncNGO@+UA^Lx>k( z5tqWLX$c~6PWdV_6za-G@Le9lHqPSfhV(_!%FVDM=EzNO-vG=Hk2)OQDFJJWmuwA> zdlZrFZ-@}GG)>#VZ@x+Ggcs-xJ9$pdhObH{MnP-lF6J_$o&cx1P~I&!mtE2c#M+~! zEb*auO587=zzoO|OGv||ds4i-OAf&Nj8OkmTR`7r80jDZu7F&v?*)--nGY{)Q0lkw>pg)2q8ctm#o06~K7p!>lAoU!g=(+NK#9%96 zF3>V5`^cr_j`9lm8LX*|vPt=-lvbx=XDh2UiE*&nbHocUgtNgcPlczs0aouau!F0B zmHZx_rO)3swNo7Odbx(2j;xagl2-bHkvxp=B<#Z~OUor2ShU$-bmuBV5pDD%24fFy zC#t}&>>?2bp*n&Sd>{Q+2VQgqeFgji56v(>jES+M#nb7MbTTytUSj*D`3 zv1<#nteFKt1yk}bbp^@y$0}Vj8G*g-CZD z`+i$>TWxFGfJm@_sW~$Ij$jB+}qrp z=GayE(>kwUV*b;-wt1IwFXybyF8DJqqt@>^-%o!we!TUr=KBSoj{ZECm11kljVDcp zAHL^s=Qe zx9tb)vGz+=U7nm(=1-f<&6!DA+w;N-OS)@{DP$9zB%@9HOvxsb&q<%@X43bwe`;`E z_^QZeQSp&u!b=C=^et!1XDO1EKM2#nPmd5^BElKZr+deGvR!wbk}wg1e>eRZ_dWm1*-xgA@}pmexD-W%jRv1Fn5SF(rnmM-SA78C^bue9!xz3Cs^p3o9NOAH6I(G`daXoA8RE zZv&_MrW%g0pP{RBTzbiO^p%H$A_wyr3DM^Rl@ZW3Y=GgtbyX2M36TPpcMz9n)E%sCC zjEZk+UaP;U{;*o@s+=h6j+<0`K(PfOq5f4(RW!4Ra^gjIPQlKM;2%xC*Zox`eMn}n z?8$ir`J=3x9Q(LHxtkh7zt)fS%L!Rm%o(*gYG^TA_|1rAk)@)CMwgGOS?o*b{lF{c z6`E4&I)1&UfxDDD$RoMixO^S;?HY&R_}5v4t?KpC>O8G( ztsup_O0O)nC^oIwf`C<;WkPr7gTf3bYpl)gnZERw&+koXhcotP4bOd*Z?a$Fx=`VU z7Cv{($-a{UE#Z@+7spPHdmTG7ZeLtToIb`|EH>C}HXAIuM0PJ}6^)*;(6K0IZD2cZ z`wk|rYWHzJ_kQOqiX%k5MB&Wk#{Te<_=aELJ>%MKA7|Z=doZ*2FZ!D`wZr?SZ%#fx zl^ApXcCvq36xUiQ>FQ$}>zPgXnq!LV6Wk5&)qYViqukx9-Ri{DX;*z+Wpl--_`Ahz z!L5DWng{&q!v3~NW{d2n8?>ZSrH^*8okJ1=#Knti#@aqWw z;*DY~aZ5_REjcCTQq;Pru2C1l%zhcV545QIN_H;Hv)eJ)s^rek?V8Wpju)mEdhLr{ z2fYGcU+gHAl7C1SF*06pxA=v7>Z)dMWPO`^`OlKHy+7J~8UCSLO5DqvNrN5^yx;R} zplvzgdM)zmt1;fvT@s!)7+C*W!kkLQ%btsU8y*}m*-~29 zQTc~+dsDf__7NG|zYhCS_UGq}pxlgt4+Wmw+PPk9cm5E&8Vq1R-*Wy#!uaT}G0S4F z$6P3$5Zx_0FS>k0Z~uFGm3ENr2)#0%@8;N_-yt_UcW=H>P}{no;J^G3>vVf_m#-(y zlj>c-4d6uZV6*w-T%f0%>$BZr+n&ESr|X~QX>ES^e%+87`=Q`{@3*lpE2Vt+y~j0C z?B>e1-FB5BBK-E2=ukmhaeK+z(T);@<-XR~U;9h7du2DpdZW)4yBBsOq`iL?!#FAk z*zHp8y`u*NtS9HcEck78T7MS&C>T<>$z>4RQI9ox-B;}sLz1OIfHhbR>m5-sd}+u= zWH_G-sOV=ftA=^n!}K9_k~o9YdEdIXyEZsKIvO}e*)Q5_IX}2+d3?MVy;Zr!TvP5C 
zH=qAUXe5m0%X5=FC0$AOs)hHh9SR2KP0qfUIUxOPT9fpd>1k>9Up_y4f9%NYYVRl9 z6ssb~xj_8GJhI#j4U3A5{2q2U^iD+Gn4hI)l%5jTyLj)&wD9wxJwv_*o%i2p9%PuR zeaVa_pC~7Bp4=|Al)S=yE|y!umylAFS;$|{BgT?<5OJ<*s9-E*v>76e^Nlx+U5s&t z`?~9zv&;vINA|EH%@9A}Sy{f3_oh4CJ;hzlbIr2}{FBG7Y0wV4=MlVmuA_IGr<13N zXRxOj;sw7vT6Z@`#lrHo_tqWOR|S^*wK*66vZl(>l*uPhs*idQ$^}c zz1O}my|nD~OZVRs_#&iVgf1#8YGl-e$dSbcMe51$xr3=0ZA<^R*t$GptP&m`!t zYwheDrXf8Lxu|=Pt@(^NKwhT?BUjf=&&3(`_gsK<>NiWWq8WE zOm4lWmivaYuXDW1?rz47=Bo&AfralA4#+d91DZ+NZkj`Egtm)*g|W4HrSB%+iRP0& zm&~Uv#r+=o-woIv;Ok$~x4xx=h4J0zd)9K&$7(EZ>|wZ|3((Ebb=TF=2C>g^CUnz- zaQfR#Td4bL6x2XBA{VtMVn}nPRCz79;*3z2pCSAfCy8-F3vLHjgAd_&?|P5d-I=Qe z9JHo4!L{GrOlT%HhzPLE>`ZF1MGPBWxFDA?yB^=qJ?{ccC3^ zONWP|ms^<|hZ>RjRs-9&p!w}U-Hv{WuA(}^&eWG0g# z%3UC5b(Guk9Ay+(k`84mG}|@_d!f_wLvA9~lqX0NM6bk3-GpjFftZgoQ4q8vsz~#3 zrZ^0|>N5Y^b5lM^ampY5J>LuL^r?KLcm;F$6zAtL3DeYW@@02(*BYLL9`96XwD5}) z5F2I41#&y7mFf=#F`%gGK5`k+UtO(4D*}0+8Axv<%L6qyN;O9I>j*MMbx>0^_n3jI zPI*UanQwGI#At?t$1$8b%xq=+spr%fdLGrEsYCszRt9Ejqe_6i(t(O54=K;csqARt z0KZZm&7{y(l@NZ9)e& z+T~<`d!OelF@tGBG?j8>Lain?kpt*URIIc?7y$&ZI$edy6qw{l{`mz zD`VGAP@D42l)-dK>Yj2zDo)kcR3$gdcJ&Rph{__Ih+mAMv{HQ~5$Z96#QEG#=8mxi z@z66~_SO1m8&e^&PMAfQH3H+Ho~a$B41N*e)|b=0MCptLY7OlsrY`AK2H=#o6eUOI zP+yfXypQ@(KSDEwKjfS#9nzN2*Cc*ig;WL+bD0@zJ7u`gQx0U~bwj`~{YXxx%P9l-MNEj{65U)n${Cq>{dejK zwU>yOTN4x6p@w$sV((|~WV*Y4wC0R*(5n~zXtVVHO1a(%YBC!^l2U^5Qb|PaaSyVd zx`dx6x#V1RKkZiP1I6vG_#qE_s?d|arQRk+svS9tFo3>J1j+wMwUm)eU*em`t_;(5 zLGOK5roj@TsP}X%aF)H;ss$2R(o%3--O?a38Y&M@l=Y&IxRS}$pCS5iCpaB_MgNlQ zEtZv@%T4JRO&+yO>CS(Y8d53jN$p$Z6StY)Lz;D`S&{teZO_HfyS1^JEajw|5o#Fn zH0{ax%3IfPdAsft_R7=5)dH)w)E!~wtFw3~zmH+`&uF7}1;3fcQ$ePF;UPzZ$66AX7Mc7=9@tV-0M*DTRJP)v#%ZSrKly*O&9xyyT`rg4b%pv-GU@0pRWXgHS=p&fl^A*nSweYE z&y>G;XRyWfoyZ$vJC94M&JLDJ>A{DPTg3T1vVhUm=cD%aL210KBsv zbR0S;SLNQoX?dk80i|B~wo(mo&1_<*5-RaZ9r_}1RQN3AAYZV)94lW_9Z*irP~Ir} zpete{YOA6=U+n^Y;rB`jC6K5}`VyDnN6P^-+M||G#{-S!)P+Ex+{$lt2r&H$M6xnP z-2=^!$%sp=2fAL4s06&WG;skKXezM18bF@*fVVUnxd@es4a8Su=wC&=I7j_PJV0cl 
zsGi(4q8GUU8O;NMjBf+N7K}`ibnvhbA;0|;a4M2WQdC!#kjovLj=&#Hh3L?PgFzBdn4p+l!9V$H87$Z zAg0h55vH=pxd_6$mGC!#h$+2CyrxK<;~`X#i>ji$gJwe^a;_WvReLCfYm|o~MjZaW zCGOe)$Zs_6(HXyo;CWgDr)!Fg|6ts?8J;c{@0G(nYT#2v%Geda(l3b&k5D{sZG0!- zw`#ajYvA>pph>U;dHnl_z*3-g_zh;cvUu5Uf&nB1>VHx*J**8}WJ!WF1|q@Ybt z(7vLPdk#f|qS}|gp~R32l)0$Sy5VaCF%^jY6!haj^yqZN)<(f!!5;w69Dp%w@mI;Z zAI7i^tf)P#sXg9niSG`ur=nL4apzii)fl5z1LIl&eO?(~74T%0VIf5oA*1nIJQU-L zsudKCeK=l4;os7*fvWi57+|jzU?WAU;6>`+!GD#@jri9eE&Ylfcm|c0L@01SL@yUr z*?ETFGT;${F_Lfbdy#&5Q7!kPIvGXv7+p}!@WuF-##N)xV*$t{s)YL#)q*LiJ6lvQ zD*!rML4TF(i}cTRf1j>M^}MKFRydv@km!n;+6_I^1wGLLeb5upp8@#U8#Axl-~a6j z&)5&<&6A4x?3wxXt^&)i#Q%!+gd&|JSD|s;Te=zZX@jcELJ|YA+O3wRGV9 z?|A7D+c9a2oEUJWDRB=keopZn*DXOzn{27Jm#b47G%8gA8c9va(};mhBI4AK*q!Uhv4l<;OS~gzWB=Zp zh*pQg9$Tx+l)Gvw(Mwq(qp}jY4Kdgm)ME0UWLG;f%Sct}p?;ak%+rh~k0=MoTg)zsK}@e0nXAl{9;qa$1(Tu!6sl&a*-EBd zPH9D!CSJ>Xktq{LXq4;fYeGic`-O5$9ZsjI&82}@L230c%B&~RlcWt|cT%8olveUo zL_}-Ai}oi+BF=Z8e5RHr>k^0LCYWs&^&Z;!4)^#D&h2ldKO$f2Yej4eKs)7-NV;ssphQxX2Hjjm{D~6sLdRgg5yjEi22(A&u)tic#HXLBaZuT+gLR}YQ2=&CTa2Zf z_@(q$YGcOFhPTPYeykW!qnq%MYmnbK7s^{SNkZ*zml}#|uTi$5nz;n#@Dp4w0kfzi zR{n6<;SJR1_yup!5jo%8!6mVf!{Oa~fK#C-ZV~grWC_8F7zr$AEY`$SXyE0l@ra(c zCbDsIg%4H>KnS#%h{t_ON}JJ#-rVmA*tjqXp^~_y&~_=X?xC z1PA4y+GJ1i4)!i7YFVs2{~_mfp>jaZld6LGGDgaT249TWN!ZTE@*b4txW+lTH&D9? 
z<2P{6yt4NY7se%e9G-XHE#6O_9M3`MznpPrxQBR}dRDo@TyNcRh;D^|yHQNOqVyz3 zF`dEeIEpBAJj?f^?|w@Qv%}QOlxu8m91WG0UD{Sq2~?R&R0YzB zUFsQljDBi!0_)pAkzzg&~RW%1`G;L5Z;ezmE5TdeLs)&K=-Wg$Ke#{+7^N%;htAR#?m@ z@D}cj_dFNC4fA|*k92QyH3X+E*LlSi=Go8H5W$T_rrZT}166`Orp?t!`g6wmKK;z+ z%umhdEkD5l&Gz;4-EAJ@bIY{Jlw_=DxTmY2&10W4bC4rffgF#14?%ulfI1f0+D5E4 zPht0q!KS9*K?$^DHq8Nv`bh6bHn^2+L#ANgj=H5Pysz{SXWK*2TByk9aD%x9Tp;(| zyVX0-+msv5&%|l3B+v2fP^n{_5Q0egBxonz6*?noy9Z3P3S1j+s5jml?HTWS?RxQ7 zwZ9d^s z!>5nwp7F9_qP~~zrMA8HG#ksrgTHwl{IbQ=Rm_=FV5E!(2WKRiPadbVbOhZ1F@InB z4}Fsv%3J~8;Q$y9k#rxbDtQ69pxM|z8kD2b0_hBR4D*q3KUEkm+!5=GAE3MROE`?2 zex#!z_r5rIKp*4``5JN@yCRdFlCy;oVw%`MNW;I0LSOzCH=SR?ZQv^LAGnG9EWR!O zn!Cb1;1>&Lq{s48c?@>q7Getdh+59nXFssjv?X=5bdkDr-35J!K{RYL)`6b(VZES_ zGi=b0&>zu#*KXD})jrWoVxKV@)|b7*_%Z*YO`E8AY9Q9A^W-Nof%+FM*W)Dd8>?({ zur?F19@T(O-YGR1s*X-&rJN)M$#=z7!bwqtYJez4OLv6d!b!nb+9ZwV_X>l>nW7af zpN$FwCB#Jepy&(sTS@W0bXDvo%@?Z*vTz@MWV1L#SR#%W9!vYgSwbJVnV5;6Aoxoz z^k9x$MJd0Od+Hk0Eu+~94DA82xp)O=zS zG^;d=G#Ts&urb%OiC`T5W-XcwraU{JIn69*deHUgPE;T2Cs_|Xi!iLf$H|*T8Rs~e ze^F`??bQHq3@RzPQa0EstYnn`6URvJq!q$tC?-@B=3oQ~@rqDZGROnOao{fpNyVX5 zFjme$Zud20@#d;S<))|}F++|*ANIrkU>q33rHMf7LPrtL$b!N>zO6kED-MbRJDq zcfd2ZB3nR%tOV5LV&H9NK|yi@6okttb=BM0vjh>7)fdVTthT4IiwZ|>elqbu*{Mbm zrIjz>xHQ5Z?ydR|+_mR8BgLzEa%ZBlnkY{oRzvIaKGu(mYDYOz{Rb=8N38SbvCcG8 zMicpRDY8EKSa|`?Tr1!xN3py6SGE%Mang$-nkngOMdhvf2RYk&$y2JJj3RBwp#Mee zgv#Dy>{t?kLfEl_c2%`hO(=%mgvzr*E>NzKRf$b%8K?~IC(o$U)l~8mcKp4;9894) zVjuIKTA_?n^Qem0D`SS^?$cobx#|S0yvxA62_wc}PaliDHmSBC?KqLNAg?Jh*jyhK zk%WFP{Pr>B98PzYv2U*cp59g^2mHqZxe~EoDT4@!DwiPZ5H+zs_Qy*6Q|XCwY6$e1 zTcX!SlEKOYbsE;@D#}*kK6s|*$e+?}^y5jXJ^o@e&Y=_JJh0)eDGimfWC!(>EI|MB zB=iFY;)F3A%6|z^lFL#%Df_^7`>7mNCn-rdv)JWoIAvT{YGO1>BSXEJ`b!xKROKZU z2Dc%5{XaZeP4XxDVlVj?C-ar?R+WJt5U{wPIBgec4vrv7g3EawIP3@HJ$%OQL{h7h zAJtz#ZUTvB*gryx3&)c(lpDL80c5HgPmaP>Mv`5WJaQZ{K}jbULm~4P&WjC+gBZOq zJn435>~HplMdjM{`asqQ2?k!_*=cm!vj?bz=%z;3T- zcYGcBrE$QOCSbLxOH?Cw0lz4P5o>|7=zQV>e9~>4Pj^G1cQMXm%}{i!Ik6Ncrzylf 
zr3xY*>9CVy*c)6>cjLVD8}@!d*$B18GT5)@U>7XlQ;&dCt-vYjE-NQoQ* z6@bBD2nK*Twg9%2h~B6S{k}xCANd74w>`uw?CDw%Gt{2gw^hWG;FJjdQY2wlqRCmn zv9=Kp6)&+C8Wnr+WHr!}!-zq!lN)%(_0S#&#)yqjQwTq(T%Lr*PXsbn9*VB4>QC;0 zU9Er@(qL@=#dSO3{6A4`fF5s%-th(Az$+#2^t*8KV{p104o=~8u!d{nd9L9!TY#Bo zh80nG>f4y%#qm^i)vcJh;lya5H8l{e3W$#%#<47mvsaxxCcE}0CZw7cEE#xF1^6Mt*Aa&CtPYm|MMJ-OB&bQS#nye@#7$)Y z>d6fy8e(MU;~p+x54(s@z`ouQ^Pp4_2-^)p-&DhSOTl%!!&lnT--q!GV_@TVP{H!I zIuBzr1)p1hJE>}Q^rQwBbekwb+?t^OXA$3Ev#&AIW5GTh0PJK5+S?Im3x#txMiWfF z@6hB}ODt54Kcu-er$_`t(x3kNSQqi>N%0S|Bz7O@xD zst8Tf;uzJM#0elS<%ktPQBvWlM4%xC+U3J|M&u*HLkpGrl{{8ZGBSPtx?54g&lngYbQJkGnkdKKPlKTzc);)Jpp ztjKt1mz==gTnF|O1hnQTG;U_AondvOad%O950py6(|rNDrH5B*rLs6LX;dFF7=2m_ zJ(s4&z*l6#4|T@~?8PUG>dsz-ca$);syc+AU{5CGt3{H{k@@lgK7Iy})iUVQCB#+C zvYu!|B6_1EG^{yXEnQszjPN}8p#`w9*7!_ivO2Dp1dp8njo37-W=r9NSD@(-Hot`;TY}eI2m5S9Jr0Y@~Rby1@Lj3fqGKxbgR`v2qN@ zQ{Td3GM}Rey!Q5#NPn(5S^v3E^2cFLl^S&zPxC>}_B``t#fC=WR zYcL!3sEx>auyzI3-kI1B-&O~{s1oemh?YJED%J{C=)tVq2Crp>we`fBITF@xhZR`R z%UiGqz6P%QSnRSSeDbF<1S9$#vkmhS_98(CrwepR`m1$u)Vv&heh6poBVg{21%|yD$a6KIaV^nzTC{H&EVv`Q)H!&?U$DKA;Lf)}U+2IZ zCc#>V!CR#R2`s?Lp2tWW!E7n2lO75TWhY|qGvFVy;i;#?7DnLzj-bVllmkRf#7m&O zgjH%fR+fC=2O>s316dON(3hLxue;+am9WF>hO=N@*l-a-S`TxtG*k$8qb1AHmRD+X ztdq^Kp4c%55huoHC&3E)0-?4ddN&@HaF7hg$lt~&ZNT0Wu^)VAsb8_LS&3P~BX&?( zJq%>)9_;=JT49F`>_i;lJjQk{{P1HSjp$df+Y@1PbFnUNMjpZ~c#WdksIOq5%@MDu z51)DyZJePFB04BOSj8W!9MK8AEJEM9A(#O})$>FqyncCdFD$eQ_9CZYPkPwIHexiX zf!&1T=}GkF3sOcc$L-khwS+oVIrOO(8I6z7D_j0rz&5DTAI5&64fZyh)T7up(t?K%=QEuwWbJXh-Z>;(!o-M~+4jp1u#!2pzEn`#Kuy zdmwoSIV9ELcj95U$=KOg(9geN?*&RBQ57@1F1be$(Oc&g15~#(KAc!rwrYYLF^0fZ0sU`!YeGm7|F1)Y-kh@fNySsbIms7Uwi>VzE*9d>;#f| zOGfn?wWKmkK88%ANpfE~7^jC6oUJ-T^ZN%_@nH%DBukIe#tob$z5!WXOs=8+quM~> z;txHUX~G_7O92JC!+O|t(5$?sxvc4?IS_tEm+4Nk^g8I9Qe;sC0ud*~DE*bL z@=58Gcn3O3=aKJpNK8j`djgd7dICEcC0rCJ{SVZ-y#GJ>cdWm_f)q>BdHA#%Mq3GW8er(+zD*aX#ftOZ46Jvcccf z)%3_PLsw5ThpnvHr&-K4qU~fOavS<}KB|yzkl)BXl_xk$z7x{Le0ezLeOl#6sMJb4!|6~#D$Jean&=wkJcjyZb?j~JQT85%*KNxRHx>?cEx>)QdpJi|?^{yR 
z)X;1JyQHnArXk0))0}B(WNBbI=$q`<+RtrX}FzeV0tR0ynH82MM zu#?%h%rRg)19AGArwmj!!k%u4!}(^OWU#(UdvaZa9g_-=*urcHwiY(VcCTQ5{@J|I z`Q2P`Vr8zaeSp>9yNVvE?L~i7I+L%o-;H-oVdfxThb7XtwqFnb_I@wHZ0s8R!oQ8F zvf;i-@=;8I#`fAPFm>nDz^whk;c*xw^Z=PkgX@kBAJD>EUt7x+5*-8ua?{y)Jzslu7Wm^G% zG{U{mJ6*^WCnBGzg}7LFD4in@GZM5r?i2l}80H?kNfXX~rw$>1dLQLay(0$_y<{5d z?PbNT$ebSGIe|)luN(=EY4+oV%WVU#Y{7_pN8YLet9!qY>NMn~<|XqjG}YLTQhVO5 z=(TH&ai(%U8uNLR%XA3+YzrvxpBH#3V2e4!c-k^JK=2)A+Nz(fIZXYev%s|*tA+>} z!ZRq6T7jmP6aL|5d**xAaE--OpmrwY>Ewx7(q5tky^Gq0JklOiCjF7^q^+r`MK`2E zm=f#`W;NZN`l0&B+odmZJ!P;on4jRubk%l$b&qn7a<;Yiw)M20u-3MF0C<_A=(_Z{&2Az?xhMv5Fz`NwFoe7+P^=?X`&e&F6?F6171A`J7I{*?%%_Zxj6QEU=n!P*iS%pgrcR|morrZlCH3@g>Lj#P)@$19je0*_ORbH~ zV=L;G>yK$WF^hokZy?R&VM12!OV0!?KLv>XCd~N}(3ETIY~~Dd>YdN+s;#cAMBypa zU#n$ZXpQo2AX-V!3(FS>d@oZe-_>lQbY8U>dijS3jtLmyH`HgTiT3k^R1Vt_FvfR- zWv}Up@eLTism6GA7*n4qL3B|U(&gw0VhdMIcUhsa_zLQ8Ej@=_KU^=ov!r&wk-7;f z`~l?c4MOLDaPGCtjiE_ef&V&f2hCg8+;;8J&^gQ>L%s@R>G;w(Y8$H^xE zys?dlcgaG3ew}xjr?{t=XR2qI`?hPDdzX8UYqhhLbC2_b^Cm3oko~Z2tc`*0^Y4OZ z)*YggzAi4XZp?3_9QAwR@6vpipA)ByB?EtlM1=nK_u$UKewV^bq4oSum>(K1KrbTQ z6%U!DqljJREx%(eB7;mau&dLmN>-aQlgA%Ao$ z3~MkB$BgrgS%&rcsfH$|sfHKqPfCM0$`9n0e}Qi1SAIT!To?qtV0pf_cc;hgP3Gdc z)!uYZ1J4vsQ&cku^cY;PolRU%o!1^Bt9#mSEJeZ}cM>P|x4 z2`Bg7xzlySWplbcO~uhtg7DZolY_=K*@8_%PUZmiCEHKC(3oVoV6hub20=f-5C$ds z&W8K!WO^*UfLe_c*;HbH5++XKRo*CWh8A;op%vmd8~I3Ku{awQw_My#?+>q=3l(_2 zmUj;-^=`G>!F)FqQUxP&1G9VOw~~@*C)X>dYW4tWg6}~8I=Xh?9(>YQ^nV_DHR3{W zs(GrZzyH=yQ}Dn3!T!^H80c+G(EVY3poCC?E+=Lb=Gss5X7QDKw{x)TzI{+(J9iCH zm1>Gp`DFf(R6w3$8*2V&PHGoxHM%~A_C7Csv_83}7KXK&KAOh5t-3y%UUU>OQ5r8g zz#2@Kit#bt`rg6t(+@?hNC`Tjuh3bHmrp7^lt}p^l%#iwM$s#L<%@AYJqwXj*0V6u zw#&M(z&Gzgwvc%}JH_*bxW{kI?V7cVdt~bCA7+YTH!_iirv5j=w)y10S0=S z+`}H%FEs8o-Sp|~^UXNUsPP$Pp)H}Nak`c4S+<5|8ncm1R!2dbaVNi7_%5akm#{+| zAoND9nR>FMti@@*GqBS`#3`ygGm8$T8bblLk~&g3CNt6`evK!?+0L=cZZ0GWZsa6p zP08Z_l+HY!e(cXU`#!0hB-_IZnsUpG1A^-XRr2-vn0&24!BL-MbuqWX*9C)JxSdmfkXIqULzUn@PB_1Z)FOUN5p zZAvu{@g3s#kN+b-r+K9@R{uzQUQ>k)qt6m`6|eBaqd|@N58M-u_D=D<@C0-3g(FG{ 
zY8E{nu~=G@tD9=hOf&ST8BC z&y49llDV?qW54N^);^_77xgEhOEXT}n!YA$g#WR07GP49@86$Ob7p2|d+BbFknTo0 zC6q2{B$N^a1nCe&z#rWp-ICHMC=Jr7w8YlL>3KiH|GmyN!|t#%bIx;~xS#v}#@6sp z!Ia?E;ORi2jK`^$QpJodfrFvWT0N@siz7pYqB4w*n1it~@o(b?Cv;1A7QZoGj$anr zIOefBPL@T#>!dx^?61EMjS8H~sFN`uV^~H?#?kZ@=_Aq~r9aBZ9_SN17c>IyK=nYk z!0ODg8JE(>X2fRB4wMb9*CtB!ymuwdNVX36YGv)6bx-2Z_#1J{<9vyWQVwQmo!G&@ z%GcOi$9-2g;VLBbiFh5KwIozL{c%Ps*2<_gV3ZRM|7j6LRPG ze&)~T|G`%sJ;ADSij*NwhS5-(>fufOZ74l_Uw@#z3GEBz3x`8rg_r1;%=We~@{Mbh z@K#)r0fzi=jDuU4Ns6P>CANoO2gf z$tgxYE8d=o!%$O{#0$9|xL%L}`2oMQRA+^K+w>Ykv_Pms@NLHDX^-Fi{-*waXI|%j z^YrcD%)!nGVVIGYS;t8CjmR=0>(-Q7$$gWaC6&pxGS|DD2eMvDZk1%mu2OCB6JY^) zi^EPY{Zgv@Mt{FApa(i-<_f+FPRM*8xM1BE>q-;kjo$zHfAMwp?TyWq;7{zBbS+Vi zy^hY`EBAl0OPVK4vCoCQ;1=MRWsJe&+Qj=hC&IjNK=$u>K5Ox z{z?9ZF(-VL;lZS+P1%8K(sXg5>#-Hks)eg*3-vSFw;>xY;H_Zoa8CV!anrP{f1SbT z=oA%8$i>|K)pnjC9+x^Fm6{pui%L#;srb&d#|c^^QDOMO{LKjHb@Y3h17l~i-q~1S zcC)6T*S^Z`fS0CCwc!w%^RAH>Xw&~;m$dU*J&h|`&+z|(_cK@F1^spE<+T3BPATB} zB9t|Agq5b&_aE@5`fJ2I_m=ZFU`+oTS0!#(!qkNR{uS=rVm8+%;RI_;KId3)dthvM zZP*EYtpCsG8=e!KssCa<11E6SHOM{AeNFZ$j@(Fz^EQZm;Xm&!>S?3)R7T(mGgbaf zhy~I4o83bn79OXy&jNP+>^ue%3Z)c zQpuEaDc`f7Y$=_m%GAN>$DY5Gb;P`Dv^P|v5OHG}{deP{ImfDi-t}bMu!^`g2;;=+ zQViPoSEZ*Y<>!&x$*bi-awn;V*j@O6YEu-4Stg=7y>RNP0V}5~>KwmY20WcQ@cA#W zifkk|T#D*-LsZA}h^s_XY>cKvqI49+ofL67x>TQ1kDN}yBS*`%UyL?{Bnp#^N4Tw-OE-s_7^@uIVZ!$NN&j-RO``-PkI zrUIfja?tJ@nIK#d?vj}sh~Hujz9%u#3Dvj(k$U7aJ2|cF>UIfd0a(o(=<+oLZ6i0bQ1WlDQG7~R?|EMNLedxwgw;M-x?g`k@v7n4|gXGm|_piV;KcgyB99*Z& zD=zY&+4(sS=uV03|0S?OUobLvl0lrsCyeDWl-ItYpEZnpLME{-8R`b)8Jm+a9!IWk zJE-Qn=|jk3*8!=}ozEQ3clG3X zE_u`W@$=-3ZE~3ps7I%9_T|6}=cl^Y4JD>oWMSu! 
z9b87n5;+Bs`p0SiehO5Cy9-Y+cZT0jQuW+U&h`d>Q-ph1i26oHvhRJVLJs(lX&#M2PZu(~-Koxf zP8B65ZSSZ0+Lfa**Ib4B(dNT%-FOByNJh06?a>it!?*m_33fnLTBQc9Tby?WIKwBL z$0e@lcj^L1xPwt-@*ZloH$UK-4{>)wREKo>RTLfkp4L;SZ%NEeCeM$#XLmXG`~3fL zTJj3n;!afm*V4Aj>8bm;vZxE^NopLEA`|G%lX>oo%KJci#&GH$edseI=o>4@Yj5J7 z9HdTjg=*3Rj(?~#S&WK!?qDvEi-owt##~Pu*c7$7w;j0S^||h{V9yKE!w>Q)Yx&+N z9(5L1vW+UtZoc_3b&pQ;jjw5i?`X-1RH>G5{LH^ocx^P^1!HNeR@8>0V<#t9nnE9s zqSLeSa}wjI$Om+OdqzM}^4SfU37YcEu8T@m^xO*YzI^;`lra#;zXkc9lALj2#z_TQ zygKiy^Pxgpk^Ff@jxzjje%_abTsvS>t}=?%*0|;%^#$pJr)C7KJecHi03%YuPFbUo8L;%R^|A&5br9_ z*+-cJ1^BrPE%*`di(X*W~X9Q(NhWR#$hh_+9CX9r!nj z*zZ8TybbpzdX3GwGYz=nO5CF=d`n5br97Wn@Wc7!Wh55j=OnIGp|#@aNvTw-o^w4{ znQcxnBmBwzfBfNUuhT!DaJA2wEuz)2H2Q-I{vn1O6bvUm;s07Fis=tiPrJ$pT18J< zMIT&3t?egf#GmL9o2VoF%=|cnS#A3N?}5GPhu!IkgQ&B2q8i+r*Qd~Lexz2nhB@*$ zz4ij<@s7DIfwL~md4I%})Z$)N;!%a``-CxCj-v|g8m-^{$-TM#fg`Ytdd>epg|q|# z8*~EH)o)SpSsMA3YFR$cwmRsud5ps>=x)7Z7E7TQ3_&etBJ7R^wEh>gVsq}mXS8TX z@`PVAa&ytbqnHb#b^g};{1NxH5@(c+)+j_fCw=HE(Rx>Q&N#}f$o_#XRE9enz5BKJ zjNJTBLC&@`*OQli8c#cB=Pwg!Qy=Hxp|?rAMvjk}OQvUP^fH}Fl8em7CfemU#^@gA zti3${%GG9aq*H4bKg_Se5A$D`{u`Ye)3|FcfYTwyEvS8K+2%MbmnGp~T!q3T*I(p zJ4wt|Pe1T+9#FS^%=xZi4O+!Kev~`o>GDH0hr6U}xH;O|Cfrsw zD|5s{P7&0Vb~+-+qFuPZT*A>Or#;imZw+<+bsZC$yEF(DRZ+goDLh4~skN&OOn|*k zTjrnW%-t-~7u;SOyOW(^r|^x{T(!Zm7sNdz1ysl#MpM7YRM2O!Tz5-qg6%om*0lIP z&QEqV=VRAGbi=*@>oEpRwHK~NXak-AnVe5(i|T1lxCJkTW?~Vsj?j%Ztxly?BI~-E z*6u=2i(-EsGFRu~PClkJ7Pu0HMkpmMM(wp02>t!2`dviZZVm1Gh93G241s0b(Id3e zJ+K%$zPy#?cXBs*132kB@;v1Wd(rDscx3{CClxkJCW}E*QO^qu=h(onqXxI3|5`I|!g%ZM-!&8k@uAkM{-tO+c za8_4%@5il57!g0%Kh0Cy{aERtd?C37m{_1DhZ-fdJE7$882w8tjX3GKU6{Sb3t^Q~ z(zD8w-IK1aP``1HQfzsMoLA~9Y>s?wUpGP^d+X_k^(*>q>UGtOps~|>?wkU{+YwFM zwenG-+*V3^B|~m4e+8m3KoxTrpS{uc;-s?zSKal_6XyroAKx1Mif+OV&qR^+GU{YK zq(6xKiV5(AoZQY`R8Wsverq)~7STM2zD-uMmpKLHjsxg-^o5O^39?hRAE0j4&1wWr zw*_7+z0D=YXU1WpviT!;4Ndt=wY@tE`+ZIZw4!(k8vqnKU9s~{+o<(nddW~Wqub}7d#iL z9^MtstCzKVDVcG%lS?LK^Zumvjoq12CR^JqpC&$zk<=}6N7=-IwHKa3l_S}0)mX1J 
z&?B%>zm@m9i@UGO3*>Tc*&7!#o@$2ZO;iWSS;UEqk8AcSaL+Dt2kvv9h3A#rE zZINj5t}q`L!=*8SnADi6{^tIi{@F2geOWy#loM20rn(k6s@=_MVvVwvTkEU`)*O44 zvmX}5d1A?uuD@6dCOV?M8r*aaBa41C+=g86pg_*R9ds5#c=%<{%$8|pe3n@#5DrWY z9t~Q-o}s?sYWe{qkGa|yWIQ)UTSLU6qZX4- zo64T6BgkFTF==Y6EW)5?%*~U&5>-zd64NLhxPExbXc@>(C;6`5vQ8 zF)Oq@loGlhY!SK{x)M4NYKX_Mt~Y`WA(Q3E%j$3om%E3!1(vX~S{=*^W>y&ZyL}z} z_dPG=i2UADIks7R!T6W43;ma4>c*`1hMAKzcWz((nCqU$a(`GR&!vWF>dsM8<(=|% zd6ryOI*1Bnx>!^`Atj65;I)lH9l0g$YPWG}D}cHzin`Vq>sveEw0BJvK1Z9i7Ro6m z^Z!9H3p?zgXolVf-(8j{ssPb!2K2^NtP11c_*`Hg>&4^T0PDK~4$HUbKNhqyaWwqe ztZn+x9-m-*Otw|U=%g-34gSq#BpR_sQKOY{!l;2F`*L%%*%eOC5>qo9qa(SIb#bHh z-YV}5blnnL%H8Fs=rmTAV-!ES&zjoV)7TSG2deko*WF9h>7MhRN}g$IH8m@}`W{+? zhvnV!75R}oo0+VxSY5b;)@DWUL4)9q*LQ~5XTeqci=ttwnZpvT^JvVTFpr?_nQ5-E zme?Dep{!9ih@y6bVS0-)h)1l7d(?Abj?fRR$1O5At#GzJj%wm|BBsiqe@3t)ZGkV~ zaA$}8)~drS-P|N!ZOq}iSL?O$0RL4xsa?^|qV2Oto1%@>hG^etKWcllhcG2}p<2+- z7=gR>DOe!iqjEd!gw%&-x7A z%!fEyX3*|AQO<14nwLN%+!H?fBN+1k2+PTf9Ft1QP2>Xd9n?)X^IJg5FT1E7Kf`4) zoi?cs-ux7MKNtAARWRL}+Rsrf?PGbYX{KT>M8D*&K3)F?#L8H`n_f|O>8G@L+FX=a zc4>FCg8DEVuJ7sjjBkzeMt;~QZ}2mp?;NJm{)<>x6mcaiD3zD*DdSYxGgp1dJ@=@e z&OncbyhUswJalCjvJ&HLiClGlw^vxN%p%Wb83Vi$R>yOpYX zPIxoCruvt=x*D&}aQ9YwdB5-}-kNxJzLZ7x437TpKa?SIGpP*fsa|mg(fnlG5qn0e z*x9YcW=Hd&Ig`2NA7ifmwf?XEQoo@OHoh>gnBRea_}=`)%4@fD!l1%SqR08M_=xM@ zgi=x;+-F}ZZItRtIVC{E)?WFa@?M^bGWs95>ODYZ_aObNW~8Z;!wI8`>#{3ZpPMy} z;`-0pT6k+0wf)*0t*mw`{7?8G%F!$IzWNvXmwGL|G8NE9#y7aR7dL7dKO1gy49cn= zvyS;EywG%aIkmBTR2-1?S&!hYGi`tjOEOFswp}nDU;RWH<;i=(X;V;4y z!{@>Uw7S|LZG-kwdyAsveXS#EEJu0YXmnF%TZUBGyUD#d($o3GJwASW;@jA-e3QNR zye(qx#6F1Y?9bsnb=$jaoAAYujyl*JqL|U4K&WtdSExYfrbscpz zeJ1K`&_~_?s_VJDKzfbW<9-~=K9O1rUXc3hK*-HNySles*D7w#Wd=w#BaDoWmSxJ! 
z0xg{@k=-Eon+wl`qoO1qQA(>{syp5J-Rs?N)N7uxxWQjmK0>efklY0vQz0pz*g*IT zmEuwCrpv+_nL{Rhqy4vyB9YzJ8f309QjEX#wR$!9oA2oX{lZ^@YdR3F7Csje`1#B5 zC-jLo;bY;1FplHeCGCvXO)Iap*G9oG{6uq`^90SELuur!v|P%#_$i60F$2_~dJdoP zym8gzQv4B5s#=iQu$kv?cSH4pn&j>vT@d%l+1y=~2ztUj#d(a=?yTcK3y0vyipE@G?s&CuP=mcSW!5{F7yRhc+E!X+b9hTQH9S%qtX0sj z+CAh(?n|!rs9~x80!c}+qI;4u#XG@YJ+@x#7ygUhk?J0Gp7&4hZ)#Qd0C!P$Zn=_p zR9uJ3beuF2b$~~xUu_i(K@i$TYM`W?j=S>?Se@UPX>e|*G6#9h2If_xzj4XfYR0T6>}~gZkEGt+3vi)vywN?lbTYI7Y8{6?SPenJwxt zTQ=n0e@U!RN=pwn(&F@s)Z@$Ob@dFpuCh^WCf>IGaove|kXSCRpyy-tQ=dQ98&@-K za%@6OA5TGa$?C<_@~(6ja{uS9;w~r8lkTz0?kQhoUl)YyTA7T(IbkhKwD$OrzQMJ& zfql^IXHH@@xPo8L5c8$c*cfA!hus-vdzNz+F+)ypJrTcDK6A$@7o}dxR!^euu*awF zaeLKd^|gDBd!^D`Mu|uK5pHHZp_=P3xvep9vX@dLiN%p)m6d4y4#u;FS=xMNR5hmR zRk@;Z`VoDkepj2IP1KHR0qqlgxSmy?rp?yc>Y{NQzsC;RHtjq8DlFm~L`+3kLwf5+ zw8eUSq>nqBXM?cRtmLZZPe|U+zNe5{+50@ET%14Q*LcA{PaW*;=WAvVc1-Ch3Uet)Msu-*4n+SzP5wn{1|J2p%__>7H~udnyJP?{UiOg z-oxByeTw@1Q+rsXI_%mGF{W zIzkSqxbxPEz%jpS#ajDtarn^;8s&@w`V`zy7iwcz3Cd|<#>2JHdFF@hp?$&6QF9#? zN(m>2$A#Vo{|QbC{Tyzh`LqV%P-u5}84*-jZAJK6xRRMBbXA+V`@4d6Hz~LOX~M(! 
zoBlq&n%-i*^09jozD=wUSDn=)$+On?*7wYlq%K8&e7!sk4fIoTDiKXyX@vNf@RVF^ z9-$pd^6w&bK~(0p@7Tv#)k=doOUAM4vhms+!#x~Jy!@wyH#1qU^{y4-QR%AmR?4n4 zSJONOv(XWC8g<@LsQX=0C3Put#6{GyM@V_ar{ovNpq8opb zo>$7OZf-Qr>iP6n#LAPjTxeR(33m(U4_hG-#NFgjv(VVk3-!^Ofm!WF}V;a^`4 zS0{%0Qu~B^nH}=<<{4ugWJ~rU@`d7v* zjydf+@7Ybf;CHuoN6=<3telh2OMS%YctaNy^TIB!g9>{N*G*?J`rQ(01=Xv1S?78Tv;jKfZdop#(AJj4K zGD;S?veZ`mSg1>6z9Nz@Qp*{D8tYuEsI?r&jTUA-uqfln3*6L)QUxwR{IoB;E^u zPo+!B3HL_U%dfm1-*8_-%&C}K{=WVu{)T>^e@x66Uwv;A&vaE)54eZBE4Ys;vC2R> zLpsi#yN##vcS2*KEM6C9;A||6tZ*(-Q9O<7`xVai2{Y(<4lAaGY!(y6dQ;)c>j5;dR$kPvFB2-&uYry%66B1{vI3ksi(?oVW7W zzp^S9A~LyPt|CeuLA#VgO`x<{(=2Z$n(vIK#(#!j=3=MyIWb2cw0TFP=1&IoebUNq55iyJiCxjT>l}j#v|E@WmcSEZ8cyM<(*NXY?41&n-STC5 zt}@kKQf;di24%IAxoxI;SnZ-tbMI5yD#Ji@f?Sq=XI)K_baB6!59B}x7%DX*3Gl+Y zJ10@C|CfFI4QoGs1X+nRFPl$IpH+#g8Eg&V9^bd>qC|h1{YH1Vu;XD}{Rv0B1v%Fo z;x4h4w2>A+C7s2&yrDc1mBmh|%>PMta3hZ1vGmRTc(lJJBU_g|X)M}}d7XWBHG4ZA z2$Rh?^A!4){fvA@s(zom=fj z7_z$mvA?f__rqG@k@!v;O#It{eYB%AQ{z2FJ@1Jva(E7Ss(Q zLRp{=JBJjs?%Ps_+X1>^jBrt?CtecUN{5+OEGaAA2z%v!PGc9`t)u07G;L~3}1ehM$;{q&G8s%;x!9f&ahE(^5C^ktay|Gi9pqf{DQTkgDShLH*ht)t z`_A{wDbwM*wsN-F1?<)Mxc!gs{Mq=(I6*sPBktHplr=>AOslWe(Au&?|AE}ZSuIS= zIZnT&7c+Wse)o+W%rLjj_2^&3Q9G(Y9jt+?uCPtKCk+ClzEW-j+sE&T_tx~9o@<`6 zUemkNcOB=_j@}cVZJyhnedzbTLFpmCa!l^YSzZ>mQcrnD)u?P_DSHNs99%heA-l|0 z<{M*^G0*sk=Objma}$kSL_u?=^~7p!-@qOImD81)(?q!OuY~^OAtbPyBAJSSoTPX` zzm!#aDHD`_A6B!8a&tLJK8-)!MtCiw(G!|TwXHkaK80Y3_*lIiOHq&%_K63{mF zX_bd$tk!G$v=b=LtRic5MoZVq({s-2vQZDN%RVC~nWKvKEN4A={$Aw#R|tyK5og~5 zidz|>jCG$zb4K%A@+|b6^L*p2?OWw*==++!vD|ZoSIV=GN^&<=9@2YDp*!19tVyOu zhEEw2Y20L!usb-Q{4Q5lYPy@NPt~{N?zS<5vWxU?^)~Wu@ig^(?wQGB zh58e#OCII8Tvr~-{<^a;lY2CYd7~X!RmWP&+L3H+FbkO5m`A??yZRA(%8mMU9XvJ> zY87*XSX?VVy}pt$8)DLML3|Cqb2*5>#opVVn(BY@UeQ6BvluL_fgf;N@7W<84u`?; z9!Cpl8c|J0+98{9kMBLJkJ9t%am?xI`VU57=I}VXtMei<8J5pFxtUvlzi`v{BxZ#_ z4*i^#JZ}3-`76X!^VRUS@XS^hxVtHd@?r5?p`|M>a=`x73L0C;!%Pc52;~g*52glI z2EGh*4U7&f3j7feiOcT>+lMZPN`?D`zYC8D_Y5}*$CKgsHdHM17~J}R;Mc*0!9Rok 
z(3sGYa6dfXcSL$hZB)m1F>Z9yuq;=zwa(Ek=iHpTaumz%%KBGw>%{GG!5GCm!#!Ef z334}=FzEvpBLp&|5?ngbwHRh^SvZ$DQI)yq+#$~VoO)^$--Kc*3W@`82295(Ct$uZ zUU7$dX>W*xRc#jhjCiB0`Lk7+$TT1rlCHc{)4k_oys@`po5g(-mp`sV?9~{{dx6|e zKFM+2ani|?6*4N2!5fRl_5*UXU!%^{fm!gdagh=<1;Ga*YIchbu(Aw znOIT%I%Y`x$>dGhF6GS3T|Muvyle9Olq)U!@+=DztHjRr7Ia?|KSE_C$w_p+cKV@u zpn$_1EDuu*Je2m(hd0aDrA)CVHOP4XC;rmYaO{8tW742nEam#$nx~*QMXQ3-t<}36>6? z35*Nm#b4zIs$A7Gi)I$eESp(B^BbH>%F(B!;M8C^xFghueM>2#lWfL9^RyjwZIefN z9>tjPby8MltCLgB-7t?Y&!Sv=a@@>XKiL=GH>S9!ms|&(yvleQU1ryDKGIaUK*nx? zG63Jb4es3Tg~}rNYpIpkNa%>lfeW@&Bl13vsBbkTF32RhE@=H`bkS>S6~I+Wq2{3+ z;jUUqW0>_3e7@aEK5th)X5AoM%OrP6?wm9zAtr7?%zRH-<(*(V-&t*pbJ{h$7Eh72 zOAnvY`Z5z;HM$eq95D~$nU;ghzsESmYVt1`?urD}1R3f|_Z^?hO(#C%Cvi;08PwwRTJ?3_N z+mx5tw&$Fd`*EIadCXkzawKPKpS(G)i*Jv6f_T&k!X{CTrTQ1(L~}U#Ty8P9d_hT8 zcc{D7m8$7h-H+u1(y!teVLq6T=3r#D!_ptdC|d|exs6lU{>_|Z^d&3PCY&$aJZylw zy z_d}{yPA_77Wex$uv6+4JO}hs>yc|{^^ASq*Ent1UWbEV#uVV*yIaoD#GcY}nLFM;W z#=4BL8AUSQr@u|llhG_=MMh>u^~`mdLg4FwC-^P><$Y+6CYu}VyRJ0(JMYTaUlVI* zt(w!$y)*Bje7o~K%Hz#-H(Mb2W!zS8bESw-#r7GcwE1LIZ-%?+J(x8Gk=r#V~M+ycc#2d$(m9j>1g~z|8L$*rKtGMaa&9EY2lioL&5Q6euE%* zOX|&xAIvFMDg4U*hGX0er^hN}rafjaW1}9?#%WggtMJuO$TLzjDu3?#bWb-M3L13%}WsJSdM9C6nBqIM%rNNzMM_ww^*7;R zp$Wk;!PmjPp-YUv;l@<+JFBDp)-D1f=%L*ojmB}7gac~{vH*LDfMyz#jW))ARFs@lx$eWb6N=tVEc7$uyJMLunbNOx*P2!}Q z9ra+ST<}!p*^JQ{*-aAFQ{$>9Y)lxN@JD>ZxETD$N~vAs zZv=PbnPnSO^bO>A3TnBjpw`s)>2bykLuDVmi=Ffl_LaNM-^^p?ChG5x&0=I5##r^O zw`4>rm_v*K`atcw@PJU!;Ct{TRWoL#htoEsO-fskwkGXfTD|n2({=bGqcTqhwuBOl z!Hz8H?xr!{Ctk|-Huv)U^$U$Gq7)rhx{9Ps{&aC^yq0FVt9aYR9Prhz@f1Ye>?~3L-yr4+S;frHjm{`ft_}4LHV+iZESNDQ{Yl!F^qW#?4bw)Y-At>Q zzK+)E3#YPSpi?-Db;I?Syu`CAu0YnAxmy=#TXRvLQK+VI*DfKXOV!-p zd6xOI`|HP+kGm81B(7gvD7Hgv0e@3p0na0)t@JDUZdJ{tS~JG>(adU@c{9gmei3*Q zJRY8`7dLNMO;Eb8jvh*Np|Z%n(E zIv}+}YVXu8sXwPCrCm=ul>TXkkU25)QD*mmJM@XsEAp3ANc}u!TT; zq7#ZPEt0L!fjsB34vF97-61`&z6y`Ztd5)6`gbed2hx^hjtxE!ozxx}IAF65Ix&FHGYU2CElS#{x+=)fwPWUFPwH!SMChytZRwA z&IpHp#lgqSc$hJrd(B@1!dQM3fBm449_zdzZLI5B(j_~h3~jS?%z8D3rP>}q7ChBszr 
zORN4q@qK*iy0p<5p+HWppE(hI-!jqzIYZg$9;mkW)bW<`>AunI!>0LTWAFP%#+37J zb?1?*2&wiQV{Eud&=nY#xt`J88vfc~W2&{m`OdXP$jUBy6*-pg&`SKqV(uNRgkYLt>`m{MIh%MAGkxXX3B=JA11s zYh6vPue77VPXa45S7#;%S_d13=aVUquv4l8@+U^v1QKkGR8+a`UgVkQ!-pw$eO$e` zQn7tv++Nw;S=@kj#8&N8Xlbxn@KEqkXe@mu&RT8%OFW)TWO)?6$3RqO^20Ts3_iIZ z41kg#BUgh#dT%x`mTTF=SA*9AO#%%9^8&u${9txm)k{+i+#g;U?hTf;a(GhsVz@Y! zu_Hyr2hBuhgD~A4@6VU`F6C_YCAll*Tb8eHo-8?=XALH{iEZv# zCQXX0gflw~EJ`1vgjo*MPYVk{NEl2z%)a1Y9^fnT9*F!!^{8)K?7R4+#A69Pm3%tBf|r<1^RrW9v-(vVUM+gr;{vvFHVq;y03U<`F6yN@*nh9 z_22Pr@nm(Ek;=IS+J($L)GnH_UY*l^)pr}K%$g{quC#u#ej#^r8jaFP_D9ZhXGdfR zdDbg%9r{PAICrh_W&z`#b}0OsT2D(VKFvcdLN(c0Cxzayr(PZ05_}nKiSqH(@TXd` zzC*80ZfF}(fD4{-3vE0zf0ytMEojtm3^C-%7vD5xU$$;Jf6cuu&o{XrWN)2vA$~|q zM|G)m3l)P4_BZs{wYbgJus2&v&Ai5bO{DT#O5bVxZtaZJklgB4-?Z5L@!!P1j@uWz zCgy}^jxt{a?e2U}AJ1V{qV~|3IApkyWIWP$bCflvnLF&yT@$2B?lkWhe;~FAy1gC! z-F?T@o$_Acv&bHDvU#mX<^wYZ6xNR*M3dnk*D`aOP0T;cE#w%JU}1euowkygN!6tk zx{7V=+*VU_FICyxMhP+>6O4|=Lma+(Xj8)nLh+%U!Slf;Aw9G{{Fzojf3GhwZW+gn zjmC0NWntsJamm*(%(Z9Oa~X%%gjL^gLDF>K{sZ zc`m5qPhp{;@`{euKh7{#=y*ud;+G(dBxQh0VKIrI>H?9!n%p^vDsXT`O(qE?j3>2SR4n}ND2NvqVQ zPV|wLo9}sL6gDQ|fYzFR^~N0LJQ2pZpTxXRSeP;(>y@nivkuSl7d+_vai@IG)ECMx z@?TPd)L&dH407#owptI3^?G(aQSU`YeXm*CX(qIhcerzUR+8sSRfELNm*l^xl~fZq z3-yFl)X+@Wm)-44<{s)Y$Bp}DGy4`k1KFq`K2eIQU#sWc|5F;uwWOcLa^&cC2*Xfz z-V5$@I~vxts3;Fa7wWQ}12phWbB6UMisORUI2YuFVBZF}{TSWjdAJE2LYJ^FI>@7} zN#;o~SF`m;%)7y`6K<{TCc7J?MzUP%$U2o@Ye)XOgPx|(HtL&x>raq@9my1bYdzua zm85EY3gl)JbFMWH*M;r!FP^{r9TWaadYpVYxpZ=)Bu`?qcz^76-$GBE8YCZ`RemYX z6vC0Z&QDe)l#_cICeB8MsVksxNdCN`;&bP7|EY9g0AQ50hu2-jErW7+WN!vUj4I?EXjIMdlj?!pKS1%vE46el%|2Sy$P9 z=gf0CLV5Do_tC%kl&Zl)^qnTjZDm$f|{Y(QLAEiRKd&4emSJ zQN8Udy^*rZmE>U{*m5W}KqM@d{{Xi(1COd-$QAwy4)hVI+fS)HePpe${<9M7;;>o! 
z;|lizu0DCEbcjmjQh)bxtTmjkhm%Krq zBF~qf%R`mh%4tPau7G4HA$ZVzJz{SJbF&S__>Ib_^3k6~W@3mfJ?*Ky&7=%Pk{ zQi*Y=Dzm`)l>oU?PW(t{>pBfnH5(epo2c0(QpfCK7qW|j82gj{GuOH2ltevd6)dij z#K?J^t?*Fl+C^|G4_RmM5J*Qu{STuoxr|DBs`fXv#mo2_@t1#=P?O^|Vlz zx$C>HDjVhYVq#>k*;@xKrguRh=sD=^p02W@A(a8^UQ^k|^;l8`Ia8M0L3a&MFiYev zVkg)?_d#jDHxpP%Oyhz1$Xbr;`4v!$4ei?Gsn#m4)&gc_7;U_wmiFDRyID z@E3j#gXte$XHN4*{KmUX7!YvRVZBFiRuEP!T2L{T1V#YwfJ!?c7AXMI58+S zE#e;f%zotShl7ZHrajRlJvTY~^I8|}1(~p~!c)S(l1t34<<#DWmxSwsddU$k5O#;} zg?=spDN6ad+EFESX39 z!LY1V&gsZMuAX9Xd9jimO+L3WTmC_olvB!P*z@DndF~(Oh_E+Ojhd5TwX;@$WmsWu zwf?iuI05GX9syOI@4z>H=DH-*km|#DSs<5`&q+h1Ct?PA-2>qPOn3ffJ<1X}2isb9 zE`pf3LvOp|>}2O#RY((l7HdjdrTtPz>74kfm?*vw_6eP+wRA)^rcGpn<6@r532O8u z44&5Xvc8T^eQzew<6QV;H{n;6fv498c4R9mWIuqNSqawifL+PHXl-P6%VQ5`Mq6M{ zv!~cU5Xl@u@jeOE*=*+gf8lrMXSb`8`#r!N+3Xwz>vhsO;+%G_JMTE|@!KKixN{%< z?wl~R7org>2nx7A6}`d|N`F+O#wmlSHmk}$xrZFjiuavV1$Ttr;w_;K*YN{B5tDI? zc!z%5PNB58Rt$-Gr4mwc?#KvfH`t&v(iAD1^go#JGX)3U;VrbD?>JN0f0w7lPB6lr zTK&jVj6?@$hEaxHRT@}@JgicAjdUWaF4W;}QMZ2zIPSjX%LDxV(-tHF)_>f2AzeGR&%%jKU)R3J0eKHyUvoxWY<_276Ns- zrL0Yq2%V zuxIQ`qdujjge{;Ae?(h*6ZL+Bd#~G{IQyI;Fj5sbwCh|qT&AnE@D;JcP2o9i3CG}r zbj6@_9ji4lv>O5lwRpIo{fajf+dhj;1(vyVZ%ocINd#d(}>A$t$PzGUuFNPCa z0ft{1EpnR-Q8J@o8Jx|}QBuF@3efvvIM6^wF+mhwec%xeCqr+++FgjR!x$=~GeJ%L z20K224n#pvr_EsuPlU(6+jRnk*K;rsf1;n&fbaO0Z`?*cVH(WaNoX+7C$e0Hy3#H_ z?GClG7-HKf&$~4^{^~G3W7${V=IR&H1_Mya?Sx)IQ_wwCBef%~$cs#%U5`?I53vJ^ zy5!{oU+@l$=oz@G+u(sOgWEfmbL>wGHDNrprq(``{og{Etbf6p{D=2G;SuGY--d;L z0S@qX-Zh12wiDk}ohn};j#7MIJDBTV^Nl~ii9N&LXTX2W0oEmYzAZT4sPmtK?)Gf- z6(;dKi>q7DXJ>=I+JIVaYxvV&a>u5iaj*a``ZCzSM@`Pa0}EK-IgWFP=iBqZhy9qlSBJmq#(M^H)&IvM z@4;2H<663K7Ezva%MZ-)*09Oj@LBcvwAvg|u6@*daVk0x({WUo#uY@bA^Mm}+s$Mg zOyrCva!$iOeD+XUr5~Re^^=R9Sxp$y(W|P+BkFw@^<2zD>t^Q>JyJMNFNd4+wqYSZ zL(SlC`or&J`w!DIqCIC5_ii1JZ5&%r(B6vz#6cbhU_kHS_l^AT7XEMhhhrzN?c%4+ zXi6+(+)ZX=d`}Pf3BK)O*h`Bz7SbPP(Kq@t!WzR-sZBqw09&gB{i`64V*Ff|dVf{) zG#b2u`UlHzoQ~@EXo{*if9X*n{|4|pf|9ejP(9&^?Vn2^KX2$<({WyM(Duzq6 
zKZ1_@6!ojq;qym%=N38>(dP(C8Y2Aes1IJWhp|xblbaqM?Qxi^@yKKxM!op9!%{!W z91=bL{UdSC0OE-o zL|iA~F`Zy#Z^Hfjl4y6U&`11RScro6_w=42L}r6wyu_hw-;7!DE6{qE?15+-4{;{K z#f`piCEwSbdr_CY*9Ip7&NqSCem5-?owIa$?NF+v|HE0WH}@NFRPOhA&{_CcxDMm) z<$@=dPn-yv@dPZcfvo$t(8W=OAzW`Pcj^V_F_mxrp5E~*dJL~&w2vVFTmtU-w?Y{( zbKU4;d#PDJ!5gcJ<8jjP4w?nqy%Zeh8`eWBnH_&`m>hR;(f*VSKxg*0U5LLrIep-W z4Gg(-E#?+{}$A)EMma*P6FT6 z(m6mSLMK*Dk(x@?rGipeJRsJAK{$tdxkXqhWEb+I;lGaFw%e|6r`WmdUUq_`Fq5@M zBd3PY93`SPN;|yJ>UmP|NV}{4?GDNJM2~PbVmf0%6ivpFOa!Sq)4paKL`3Rg5 zo}h}u#I3Moo1pMiTqr@koU3&`Ay(TU>=z~yy>uo|(-8c{YMgQIu~XV(KL&$53ryFi zMeGH5n>^-T*A*keze0X;F5S^Am@3Q4VsK?e+)5l<_gqdGhvF`tK` z{6gltF~pM};p#cpsqJKgg>`{mG0JWSZ}tpGac^hn0c40d*_>;v{C zSn2^dRh5bI>%wDh3*&m2^9{Sa`OZLi6ouFmHbzaXBm2?gjs?=^Yv%Fe#2NXBV-Dl# z^PkIb<%Ffx22b!M!Y=j!mtZqL7v2jYAyaq*(}uQamr?*foK%Gh&5pWvG8J|iM_Z?{KZc8 z3F})LtDwUw7$Ne`indAt)|m>#24!iz=<1)HxjmYR z1tFVY;ui83d)`^>b*i(XMWceVtZFNXexe@GOS$$bI1LTJS>anEv+2a}i-^pke!RPh z9k$U*+xU40ZFZd2`<=%**3h%;YWA`F*vhei-m;AMO^3nSjq6ST!4+_xIu}^okFvh) z?dcMGujzMwyGPe;Frv`(bey^6J&OdVTTq=HY*=3QiP0ERa&qEZq-v50Rv#f zN8PTY9wL|M`|;7d4m*S>`#U$Q)Nt196{yYqO$)uGN5Gt8{en^>YQk$?yNW{HdYs)R z;63`B@vsDytDf}ts0P|Q@@%)bs_p2U-6d{_Do(usulNK{i%TddMOB!NbH$f@YjgTUxOuHbQHz7Do+Quu7yNt3_$rp&~z5;g#CNK;4N$mJx&hNgMe1{A%n< zM$yOjqoQNc5|!Bb_Jor#Rrrx{vqD%S{KDv2CE&%1V_h@y_#cyTPJ$Ve2YyTip(Ovt z5S!g~9fzeciFusj;$^h_pDTMM#!F>((@~|RY9FHKsPF~~1e9txd{T}#Pi`fxO zr(K4y3+P3wv?kWC!#kJaAWkIXb%Us@2E3( zbeE^{39-B{JNvD??E6cwLyIcR)%}orXveD8k?cTUMtpBp#~DQaQ&}&gN{Op^-r+jQ zKHv!J&RJHB!^|nWdF?ps=}A`^MvRBs5yv?33jx=CGKZh=n@70iN)UX~c=0n-t@ja+ zccxuR<4AIe>vH4N6U}4RVOB3dKg|QCHAm!)^C|sy5wqPp9xI3ezr(S)hocf*wj{5) zGK!XFrajLJu#TDTJojH=FWUA)P9hnnsAIIiHD<#md}20Yg05u3vN(eCiuE?96YKm( zJ>FRqd^GR5~jq<(OkCWANG&yKYXamZ|^ zp!1(2IZ5R0pTl11ZRW*?wxanEeAfUXD#;pWPBfQ+68ym|iYjT6T@B1vH!DAwuu1&< z+SqEn0y$RHJPevY-4LuhR0X$z061=5vo2Hli9xwQu(sI!BkkE;U2%re#(z`c??R>M zk&~PIU60kOHokgs=-YXD^&*v&IrOD$wB9uGD-qX6=&+&?EI(0RE2EX)-FMY{Y6H(` z_@}GApLko5mo4o5+S8)?#^1P zEiR{nJJ}=%{K$P_#`^wk$r%r8K 
zp;lNta+CKLiOfZN>?rL&j8E?bbN*XLqGB6xY}%k1`IXv?H{2arIc_kY2q=d>6DGl5 z%O2$XLGG#KZ_dpSCapI^sY_VTBIl)sVvi2T#n&JyJlFDHA z$ZPzST01S-?;UiGebA2k(m8>T<#Pv5MI8Lf;Fnd$6?9sI04Yw^VSsCeQ<-@y2bteP zcukFp^cRY`MmP^#ULikjU30{nu1l`tq9u5RS7HSW?7o-A$ir!yI@A*SOJOM%e$qba z9$b}NQeUzLyUC>=gxj=Ih!@Heb3SB-O2CCLo9heoJbz)eY91NlgdI6jmN<8})0kd+ z!g&aCM~PG;mR=A!1i~UuWRd;Sa#5jaYyZKXyp1!KO88*s8EpJSvK%o~TiQgL*!!$U ztTg?pyJUy?(kya`eE4+k!4`7MUaEm*h;pm{Kc?OT?&kXc|3Bw__PDma_ee&lsEm~C zQ3ooRI8c~1mP5h3p zA*aV-FFXKO>|^m-??WdrjHQ-t*1A-VxQU2sI@{X1(b@N5dT8fa-DG-Bv_59cwJ$+( z++w#ImFWUo+sLy}$HThG@&J5tD0-S3Z_9I76Zg?Uy(?PHg+z7BbSiHBn2g82PWZ@J zu9}#E)EE?RgskvVr|C0vzj5(ybWXb?wia&JA5Qr_RcjW-pGdT(w)X3ZEwOlF2}k?H zS6P~1@41X!@t@)!Tb{(ixB;2{PvTW%(0b&?{6syx0n-zmtREu5r-Lya(e?T!4^qRp zpJhoRn+}`Hu=)G2@1LD$Vf9(Eu{ZC+2i4yAlV{hrRk2RzbHJLx?!->x3uHqfyVD1) zLv0mtjJ2@sHnMH&jHARs3}e3}&3>~njr|tGmSMEEb+?t!@qVxIBpK^Fts{(+P~Z_G zo!ytOta-`f^j-@vb0b-Wo^Xw!)}P=ccQg9E?4BKq|Bg)CocNH4|8Dg4%%qFsYnFc! z3!&rR;Xi*T(y>4GCN|@ZyFD3Z&j!x~euj$4(}}H#S@F&2@dsj4(bZ}Im4f)bf0I3}1JudBJuxCtAu*1a z|MTRZOpaH?U$dG$^H=aB6~_jVoqabE2=`!<_oM6D!|CK=abg6`1WgCw~+bp6wqsJEVY)?dvvmO0vca4YX>yI z+1PpSgWI22`Xm}A23oe^mrNw8QsJUUe0t)C|)@`(h77 z{Zt*h6j>LW7JDIjFm_*jJk=SG#9ofwMwWOEkvdsa<(eIT9S>rK*v$CWxGPpKG1B@D zmO3`Mk!R~^Z%DVSHm+}7YutN0M?51v%iZJL>7I~#g?l*lDQmjkr9#!)j!m|6)TQV{ z70rB0A$6HH5uGqEo*ADX+Zme_doN}srnfw{E0!Ny8hg%UtOz-Y^^ zZa28{<5c)MWSc|R_*|-wwIUAZ3AoNpheKWh{*5T6lQ9gWw9 zhyEFV4?Y`+`xEcSUy0or?;W2(u6`yy&JFl+?ugco><*u0|Nkkf6xhNCL)Osv;4Wff zFHqUg6LN*_5A`Mww>Dj+d$Q)z&`7w#|6PxP}*Cru78-JnQkS z&S1aV&+Nt~0_XvU=ngnM{xduFe^cG$BfK{a;#;Ulx;<(^FKZsHM-{A7)TOXdz2Q0P zHar~IT>f_XljRxZQ_DJ(rIqa|eY^B%$>fq#B~wZZOFt~zTJ~e9o1Q&QgGVh-x@vgd zcILRO{^{v6GyCLpt9VzX=PS>zcB*>kDj7LDG8blEpZQ_tEt$5o!>$_C1h{DJkmydH zPeJHxcx>1e?ny_m{IbD;Dv=6AnXgF9vMeyF*uQs7buOh>!F}!v-d}ycdi#66aSe6e z?x+s#{9zwr`_ed$fBKQDX*`;2yNSZp^SI zEjMBRb)j$f?Fk#5h@K4(2=xrs4m2r$oxHbJWiOYemo6(AT{5ereMzU1H~8yb@$u5gwPq!U#f$LB?+?F5{i@}` z2EnQ2pO*eb-Ol?hmMPUu+qqEctch~h^}HkOSG4vAcjoh1h3Na97~Dc6tQ 
z7yVC?Q_wK$&di*Q=JZa?^e%FjxZJJ@_Q{stV(lV>L$3vfGDcVMa_E!DrPzr?1HAm_ z$n9$82vbjKs<*rEe&06l|2&i3FS=&Y$E>5Hi@k@@JJ~Vjja&@f5&D!`UCv-d>K6SS zSWZ>T)@03{3+)Tv6S*@|CsH216#k7WCVzyIbc6V*e0*7E>D$GwqTPk33x*e@73|C( znQ!EOk~bsoo4k150^l+7+K1t`p01TvRB2Gbk@H67J~ao{9#$t@uW`eY2IK2qSM!@H z`?KHl?XeFsHc~zKS<9NpkU;mc_9fwKZLjqyeyn^z_<>kGdL_PUuSKTPOYS@TzhsnT zwH?rRrk(1 zb8Ea;sb}VS@Ar;QmRUq*Hj959uNHkT5H9X@?Tu^i6_=NNO6R8$md9;D`&4H=_hQcw z-$VY-(kf)0%bJtDE%S{3TXNCb89gnwxsyV+`ey-NB`Xvo# zHJo0*S?!ut9ogUdrnpww23l&zU!#`lU^=9>4-79(FIbuPbKzfQ1EYg25o3=1Ycd{R za=+reDeYXw4_TM8zs=sAbzA1~wBFv&9o?*>6E{(lB2MML) z@~fdW(Z>=6mRD^tN1FSzr=PFb|5p0ij13tb)1z>b7_|Wp8yA4%o$(i=E5oJ1PXjj* zyIwtbIM|I0`IGFpG>e7MbKKOlu0cG<^Y{&l`Xg#BsZ^de(UoH_GrqCTNOX(bN>=vl;LCwiC7lZW`MnBTm351jSau^zJ5a5o zm-`R*1HPLxZpf)uv1x@Tvqq+$^WE(6M{$itzf zf#=CloK-$7@IY`v=#}u=$ZOPGokne(5%ETeONq|N)QrUH*y+g0P}5*>c_ForA1E0? zzDn2paPA$sH{{-*`)F?G+znS>zPj`3^xT{CZZ5d3Y^P;m=BG8T)Gn;vwBk+KZ&m0} z?bSMC8ss;0HE2_7TgA@)o(==8v8MH*M72oW!0@u?$ykacVuebjIm0u*_7C=CIWHRZEt!d>vFg#n(1gJ4Wd~WV5yg?B zDa8{?vx7sT^^=Ct-rmnS!Tpr?4*wr%?_^ZY`W`iBUwX*b#&guU%-+o?P5y-jl8k&F zt{d7Numr4ucLD>0Z9)UW-$lAm)5O6p&bx`5v8A5E+nm6sbuB(BwmkAA+zP$E3HF{TiF1uaM z<;sp){`x;R{9l7MwR0-@{afhEbibm-6C)!v17}M|k@r@51daOAb};@W|mqlWc8 z=X|fvU+TTVQ_Zu+S2MFxh2JY4uW&dkBmIPDyQ8mBmfV~mGcP(k{B+>XvZ|$PO8O(K zON+af8iC#6#qmt*URxP8q(Ae_^gWgKR7R()=Got7UCy{O?KN)&cdlcXZL0N#WLmss zbY8d%In`eW-U!U2!r2SKrJ;7j*WVNGLI2|#_;ioc^L#CKax+Vn}W7iCGZh~Fr3UTa>sv+%Q`-KBp7 zE64vZHo5wHJ9wA7zjd|rT=2KfYFDA8Lcg3-87+K0oE43!$(cm4Mq+CtbAp@84wW1) zzOOi0RJFKKNzbxdgNq`u_>I=M?X07(Hum;5U`73i-_1oWs8{IaGZ9F<8x4oI zQzOtFxT|bn$>gFx3*RkRo8LPBXx`|&^|{|(?Qpft)gP|DmHTLZ<&qV#C%x+{)v0l& z+Vz!&WslDOs$!tpnL48z9&PkU{j1e_X7+c@H5ORUShrfX#4m+k3e+uYQgVCIzQT5e zvBJYeqe}Avo1^2bOPs4c*F1BmO0vmq@!ycSCTDEL3l-{Sf0Xv1d%Nu-op_GniTB0} z!e;{G%YG|4Pd~D6ihnA3yUYFGg=N>F+SA6{1cZKPN8w!RN{E}Zge^1`lyl7t6{3H3Jihc<_;aHVD zt=i1$CoA8PvpB0sPS?t9Yuf8?Zxm^`sP?joCwwnBCQ(nRk1fZ#EM5|J2cIoV0~TYh zeR6GC(PgqL?+kX2{cdgVyyU9y%A=z8Mc4DbF&V3~yHxlvXH90*mv9cV9mc=E-MR{E 
zuzz%HC^Im)tWoLSlHR3D%SH!Agg%aRi{C-_)tekcoY$NiT~YT+Z!^CmE$L7As(9aY z{a}C8_?f=)Et6FeFT@^?j0;_(62pS>r^>%AeOsfJ7ClO?!bQ47 zjY`~4RrLjtVc~wEk-=BFqIcP8dJPXK8gcFY!tBDi1-^ne^K0aPlo!a|o?pFmXyOOo zEft$q`McthtnL|KWq5PWS6NWIYr{^Bm({PzvLR{JZu|gsT=<@yeM!A zPBEbP!Q!`y>zAAcjKKqWIbstrKaA(w2E1mvnOZI&lvAp=X%4@)bR=V z;0x{Zjd*f=+=WK)SMcfJG3flI@P zTEx~z&qRJF4*pCq5_lp|C9ta8Sze*6a_KE4vx~14Jyhf?+C--6>cUZl@uKxXhtb?$ zkhLXyN5+f(#=h(Qp^SMIf3Low&VstX)?8OR`C<~R<4Bj586@M4Mjy3Uh{H??s%M#lyu5Z29rL9lv=Re|^4;){2-bt;r z)2?Q&T*nReh&7OWGO>&ps{zDp-+;|{nHo-mh&_3fyqZEh2!k#Au<{q%-mx7ZW_cyM zZ6nxy+|T~X%gF~4HR7kq*eNIDsx_9~5i0w=LoMk=RA*@%+8UfnZr+~Yom3a^Nqwe4 z#7Vax1OBDpv%&engS^%R?+Jbv=n&Xg{y=$sI+%OQwv=37(xPltc%3!VGdZntdQJZZ zPfhnj?oHmp^qv(qRk^d;tyQMwOiWLBzw#gB%^C(vTJ+^bsBC7pA3E( zm=>55oE?6GJfKA}I!s1~M&r?b@eRpU#{JZ!_q!TVCv=Q`5?LDmqaM`Fu1!>%c+J_x zvDVhk$hJ1Jyv9E4&Fs(r#2)wGcutxV&weC!PrOZ{A3aktto5v)uw(cj`;%3Wj@PkY zSq&3{PAPFuygycUkLXE!3R9@QG=nJZa_Wm;B}?ZC>S(5i->0tIqf`aB2fP0q^?_{E zO?oe~l`aGq$%|V?j!q*Y%U=p-(0BUd;Q3G_ddYIVbE9XZubyv)XSlnzJKtT?*ED@V z*5sU}IgadCGuouR@B7$$(sR{)jvl-}Qz>wWt-v}9pX1B%!_ixTiH}O|XCejBXJRi> z$MFD}CzL>py_)E3Ib!|Y=xFOiCPfo`m)mX6*`IKhBkCw*@Wk1B>w4Ui5ArGtxZhP_Kkm^L8JVx}%2x3k$ z$nd!k+eS3b4`i^ciiTr7lNrW5`yEb)%R`OVG0xYhV)d50vUj|%vfuCz@lE#5@oaX- zsp2@n`6>MdI@{;7lVce5**UP`G5jUgIOdCuh@Fddi4Uaq+K<#f?54^Mcubb#Ra}c- z@d7)L2_hTSM@_NEJ=rZDfbaY$KFg}fU+IyzlKiZl>=B;lM6h7Z~@o{~^F6w1=gO{_v`X=LC$6o7Be*29b*E(c09!F!Dhv#rP zdzmYUm$}7w*4S>?$?*8tcG*_d-jax_KGcUEX76vmj#`BqsT^LNYJi)JH;CYGP2Aui zqUxHHNwe_Z$mdb)F#Fkgy}&N)nbhuoghLD6bU6|SvN8sa|oybt_-l`{_ zqEQud^Ce#7WyHt~W_Nrs(7Od6ZYJMP5SMwC-S)o{X+#(+dSVASb}Nw)6VSRkvU4zo zo%HY7*&Bz?cR#z}ojEs{c#E8u-Eg6a?ETt!US*=1CKJu@B+(3+tk+A#>R)ANcZua= zatxklH=+SiP>Yb)H<1y3ntiZ!mMv65Tg8s>8lrB>sLbTFo+B<`D;Xn$h>^aD2=N{G z{huZV$VKGOUF?t4XJ0xuz$Q6Ov(qBR)AysssCJwU6V1uYCKU$>)2y|lfB1ivMS>|1m`N`xpkRID@IM6DZ}D2net3k6mo=z;`a7`| zzY|ro1CR73&iE=7Ju#Mj_QC7~-_AZ#dtROK*Y-z3R02DyKrtPOsP4_Lt%)+bj&ZaD zj@^htBbyq?j)9tPV`bEv(FxwTfvUR8)kwufH{_n(xh4Vx9h}vUV~VX-{IcSm9XzKW 
zdwn^Ulfc;kM>}#(V~+YbUYVU=#rL;MVWW#^?lIgeEu|lxWhc}JKL0~3g^TBPPQ~O; z0)xh|_g;tCj9b_@d<|HB+6!=63qA@0dL9>X!y8(Pud{AXpCB~@h5*5<}8Tu$udynuML52Ri`TQc-`X<&^ zPiEv~)=p-Ur1#o!@ct+fOJ|^jIpk6N!cNd4_84!5mc9YM=-LGBtN^=a^Zp|*#bK=E zm*dQ{lpRS$RBb0pr#jKVJBdPBL45NA#N4cBmt+gn_$=77g$Sd)T=zft%d6nX%RI-y zuJ$%$pg2wh{xg<191D9Y>%gpyM94Ua5qh1c-Uhv{=UI(_>NUyvvW);45hi^}1 zJzr-915jx_&b^gYvD2^43B2DR4yYbjH<-98^_IJtwW^cC)qZ@xpQw{x;C=U|u%JEo z*A*Hm1TXA#zVvhaEuN=7tX&v=XMXz>nu^okpazvr&j2~~9X-Ze53_6a7WBG=6`e=4 z)1yH7Ex0%PR{Z}5-93Dk{qR`z*enL#>g5>b`fZ$%1&2KVeNSVZcR;B_!R|GdYh;8} zU>A5FP`{hpq;v4>LBz5A2HdAZf7N+E#ZLG%@b4a?0qZm4TFk5y5DT$u+>x1&u$(t=?uLgnB6@#6``WcQ?*Q0m1(I=QF^?U=_mDk%z&*tzeFJ_y4o5piWX2*e zWIXXFPr>Iap%;y24%dh)d=|O&50v&3XMX`CETKxum@dJ}##MC^&$`aPhx^ z$>+f5Pq@}DD0(^LKMe#QWo4^^eJ5DY8t|?TaM-R;$~xqsH5E(Mj1iB99(q8}A3<*$ z!Ji7?=`P@VnK*)7VBDW@k*aX5!Hm2nSh5=^O(1su0Y3l882d5GbI|8#VykWfdwyeO zC&B$r0gFoH2in2L4MbAC2TksQpLSyeec)zCfMi?nstb_?%ZZz*#JV(rPS!v*9*&IX zHHa97+jtkJd>O9VmH65)Yte&M8!Zx#2WY7`Yqpdq=OtiYTkiS|+^ZEZS4Ys@@WBs= zBv`_88Z*Pb$lw!n5-fu=jRo(g!R7Lx+NNC5k?)^E1+UU4xDS}Q8XDb=Or6R4s()q> zY*2Sc7gYEI_jSR4*6_TZVDq~?;X7#Qd9cq-zUgp&Tg?i5!E6%DCX)KUKJu^<((?gi z>0jtmR^-BMtWP%3sSFhV;g<%iMH-k9;3{>9Ro>Gc?mZf;IL-L%@FQi~U1H7gi@;Gw zFsk!N0~>dK1b(v*Tx|qhv;qSg0;enBLyHuLy~^i1!SRmdV$23|-C5fg8I$xt@!!_q zr=1bF!TtKc%K&1PxJMTCc8cMJ^;4_;CvZ?_J_Bp1663GJRporDPH#phRGNnbP&QRC zCGT!$wTVqZFX+G-UAV3W?}~cu#~QZ)5)N?hE`IL~9jdG9Gr;v$@aR5f+z06}2tDI& zsPjhV*#PeNGFSZy9O{9Q&j39u8HsI)2}uic4M z&EvPnSdH~uy9hjb583m7xbHTKis!3nTVQRuncWhc_79@O|d>o=OUbWv@~3wE>zUbTRW^xRskM1Qa_Mm)-` zsTj=RU{VI0M%iT^`0xob#;Nbv*F|yk{WyE~UqJO>n0GTz7yKty$5=tdKf$SL2g%>2lx<5%Pk2 zfg1^CU&K9X^Smf$)ZsVbSQJ=pgud?vOINXqwZR>A#XbdBO%RcyzQirT>zow-y2R)j z!~Z+;+`r+kji5Ux&r&w3`Wn~aK23S=1f1_ljjAG))Dg>|FLLB=MsXGys(jXLW?0O6 zR0gI7tKj17JDFLS75f{AD7&>2W7?0z(OKomOU%>-+$uBsmEeVy=Y+t6GIAQeWaSiN z9)}h`K&$LXw)AFn4P`L=4kqM-7vFGwbDnwvePIpPRc5sgvBF=o-ru5G{LCFPndx^a zJlcznQpQ~C5-B(ty<`Cx(w>>*KpB(3xrx|q)v>wAk|j53B;QtGah5Zq-PmzNU4lIa 
zlYPp^O^54+OR5FP_`^Yhtf;joPR&^r2_UtF1%tYTt|`Y4S`-9ZE83ea}3(bgG${{RDEFC z6o`EUg;7?8Pwk~zV ze*~M`@?>@Sy#(wo!)XU`ZaOkA42^%n%3lw)-pd{Sfv?Qxike{5TC|v4pf!(mABz_H zB6n;K#r5DB{o(CVM!FO^+@HHtWbG2k+sIQM1je1^>Cd7u{|;Zdj~MxBXeYlC!P*S! zevmZ^^6Wdnb2nJ39O|ZE{Xb+Qd`C~v2ROQe2(*Jpx6?#xNzZzlobF>#wvSbq2PR}; zvm9e4;ycP>b)#wBiB3NO$*eW|5-IrtvmVF1enZ=x1=p0VpbF7{0pXv)i7txI zDBykUklKtQKvlz@K-vw)4Tcv)ptu}X{4sQ#1;Bh4(8xve8-*>ig6DsQya^@M>AqTu zW1V1}S2?2)y0^lK&O<|w!P{;||7l9+mdnXj@Y&|z@dS=0(e4JLbG^i!mF-X+h*SX^ zn*&|xl%muKoTMV>M!EAzBu0!?Z2-^91t!0xc;i)8O_HPnE9&8jHsI77+_M`{AhHtv zFdxa?ocRt#BDaEaYeQu%!D8i?{SNis&+HnJcQ^~(sTF6nXZDrAxht&ejf~35Dh-A! zTt>eA#7ee89tfw@Ia?jlFLRfDe5=PbA7W2-176nyd08{@t2$> z9q}l9|5@zSoA6Y2Cb#^L#7B5h4iGKYfhsDu5E4IHJK8>{>c}YjZhL^r{WXb@zTMH!QQMJk-(i2weuw>7g)$x=fEno{aQ~1nKrM7Z3SI;l)(uxfJ@I zN<}}@;b2fGJroXBqY}>ss=3dn+TPsI?8w)Nn(SL1k31j!$Tq}#$y3SshNH4)uJ5An z?X)JDy)v&$@19nk-Zqn3r2dc7zEAI+*3~n{dCWQ8^^EgtvR-#kA8sfW?#{)>#!ACe zLN!B0!C}<6xhFI&y7j;oM`*W){ssopEx!; zTe$voE^|If55~#P{&-;h&Ss7d)PL%2bJ>=Yi`Csa$nq{4v@>~)9I*XV@HvVvej&c# z6~JK@^>J^eX4-V>sJJ?tD(#W7 zer2=Ezl$tQ7RBcUmX@xLH}>xEE;AlT)^%?655)Pn&A%n9DD$%ad;jg3S2KsCe@vX# zmMo8dr?ZcHoBuBVcGq3@g{;vQs;_T?JDiA>g!4lu=+5*(aA9Z;mFd?766K|VpF(Bf z?;|%vOX!_&HTq(_7hSLmt*ecw5jH}$FC1fBPms6UhhCqHUH$1UvVol9@0_ctJbaHM zMz4XzL?d`YAXO^!jUt4~rY)a{xz>LI3>mAWYik)Sz*^i`ca}17* zjsM{7o?hzcYWv)8uTYr&m-83*rp$?1i~M{1m9s`?PIEtPf8U##8Sr(mPcqifdv!1M zyv|w9Ti=U+7`hs402W?hy&H!&2463Kw7fd_H7~M;zAM*JDQtPv8^0&%C&$uC?CvDv zs(p*|13EXm$XWfu^|#yaz24i)Q6wea6kk7;6*Dw{-9= zj{O$qv*Rx2U&OGrXAIQ5jU&+(L!7??X2dskW0!^RwtL_(QO3&_QR$ z%k&X@H`IV`q!mKv$T-4j3WKb`izwNvzI$KlLoexEVG(J{M1#lha{ zwgS($+0`>Qc{X^x8TiHe?VxJejntuV;sLcA$8EhF+0KrRr|okc3th+D z&$xHE?sGMEopKgCpLVr#xt#;;z1g{~Og{T(*5@pJh-bc?oY~RwH)EZm>fCWGye#~7 z_*ZhKSBGka+J~-&?jn<%`nP;*8G1OhHFTOvW$a-ETY{N&sn}PQ+GMYjf&E>$X{1to zn6(2L%_APb| zbkuOq^iFhrW_)PvV*B1WkDqx0QvKoB6e<~Yq%U%-$O)diF;sy{uy==_kK9OwLKj_{ zuA?J+D1MY&-u>~piH6n@wi)aRRiM9m%>JXjk7GG?+V5d6tvwZ`&pYpN_I1o6I`?L} 
zPnA#!$ZB1es-={Xm_}@GJ1Xq;j$8Zb==Mxq&Uua;z1>}Ya>>y$Usxz1W@pXwXz|J4RJx&#+Z;k!dW%R+iooea#B&Nh)jr|<05?x8J zm{D{{DGv<`HKWVw<5ccz6#63gJQc=r=)YS^S&oWSt8;|fP(kz^x^oT>uMh8!jEFXj z4v5|#uWy-SrLIrni{xYW8{I2iv+RrPZQTKH1@9DhckhCfO%D&R)(p>1>%r zpT$~sAN~8bQBAA_3$O?I(9b1S$CnYibv-@6I#4gZH+2L?hT_39!2(`uf&=N9*D}~Q zI4C%n>S|5sJy4Abvrp4|v`1(ZJ#O}edV@8&VQ=)wxN2{lj7^Tsv>bMP;LNhkp-SwH zo=Tpr&V#N4zJ!0Hr>SeSr+HeJv>WN5x7%~sbECVyD~C=;bDf{s3XFSgJJ{nj&~Gm! zHphC=5pHhmpJ=N{2AyM?Q8PI${A0LNqzFF!F*UgkN9PciogJT(cn|O9$mDU$R@=|e z_)|tNdu`VOS1accM_pGf_fvF0?n=kkHdHp4=-BC)>X>GqWs4X+j270%(5=r?1FLI% zJ-eZOi8b8Fp6~Va+KeCtIz!=8L-j*9LH*s}0=I{T^8bGL-7VC-Y)$XU9C}cW36G{D z*c56zz7ZRdJZd?dXd62pA5Z1{CeEj=Pa3n`Q+#uY<~il5oBp}Kj{AV?E`K=vN&g7X zHjl%1%Cp^71z2oy+Z-3j)|hHPWW0$6*wFGqqB?nd&&C2&d0j_LcysDm45yC6iSQ}9 zAvdPCXFTGdJM%r{@6}FBN?CnPtk>D9+h^DY+8R2Bx{kX3c21=4^Hc8e?hCFxbl2+T znnBh5cFu2zxtM87(2;jJ`{gw)3yD=Ca!+V{xII#NS*!-0jzjUrvC|2w;|6yR#}m}! z?(6>9`?LEASJboF-_tk3eaU^l|EsizeeFGOdXk>E+-}!?SHj)hHPSZ3dOvs&wXP%w zqgSG0d|33G$RK#kgvghn162AOjZ``w`VoDrMd&s{Z_uqa&6jgoeGE z3U9OR?Htdrm->q1UFRuh8ht!2xW>YX8dBHc3>~$`J07Ex(PUc(qYORLZ+$=c68qxm z@piGPQ6K$47ST6hI(^^X4wX?)d311Mur>JxcQb;+!KT40ff2z&=zZ7GmANUMmp`P( zQ#CpX4Ef7{sPZ>Ph;O)|7~d#($@GtrtbVfXHOzc?d~U> zGwhd)m2l}I{3uP5wGumGZ_pL+2>kCLxL=A^&?(#sU8Q;WUO3~g;mMH!(KPA-DqH0Q zxt%BJX4Q(E`-;X=vQxsgC+r_m_470PmsCW3mi_gwsQx;Q$}Bfp{|6_0Hd#vMcylt2 z#*znFhq|0zs?Hd3KNV|QlB01wQ2_q9lbo1}#F3QHsqr9Gw=K32uCXQdWo$Lk5o=vLf2DGq?{cw=cfY!V5l`bI_ytDdf9ZxFqa|EPp7TFB|6RQ8{fP&tNM_G>*k-fH zp1zf8BEH0B;xsmpYx=+V5?&w0*T#2K*)mR#u>nM2?4rg>6Bn$XEFknC5^2zpTO!bxrz_a~8p&At?Uc$dyhF|4o zd~Iu}WR*+x!;aRW)QFfzt%xIJab2aVN~Y1!Xl&FmDjC&`s&wNk1!MM5Lt`#+aXpFD z4Ws`qVHVw3pHjRTYgp4;l99wotmrM+(Nn4J@f4Qc4E~==B-94t7!FerBtchz+ML-B zpUntnvJmgq9CksUz`wB?ui#gBZhpkya1k$07{6Br7JnR1NN+qBL$LpS(6?$MoCVUG z@PW)i2KP#~LnCiZQCY)egr>u0G$=Y+^l9iU-R@MU}QRe#vyoyR)ewHZ|uYj zR#{G1?AGgu_Ir_92*<5KYYn5F(H}~9g(}UfjL)f>^dr@%_7FGo1wH$g8gq>&sJzt4 zs18&PQ90oqU~&uj&}WE)cnc3iPvoflJ=?&5H}LEaPmQoSyV`Z2j%HB7P3+g-#*We` 
z_TQ%A!C#c}=^wxg@)O>cJTNH@s!$yR#YW7+Q#Y4bf@OH~-od}3x`SiDI(eNd<1q^1 z2RVcvP&l`mXU+w(eek!nXSV7d;$mG&sJm56E?7L_A?lrnk-D5Vog3zRe+g89h_NYr9GW^5>4<$YU0cxnq_Hf zrG5rSqEv#d0RQcV$8Q{?d?AG;pK!m=S@W;hvG^FQS;2f?W=Hf<;CL%fX$4>Sn^pe; zT>BqXKb7dKDTztcSQ|>swV}kQO~a!zo&RTY^iBTWLW@5ImpVrb%@wl8YN4Ay3JgDE z9ZK0ReiZNXLMWsSwRGM9H-3d@)-_t7!;LjwfN#vAi~1|ZbhyGeVGB2?6uZkG+@zA{lE3O-bwwEkLi%UfSS=?5|yS7efbG1`|~%!TOUNHodwrA!>&OK?)?~% zEf*~nnaRD@=cp;P&3XX-U>KQ34wdq1lIvZAS9NeA2}T^Y{z)eMFR9rNwccTE1Q*L= z{vUu@cY)6iY)tJ|e*j)g2KR@sa-H$xc7PggCcC&NSlb)EFo{*23`Az9Vhh%?-*yhp zozL9L@o1;vN36t4J3JnImv-bs85E!@Bwqo)XQ-q!m^=5x-_nNs`*P;}1sL%ud|)e| zmV$fF(4lV<6$?g_@B27d_$1gk4Sbx5A8;$!a1n^q0jv5F8S^w4u?5ffe)bWXq;?*M z;k8^rG?Z$9jDsgGvCgtCr1IPg^gnpe*kHem7cgwhCWdUFb+GY25Ni!lz4%?yZfQ_;p}8Bz=xS{ z*#vao;L6F+yc2)qekj(k3{D0DR4&^eekJ@SzJ&mNA|?et z3)~y{EAUUKIQn<|QfyziD0p{dSu)-J5!LJ`*=*QnpV^b1QRy?%k9zyLhP%dlpG*5Q zeP;SDf3|n8Yoe=@=aSdv{oDC9HQh%W_ZSbSGHANSTf`bgm(nx&rErhXyr7?2>W#x! z>HB?O&>j9PavxEhlZo!M#vYAlT0W+_$eY&XmfMY29mib`cQe;g=UQiDS2g#Yp0%D= z+*63UJ?zNmx3K+DvZVXsqc{iub674V7LuQ4peY9Fe6WKmAx)!C)BAEsI1nzyE}l+h zx{2XxVe)gRU7j9J4>iJad?`3QxHaGk{89c*`FrKf0)9G-jg2pfe2A~7FLAWF&I>t^Ahqw*nson^8@@cDM-re0taxyC3c2W_VY~;z6sb zYoF)!0T*_rFuhix*(JCc~@`(3%tFPt{Kh;y91>>Z7-tzFT|endC!M$d>KIoJ!L zE9u_RF>+ft9%>nGh}Up&Xjf=v=tOX4FdY1zTJ~w7TY}+0i{Sl1FMT#144f}tT)w`% zcEA~!RKBR(7b=amiXV^E4J{8BCEHPZ{X?Uhl@7D6WuC9R%YD0ik9mLc+~vC={ho~1 z)Ast_@l~PI`YO*!cSp|+o{_H2_AAD#NW{I?2QAkUcf~)9PKrE>N96A4nQ(rv8D5If z;qvhDa2|b#A0)GNPxPz!W5lv#A;+#+9x`sWd!3bB8=MOqD;#&a9`aQ62HX=|Tj^mp z*Y&Zht*fPTy?wClno-|Y1||+6Q+FrzWG2V+(Q>=c=krdmxe%XM36bL`LaVU;zNNeB zAXa<~o}<&jTk)4x4Jo_zNFYDZF4!&jLE!qp`@vA;xp=EsgYe>+_OTVABp2Aat)>2|G zPNNAp6T{=b!oAAqD}60;J`$pPM>YI?pMY2W;4gR3=ORFD&E0%=M1IBtav$)RgwJ3M zUgU;hBYb`2N^BjSCTbHGJ~esR*3y0zZMu)GDwt<+ed)>+CcCG)fABbc%f0V=x_NH) zyyl*Phhh&E_7^$&Q8#l7n*R6J+H~a@Lf7eou_>`7vGK9l(HD6B(MTV9(EmVZp~kTg zzOX~_q3BM9?B4y)vevrK*lv4}t{~0q4eaIiOy`5nZygmJSL}YrIs06D5Bd{i*|Lr2 zkqVWq4mdXq}S>@AoWo;9i%bfQ{EYaVX>7T6d7gW()g^gKGSGae>1H;dU|HeD( 
ziWqo`)bFEiq3wK4Xt%IQes3@yj*F4y_ko6Wff3@tx@Ncmr1w z4Zk-wmkvl@A(L)K?z};abg8uxdte{hp0d4*rTe&jwf%GZWA^j(8|aNEsI~25qk!sJ z8_`ZqqlsLk9>MVVR%%BKC*xpcw0iU__PToFS#C{!!=>Fzv8u0loU zY3F{|Yp#*b%Z_T!Kj;Q9#Xg%TcbDx2YmucpbtOAleDnyqH}Ma>B{?fx4BGup-95y9%g*|0(qZ#~L}dUACyLnCkl>+cDcJzFj0%-fJ6%=5rR=e~a}C zb}|>Szx4^a#JyDRxiP*Ywv?Kp55+3Qu0#(+KcFJ$gy@^mMbyZBj805%M?Zys-gvNcfZbSC?Wn7{xnOJ^2$WdSUXc^_1-b_UB=vzWp_a$64Zd3@^%A=cCRQ z&d*$ScU`)(7|sdKaz})|06pnpf6~}Meb;-Cn?F;#{wNwvBWg^%K|R-!_!R27_Kg=& zN9TPsp^a3-AA(M`ky!5Mh>r+awqg5y%(}Isj^Gp23~YdvyB}LzJNPHq=f4?S;;Y1P zs-qO*@mS4HMh5tMfqo~aV*iW15nGO5Vr%T5Sf#it{wG*-CYBz*Ek2#T0`tglm`#PU zd$GzkqI;fAp_RV0|IB4iP zyT`uQHksU%O17Vk`>8~=o-D0i)=NYh4r2%YJeu`;Xt3|0O;y9v?1@JEIM!xgbm8ZS zrT7L*X)7^UpP(;ZL&ts#O>;DQS92;1@1~O9oz$Buq~^x5`2F!V_+9d2o5@01KrgUY zV-Hdrq)Ti_>;>{yW|28Fm2T(9$xOMEt}6FZ{q!)Ef^Q zC~0oL8%0J{D*C=*TWh;y^RiOMY)_50Z1y312`0|G@fG z9BB)p3f@8wx)66uG(4-JR3t?%F*dI-`?1Wg zCi||3$S`=Dnwt+$r@C6gpe~A&+HnW)6kMXS%5QiM_Qvz5cYKyguUFzZbXusBsLOu% zAT;B}n$ru>7UtnS zT4{W3Y{QSV2>oG#F#tR17OIr~VO>eZ=Gov`E$db4FkVKRUCdmD;|Z!kOvxEyWcK1Y z*@#8^4mRLZ*ahRTPzLgM57tUGU~FL5oFm%!XS^FbsjB)ZnDre!PEKOag|T*{Si3Fo zd346wzMUMB=diL=b#pd%T#Uc!Juu=6tcK517NzQ09w(0FH@pw!_-Yzq0o@B+`w_EI zNUbcNHGnNNk$PXdv5U7`Pf&$AGvy!2B$tS=3nPqmq+@iuvkr!aTEgiQWCVQ48ZICb z`+mGQgYmo+U|+Q&BH%|XypN!Qt;}{b(e#g5il z5>pqV6FS3T2GMi3JCy?;#EKb#Cu~gO1uUCaIsY~I*J!xVJXU)ye=Erp`3XvoQyHo? 
zv@n(&k(ZeFNvtNtU$@0N9Z5`yVp)EK=LIYYESvWDdUjg(V&(58-!W?SW80p!-f{Mw$Ynh`0}ivOuj zho_(hu`<=L`;|lG#rjb`jH+ZRGlmT=zGZOK!zT-#M<-Ts8lQ@>5=)8HiQ?V51QuVx zmvn`=pi5ZLXMn~f{{F^8a5nY*G%KL!)0224_F>`fCcbJ19+nNf)&j#7_}3OfDR1IS zcoEpjVjqOxs2~2jT3~8Le7f1>CE1`u8TwCs1EPDp6aaaS)cBlKZGdiaZvCp z(A^CF-iGp4r@S~z@aet}x84B#Jqvw~V*RGD&ZAi2QQ)aC^+kMWuXEjtV97!Jz#8ub zEPe~KS6`FsfO~tO-4~8H9!z_d*gwU#y^+cXd4t%%H~IWpD*AUi$EM(8o=D93_>`x4 z7+Cobl0)x9pwIjHe;6yTOs4ywe)GLQ+_yhImwQ?B_VC!dS9#vqS zZOl;XZ{Ptjy1AfnReXe@_F6=kQic6pwLFGYco&y+NAFl7s-0or|w}P9ufT1nG#%d{Vi)yjPz^W@mnxAD2 zlu@=18dFb@jZpBq)ZdqIl?{B}gq%IV83*8~hv2CvdHn$fs+Y`PV4zm<6!CtiiQhWP z8NYDukKF4UB$oP!Z0AqD;H`Yx#7q4}^ozWITlinG@vD(Q>O=A&Sh_g%S^%cL&g)%X z!q{o>6h+soF5X9sZx=ZJ15Y}^EXwh*(!w1|F)QbK*u}_)dP>mcOVAADYl?Ebj6YRc zt_`Nu0^ZG8;hR#FD~`|&i1*~B73{^k{_hP0#V@+>>I`kG5^Z~6c_SEeBb-Pzky}CU zt$AxYSFalRY)*z>sl694DHN3>#R zSh>@zqn>b@&*#B}f4FBpBM5?>J}iCpG^xVqJHh8V!`Zq3A92mQ|9{voZYVB#Cp2Vo zzgvK4Ti~GiYu=5RcOz!5+1F#{mDn|@gr~bQ|7XGHyl_FY_lZ_e>s7!gHN#7(F=^gr z*COGXDsuky|GktibN1g~|K!MNW^)o)N$URwtbY9$YgK7^UkZPJ<+opW>Hq&e?*m)S z*B&CM6|cXG*LLt#*sESEs*e5@zkJ6%4uj!G!Md|RE0?jVuDqgRUCcxL+yy7kfuo1; zFb2Uo)jKXteU|UnLPT_kh+^TOBtvzeay>6$i6lmwfAL6|)GGB-#)dl6T$g%RAEdg_ zaee+57S~R_*W{D3DXMYDio9}IIrA^=UtF*-LUm1&?6Lp*n?7l$E5VC_@z4CM@8VIy zB=eVe>bw4@6~n(Touk|v^H=3R>KgM3U6tf}2)Zjz;bwVCZb^dWr?Bt}&?^8wqFYrN zH=l0y`O!Gjkp`JaNaemLzFYHFbn^8nyfUFB{@R#V6OK0p$Lc^?HGznrAxKo=|H{B2 z3tAJ-38S-9cb2sEr?{~*1q)G*Xos>nV4z}B%URa~Xi>bQh%syAqRcB`Wq#^+9V_5f znEET^y)gBAL5enY-UTpMSa}BO65gKSrSHPuzd2^&^*`Ldj3d85i^VBixx)E6TM@vj zr+%6VZeu32e-fON_e>zg@yJ-i*lJYv`^Oh+?@d2ZIsno1P%N6gEl1iU&r4k~M|f;w_RC5ms1P znBtoOelg?PO7jxU%jP;A!F1nE^)W0Mb_GA^52`nwOVWOqo_XQ!AI?4yL5inDwuWmj{TcUset; z!AVqFF$Kknz*P_xmSyuvSfr}@IwOZ~!Yg&&(m8^@&epH$V5K{`fUTRCutcyn@mjxV zoPxaG|9hD{K&vADENl^{*2|;`Nhf8os~UlhYMq2};&6IteY9Tx@jr1wou{kKyXboD zYzgBiN61Uvzl53kO*kpo39B9d;;)`98We@bkr(1y!Xsgn52$5p00P{p2^cq?yk|Aqc+EF;-5a3gBj}3 zX5vSA3d2g7or$Na5+Uv=jY}L=cqD#Vnwq`NF6IvE15lniE(ux4QSn`2qs|L+h3Hgx 
zEj)^I=0C{yOPm$rUb#?zHu*Q7ai6~EU4LLvDCa*9-P4YyQlDe{IV;Yy^YH&Q2AaR1 zzecdv?QjO|!^Yu?5%MRbmq>P6$cj@QN<*k#xM@RlW5mf(Wlm{MS}iO75eLT&-Xr{~+)#t4CF!zS z7j=Kr3W={pfk%|HMNxvE?k65#@`y<4H_21U@KPu_3Y9rIJIXl3F{Q1D>qsln%EY){ z7?I#u>z(K;8%T)vn^>30G10BQ ztDBv!6c$UOh*xCr5}y^{lZIl_i>OeNSe@#$P8zA;B3TvZQ-FDkCux?Nhw708|NSIv z5~f`P63WgmK;DV3iE9NplAq#&Cf@2gNpwj>X=Tb=yowhkAIRyM=6xjJmGh`46(Th+ za8`(K|49*L>Iqi=aSp*c0gTlXMI(&>VQ~qK-Fykw>Z~VN$l|x9Afx_!g1z{R>WrBD zOi&kHnleL>(K-qn#M%Diy>5;Rf+iQySZw@BTQg;fIRaCA6P*i}M8o1+;#K0%VPFv8 z7h#UzDvquGi*YbV-y|t@+{8t#f+*B{f@UPnVB%eZI|@F!mx;d&h9^k7GkJ|Z{ilU$ z-2b=sCf12xi!y|TI#>NbL{CosglWTv6yc{uMCH-4CDZSA=CQU_H`JW^Zd`TPZZ>p+b#ZXzBF!)T&pYiX5P_Cr7q1H^AfpmN6kdjSWFSA>W_=DES!MjGK^YraM zT}fB{XJ46^qchFpS}{pZNi$)+DanK>(*HyelEk{NFveV6t+Oafv}R(QuuAg~c9^4( z2IouRgZR624^zL>eFQ7%xatBcI0~+Uhxnjy%=|?%S-egoF|pLk{r(%T<|Mu=ZXx=X zCMz2JkC&S=Rv1&pDw_JAq^+n>I_v*ok+@kAcp*DN97otD?9%VTK1JI|j!LrX3~@cZ zOneevN*Bya{URQqX9~+C&GlqS6+KxMIMiEI@>%zm6c!H=pEd15aV7l|bWJHE{viw% zoK1~F{!McoL@x%IAWTtYpw`a`?4;po9kh~Opej5PB@2>XzDXA{VXEJSVX{%A#hLUi zs@C(x9mENQ9Xg`x#ZSc_^^49j$7^Det`P=_;?3h4gJ@npT49UH<;624y~I0oRFX+t zQP(H`g^A>+~0W0#nI5QS}WQkVdcqDbaDD?!{pK)NJHL`LyS=upXe3A1VTq`X%!1eiHkDVE( zrSMgHqbg7d0v7hJE(0MepVi$?l{^%$nvOPGlj{SFPC4?bj!=v5(z?rmUUj@NRk*uW zEzI1ibGGhgYGU?(?fBrV#!6Rn7w#Nk!7`9&Q<)om|EWOyT>;Q?RNS3H5Wr+P18aN!(x^aCgL z{tWQ#AI6`-v232E9*6~8Exo|TQOQW@TdEjgu$otS3Hzl-s-~NCHNnHQnlv`~I>pJv z`OMDisv)Q=dKVyD2ya&XVYM%AMc=adu^wrMn~w<4`Z>v)Nms8O1@ z)k5T4DVn znJ-!9;B4`HX1TM+L)5L;!LK7rtw+$Cax((AP@5hGB zRdob;ETsD+8T%jLN{l?9N{pox-X%G!&bER}Fomn?BB?qis`PgeTvI)H~JG7$u2yt*Ag+qqLzU@1h4uSLsW-r?A8HC`&gGe+s6~ z5I>f8LpXemJDJ{N2eRi6o@`^zZK2pWbGXVKs=%viF;7{~ip5hc3P~Sz@sup^!^tFl zBnxHLtKNn7HC@b8_@`=2I|C&r~H+ftl;I;id5jvSwKUc1#@n~HrdP4X;e#9R)O>)aVEh~cCT>T z^o~l8lovqW1XWj&hsoSMN%CD-CvK|yNs7tNl8qwU$JB?-|5cY&tD@I`o(NOtmE@8% zm7OAON4lXXR#*-hrEk(A zwb!UV%ci~jpJzdKf^=X}pzu$8Lw+lHwB^B)M3rVG4)dS)QF^#?gG{IkbG4p<6co)m zw5F{sy4JWwwHlqtOC%X(Ex7ni&y_79$jFB#8!XO~L*PnfFxAxZBuTY*AqvtzSq;+h 
zWhF>gxA09`u5426k7y=lw^L~orl&{uFm-fk>C!XAPqez?)6&VLflE)72T7dEdC@3 zRYZ~xISl2=t7U_m)ZmObpNoK*a98kAb*n3kCNYdF4zmh44$GHzltkj&y~&S0O!C5<=WZ zG$9LB_-%TB<^RM zJTw@&>;WfZ6Ws{!Wcv%#q)(XohejmG%G)nk%5sh~KH-`8nJ7ziE6kThB`Z<<$@JLB z!Z)p2c>*LkWxtxfR8gntlaVecx)=1dBasVk3H!8S@;#frKGVx0-CHpb($Tb18k>B7 zssbmzD^4p}ZSJE>uNKs$qsbE_j14z0ANHz#Bv@5J#1Jx84 zj5XpkMl5Nkr~z3HvWW#BNfoV&JUSYiFMJec$glICS6WXKrU*Ob;nZBWVSrWvXbP#(>EQFcSV1a#MgQ0lkaQ>FED?leI>s>^42Jg1YJZ`H#bhCz_nw+ssb!Ov^27e}a&y3H;9}=%? z)H@81V%#soXBr+a7zG4lFvDhor=RiejOWH4GV0DomB+X~Bda#LZ^mi`yXD4bqb6vq zMX&;Id~JLcTn}R{jE`U!&Zt@hccrl&#``g>(s+-?6&TlJ>;dDNjB7Bg%Q(e@9?v+X z49hZXz}SaD%Q39S$Q6u-VKKqy!Ai7ow*L1pDpEl#fl-w*+DO8oMcAaXRN5X$(f| zLEm6}Hh!aFMgMsw!=nG|(*-+Q!Cc(%`}lGEF}$s@zk(StgXc!&#mIM!ij0xf8yG92 zBW!eo3|llhBnC#t_&WG7c4{zZ4n7Ok`i;}VSVO}f7<<&H%^Oy3Sbeb5X~Y1=`v_iP za2FXq&3N{|hvD&zxWTZ-peHkYmEp~T_Geg%fi*DBL<4Nb6>0qZr+>efj@&p6A4W&nutcNQ`bqzYpZWf;oo3+`{?|7# z*3-CqM&;{&?<%-I3=e6XI7ZxV#-0uCVZ+M8u#Ul1G*-}9)8MKa>m2OQ8S8J{gxFnU2ov}at4v4et@ zPXpfL#mSWh5VJ`+A->489&y32Rfw?oRC-}_3+`a{R z&%oOnSU>~G`r+Tld)Uvnf33%exh7j92~o z|Nqa(a)O!oe|U>vL}gr=ktYYM*T%gF#X1>SHsixUO&VxYqw<`BYKnm{O~P{nv0E6Q zgUCt)xooU{@NWajY&;9z0poMf7Z`SFta}g_XY@skp0aTtf<08DyBF*c8=noEHO?pF z9vjb$+Hmk+jqe5@Mh;`#zu+llc;w(CXwSy;|DOH#w{fq7F`IEh1*;Fn$q>v<3~TuB zHyZZ$UtJ;?Ga0KCL=zg8VyvR^y2k%7FggZfxpCFT>I7FHh$c6_^WXZ!{d>*EZZ)cb zhD{p&(l{-Qvp9H48|QS;>;3mmf|;)2;|*&h;rN0bLc=114jHeSj2A4V} z`8a`tNTP78m{ET=JY3Lj49hX_Sp{H2hVqQjxiIdBQH3^a%7}o1SOg>6F{%kho<`y) zLs9Wh!`(D|hhZ5;?qGDejH-;`RUKICV0XjFMh!N2+rOwQqnlv(0HfZLJ3v@+-*%4 zX1;38@v<1o{l2L6dy~=BLhJ#-b7??7rdEmh0Pae~w`?t2@?kA+BmA zd`NX5=w8D|p8@+z7#T<2f-Y7uVi7r$sEDaTGZ{~2Lpdx*Z%6;iJfemYbHN8xTVF^H zp?94%cpdA!UO%lK*WM5&D6Ul}3lRzw-A_ZG zB^DDf&oCL+ikwZAAQY*%a)-@i`l+MI9OkQD7}Kb)$XI5+c3#QXI+9C>G_cXyh?ev? 
zy^EGZ%vO)9lj*nm95IK8g%Dyf`4`2>-e5;bnR-7yUfU%U(r4!`O z$oWz`g@c;ec{PHpON`buYPeQGu0(HRYAQ3N8*~FYQO?tP6EZM~&5g&bv$XE%Y} zUG0 zNS8WO%~ThY)2NN${P}>HS03}(`w5i@P?s@BKZcuW>ZD(g4yZQnkfkhrse^y+|LhX*;R68xbR;SQwNsrEG1LdV;Ek2pv3`U3hn0`ieOYf-d!2ISI zuxstpld0CkeYKMAC6Cjo#6QX)xtf|lda>sl6DzdA`Zj6?@mbxjucP|VUG(47@z@0` z)z5M#%Fd;c>-6H#kz5E~oSCdZ7uA2&-a|2Uplu2bdQh&acT;yNo3y@k zG447wQQe_!q?%GA=w)Dxp{cgy4E+uiovq{n!bGm5FK{VLzH&?~r%k2Dvw6e~b&oz! z3s?IqXSM582Ik?XVRn2b`IU&nP9DJi&Ap?1N)EURLX|<<8qB8@QFnml8Jwna9Vth7 zhmcLvzQ`k#uj(M^m}C)M=;P!M`b~8XQJdC@x|sScONCJ9wZY0rxtCgim;k-;4b)%Y zKFihG>%+;3OdEb0TbKAr%L2Q3Sxm`wC5uoeh~bzMuR)d}+v)AKW%^Pon>tUN0FQ)2 zy8-WW58OQO)S1d3>P>wa(UyD*rl3<`s~V{v!d>+erN{(gy=KP5@j#*_xTOM^(Ee4I z!N0u}ENDeB1#l4lwhcHrma3|9Nv(!y#J%9C*aud^N0?0itVN-2=O5n_dXBxN&+fZ5w<`ap)kf)1IlZ zVDW1VCC#;JHEkfKfTE$nI2Kd#@06x$J8=G~N;NQTHkIqhf5F->D%ns%94v2_SIY~f zE8=9SHum5v<1<)!SIDAdlSe9DwGptRRLohgC6B=$b|I3;+GKw)IJ81;F$t{ei;1tq zNX#M|S5=mYVmGm^!GbWAjo{o|Ccnfq9vsaBz)*dLWw|iU55Kmaox_%Amof27edZeT zn&Frl;KDmhf1+p6FR4f1OsGmOCCgD8sq&PaEKCg6|Hia6Yz-f$)KlO|Y$Sz?jldq> zUPuqz_J8&r15b4g-+J#lZ)vE+mIZI)G5=YAcmE^bai7WG6};BZeQ$g#z*o~RKm>mD zzx9>$&j_p&?tyD_n6L%BDvCHsBBUtsq!1w;Qp-_2xS8fLwrL@K!#_lpkFF5YD>g4~ zTY@toGGR+xi&%H`yvXKZl^x4$wQLn_-E3WKq^+X0g5`wyjXB>^({{!-&$hs3vbVE~ zwmsHsWx0w`D~tEiq6Hed)jCJL-Mvso>e_F6$OtM_gl| ztliDk&(*`V(3R~9hyJ|X^=p1+?x>t4*~PL4WPi@CmD4O|TlUJVv6-hb4rbKK{E!uy zTQHP$Hq|!5 zUf%xRw#s(TI?mGEw4E(N*CA&>?`jz70+&FBzFFNWZ*E9TSfW0|JkH<-F_NlIZ zGJecR&CoLHX8w_RCX>ue`+h&IZ+2&qwl6Jsx#Yo8iN#tJdXVHwj43!ZrB1Om#cLFK zSYS?ML&r_aEWR~0O_?L`LIxH)9LZGmN>Nnnf9{mD@BA+bZNAg%X>dxm%je>G8&r)bNF!{FhY1zpzr z>_D^KrrEr<+qMI?71od7xt&Wk)&7$2h=rlly~sb>*T~b=nVy@UGd_1n-eKIut9c9a z|8O>izVHvus`;yPTV-cuEKPrt9+}xF>ljq)?U|j@3(`LIHdDi~C3-=^i2^?-l#4S($AsUspEl3rKGD&ntR~Az;^ctp?de*cdmy_} zZhiMTVZ7Fq8pV8IqxgO%+Va$T+^#yxhD>y{w@dCq!D)Qtg1|9pI_x5_3;v_0Q~^o#O1|Pm{}$Di>z2644-LeXJfcKl)14 z_sD-Dl<;?99YcrN3!1u-cKNcO^*!^d-f-VXFYjyYV|^#R!@P^V$-XVV2fjzXivC=9 z%Vq(^=l5K4{gU4}_ifgyjN{+wv|(R&eBSr5{fArc|9pS?eXaL@z0dvpIET__L|!e{ 
zrNWX5Ii=v0LSKN?-KP3-L>>fKZVpK@9ZKSy$zkrRRf71)dd;E6KWM>UDoC;(e z&petf=P&UUmcQ$->52Rm%Vztf(4&#xVyhQup13)|9IHo+3T!F$@X+jGi$$e$|wE&d_y6iNo-{fnVCp6zSzFYHhC?swhHP0zHZcTFqv zE&1!~&l5hSe2n-o^nHc*p&uH4zLD|HSDarFYbyGtq`P=p%I2gZiGL?uPgzmCZs~rd z#}`uztO#q&n~BZxP@$V|fjiR~>00j|<~`vX;QzHo_>+F*>#>hXlX@mVd;ZEVmA5{3ZLTA)Oa7lOvu~BKLe5do=`L~|yTcL) zy%jw%zCgnM*fWviLZj`2turkz%|DrDvOkht6nDVk+v!>F3CH!;_J(;Ic_V$S|Dj(7 zxA8XLA#V+De{YI!ygw>1)IY%6*VQ5KO1266b$UjHjG5`BzaRfL7yYLs3y+;T;gcITyd4R6bIi?c!zeCQ4jt~7U6tg!!Je`YIWyJi^! zPj!|2S#!xXB%4?h`WgiT`=KCT$QR;`_x$Xx<*wpx?f%*Q-aW;W?49my@7?8D?e6C) z;w1Bv^G1S7_;SwBoJ-kHv)*Ku$qdb`ko7Qkmv@P_)VwczcnlxYA)&P~wHxXHCE||~iD!jG}x`03G1Jxhp>)@(l<(`^{+Ri_= z%(i|p7v}$AnljVbzxf|68Bm>GV)=>RLYE){YELLUj{*DDSLL2GN$BR^>22#7=Vsgi zm*RTo+JvZLldGuvoSXKfxtF`^xvRM+yI;FwJtaH=_cC{$tBh-^lXJ#6UpY6qKll`B zF3FnC*!nx_+wXxvucY~uH4ySB@@4GR_#1I=qas3EStfGZ*s1(BOLhBGd!E(9d#JM7 zZRvza$mwbnnFyY_WU`3XSw1b!5w}Qtln>elVgz)3GxR1}zA_0cRA1E))VOM+l938l z-C^3FU=^RL_SMq$7vxF0F#CYr%pPJU(r+o9a?=~wT6_spCclgu#_*Jtc!T`>qh>IP z9@BivQ=ENQ1FHiygvMfZX{A&}st?Oc34Hbs2CsS_@SEj{1E7177MK|55!e7O!>-Uq zA0^Pjq=16mkQJy2u)=OCo11RFYOZ9mabuWRrWD(j*UiIir|rY-Gput>scd;>FY|_b zY;I($gL9*Z#lnX%vD77!rt7l3_?D*md_^{js(@_kyOOGw&^Hr9Pw^zMTw|F3I z6`O%wtS-2ACaGp+jdT>8elw&9#jbtU9pJ(WC7VI3Yp!09=!UGJ4pWlt&)#R&(oLv7 z$QI@!UzVuOV2j>O?I9-uX{amg7Td}nl&2~Ut(MJDg19R$P(12Zodx&z z3$h<#lu6*wZV#r@VRQ^rojFb41(#C`R1wyYX6i@eiWA5wV7FU|s?9g>0oK%tX#2ns z=9SBVRsV?eS_;ED@KC;pmS#Y2=dzG1v=P^d^TmqdW8pV^Jrp_%6%pN~BaiYbs&ZW| ztRE#RP{Zk3%uZw{5121ZUv@CZn@*c|T27!P6lLyX3NckOZ8Et{ifNwd7w9lNWiEkj z-35J+Tj1UmwM}4GKLy>PRmwc2qH+=}bj!e^_k*-XN`#KQBB$WH1C)D8do@p8 ztJOn2ZZ4RuCLmW#q832$s55<#&Y_c;W>8FT%@k(R=>zm^u-(PbSHT$l6`V*CSXa`a zj~5TErx98J3hsB6mf#A!DQ|(kkq!5JhSW`}4*tG^|30iz4)i6?icds~)LA+qm6ii? 
zVYMX~Xx~Fk-T{TMt7H@{(LXWwnd0nTwg#8V73QzKMVBoWYBeuQF%9Pw(J|h=b z7AlXy)YeHos5-Ry;E6g7O<_0Dk=%exIToy4kEts3D*7p{(k3Pit1ygi1dj9?w29t} zjPN|!81;aGKr1cQi=cklRr{#+z^hGz%8LvOS|<;b3&^LWc9M)My@|(j@jbW=Q=w=w zO&l$bf}+h)aQ|hCZKRj zTCONtkXesIrhQcYDtAG3XrB5`jYhrDt)0{dA%Ct22JDk$G}yl1Q)TGs^ac8F=v`f; zpU@P;V-0rEz33$R4Ku99C2DyGHAxzsg!J!4@fvI@J2tI6Mj zveZ>mA#;CoPiUK*G7U4;#GRkYzvfzT0(*o_Ve2vVp{bCIJ64Wdh`Q`I=$M^TTYaxdkB(p;SnzWgU@V=%Lyg~wk-d?f0F!>lUU+cK!; z^dGdDsl=247oUY;7!Q4$UP^bOOG7th8ubx;$NN#m2KO9Un_5mr>r<6+u%&uXA$Tvx zO95exuuAwH+6+B}F2VqzA=GS+1ZF|Ug;&zp;TtO z=?_b?WwWWXX@~i$rIe+=X*Qo_`rR6Ce`URH>c@@d(qR8p;n`}V8q=Qb%1j|AYxC7l z+92Sd`>ThgsbJ2OpwRG}Py_s^v&1pt0^vj;BQRMUFCS3SloZrwj_7w_-+r<*{e&qB z<+3Mi2pbKC(p#K`Kgkv1wt%~H5VMkYfc>AK=TU=5GjSg~`V_bf3#n0xOJbzj;%p%= zur5$LaLFI<|IOFWHw)E`KT!F2;G_JN{4M=M{44!`L%X7Opd1uz_67O~&xC)#T{~OY zDgLPrV=kEInLW%H>OI@c_B!OX{+e}XJ>z9wz8A>!KSl(IWCkv#*AmO=)v>|@LEoUB~>B^fxBo2_CX_@ zKM{JiIzjEFc2#541@Ijoz(iVKJPSpXWkPph7dV#d`uF(f1o8uCpj$E;YAruOz2s=X zEEE@_g-_6{;{p|Nu04cqN&i4Y|6#$UZ{$6u+VmEUqlQ}MhKzMww|2FP)*;qc<^twL z=HZqa)*9B`=0RKs`U|z0Wz84N;rt}}9J!e4#7soB%87d2DsmOkOK%Gml?UoyY9sZ9 zoGBg^9O6{zvRoca(QBZvR|ed)<543ROFg4!fkUzYL$WWq`ldmqdEf|L4(+}vtc$gB zjaiOip(wTu9RFLvTUZ+>S!ZG>I6VvK_4F4w!>1`j<+t$CJK?L(iqD0Aa92P269Rt) znhI5f6#)<8m5~7}6pD^QzwH5Zj9LY@L$^!{q(I#%9yVwW{O&I!bk{0yA5Hz~Yf6Gn znn#B|4C!O-Wv*qmnVZ4l3)<@8D`<4_OSx=jBI9OjnYNgovmL1S#B8c9dyIR_HlRR z!Y0*8SK;cD6P3ZlM)Q3~PunY;*(XigOdrevn{0PmR`M6P_NI~M7p8su8*ukZ+%@R7 z6=tfiSJ_zh1-%ayNu790Oa&KX17euEUyf6XsdlxzatcvK8>ytUQf{FxRxc^-l`?9) z9w72j^9_e)!7%zFBQifT-{>u1;Jn80>{3K9CFxT1TKY6jGn!gQJ_OT$Eph~L7F>20 zv_t4u%~Csn5BCCCT-(VLq}^hK_*uviqM_7u3s>0;s&z}S6Aqzr`4Erif>mq*_TJyc z5#liDCQX-`N(Cf`)J2*ixupr(HOj_JBL=Cr$cpBHwn65VV5>c4(oDT9k@iaV;^t3W zV-s)v)%u6IvuO}mqIZIOJ(6$EWwMRfv&?m79&{DMs10Np%1mt|?rHT9X~aYGrmb39 z4OdwCv|K}7qK;CY%2Db>s3F|enqiJ)5EvQPk%>^^D+TuU&r~{9g6;;c-b7lVD$;qV z><$F0{~d60t|DI%^TFv?8%)&8wY^ZIIjWRVZh^11r~FbHA|*f#>mKs*_X}zfyX0hRELN9A8Cu^9a_rs&-rLr*i5!rG=7- zXf9P=h^(+5c)$lBf@&sD0juvUdAIzBd|o~&zm<8#q?AR?z9G)t1ZAl56QaNt;P{PG 
zD?o2Sg1*2+!a;4O9#Ferk-M0g%m}FGecEWwhS+E1kK}To4HkjlH&WXJO+iwf3VzyZN^L4Hn_QaRO*iAew~NiBVYPqkIrd#)afY*cla-SmmxfL=Fc(x=a2Iyt&Jjs)|z% zRR)7Qx0BLD>4#@mmHz1D^+Ietg(!qwb`#OvKKdnHoylUlu;bY;>^Uxp|H>Tz8~#On z{0<$tQ*0>Cs!~jVzCt&o5r;!{qyVlyia4yx;6iAdS5+7tFMi(Q~VgkD5^CO2xl$h-a@N zri&p)>sD|&R|hNbAzXb9^h(F#&gSAi8{EzDN>3-czHq2 zg#TX;HXM>LI+#zu{;R1g5zi;6hn4EeXZbR^Y>RNR8cf#nVD-!7wTLS_;xQcieFUQY zEAj_qQxx7yYpl-%uqFS7So6MmA5{T@cu&yOZtS};^hf44tFiUDOWa598dsiAG3_+9 zHaYp%;A$VsH{px&8@M-YKbA&x(vG=?j?~Y9VjaZ(or&&oUoe)(LLFg(8mS%uv-mjd zFdnB=Ib45la8c7>r(TWv`vt8b*hP=&Rl!r7Pt=6^m6c+kTJjTh4KZ#mrBF}6Ogs!4 zSR%O{yuD6fDe{oH*VNBLiz7|#19tG~*i9dyPFqmUlrBn3@c3O?Bu#}fyZ13%RUZLw>Eyg(pta?-4`snnw`fA7l@6i*fDS zalfx|3?F9dZMw|&!0u1tA8`Az3ob)9(8lD^*N|u0>E6_2@&&O69G;n|%dF5!gV#G5 z9Y#)FuM|>fr4`njR4mF&aJJ9Eif>mtp$~aO-%HrY?%?g6fW2Oi&ZavtMVV;e305*w zv97(rSbvw^3@-OR^f7374wZ3A}kZS3%L zc``D`zok}^59$XM#fOOGk3)syxUd3|O%tIVlsc`@(ftre3;3X3=Luv&nc`!>B0LTB z7iX!{$;)&p=%nuChnsHkfALezcPyJMlPm>s+V`-0Gu1M!=Rb2N*ze3I<}Kp0+e`+1 zo=QV^+zd^OL)f(`)N9lunvreEUx+`^X+DU!w*VAa^5oArh2}|JQDGY{B_iS|ifYDr zxJK_xS8=4Y0KLi0E-8H$X(Ssi+X+vo`N^qW#|!S}usc`Qln1sm@a z#FzEp`yz3&?!=io4BhF)$eKS(o1j!3BP~N5bXWK;WFgNR1(n*Lg@yQB4>}}SP;N{` z&2MpFb6`Ng6u9Sq?7xoM`Z@87wvlcImDE0F()PucY+G(^18w3iHqpA**3;g_cF!`w zJjT?7Ph^MFhpDyLo#jwz;84$~MQ=jP7DcO6LAn6qwVrey{THIv@4#1G)AwkZ%1&gq zk@8My0&?sEaun`U0zC9qrM&t`^=M;>cces#bPOBKZQ)*WC83V+9x4G0^jZsXo!J^p z44p^~A#VelwG_%(b>UwIf-!s=@JzJ65lUwLHH$VG`ZLSm?aL`gQNx;zsBRKsz)Hf= z02vtJ|H0qF-wFDJ9^`*}d~iDPRKG;bg$Vwwz;->)}Jix&7DlYaD|wEfO9H|S|SamYnJFtGV~gz8@rm_ z!_H+(v9poQtY8{4?dYLYW2kl1B)jVyVGTE>x#*hI6`DgICQgi&n#((tYw8rOtX_)f zPS%4L$fLWm_c@t2nG-C3S>9TzqL$p7pU3W_?~uibBKmfy%2|QG=mV926<{0Zw8N+Y zQ|c{vmNm*|)X$m$QSu2MakVr@Tn(MZ0CZ-$`oevTFW39pdl6SX%Iosf_Dpk&uDPx# z*Jfu6XESF3=ePVAXIJ;Az<Y}8QOYW;z$=>=}pO9orJmW~|rG)}=NIYh1k z97kVSld3_%QV+QOPki0I)ji|gM_jdB`Ltz8XVrClXlSY8>tRz}rHn3J@yV5@@f5*-PBV(LW<3|V50HXUWhF%ir;I-agawIO@J2_~xR zq!6Kre~#~=?}UGtP(WU)HKY!+FHt3FWS?)}X=`acWFE_3VM z;Qe2%3W^_eYg3{Dkv6e`RC7|V9T)K*?@@Kh$(os>6F|3rf 
zi9ZQ!V54uecckYh_Y&7dXHDmq{1y2L`CIcU<{i(C&t0B#EN4sZ$^0FjTS9Yf3SHS; zGqiZjkpg9scc&C8;w{`iWmG{WVQSu90u_Vp>79k+KT4j-|pG|1EDf zkKnrLjCCeDySSQq{JwGGYBh)ai~ZRw+dLs3!ls2+2oDMC6H?2*7J616nXXg=V!9TC zoM|kw`kK;K(IV~=l7;<&j?f-9L*phO?N;_{MaWG09#`8u!kT2~9RnST-Dj%|9qEDG zRbbQh=*QJ}Ky)n>i;Ee;9<1kV_`wZObT@SC&j(hZ8qy|EA6mxiQ5_j8Q0Tfm#Ho1$ z%3L906QOY6ysxVFqPw%}PJaD7PfpvM$vM??>*wzVrPms{AaRaOu?-EM64N+gZ<3|Z zsFdz0iG`?w_Y)e#c8r`Jdd*hM9LJTWKN3atbTt}vsVF&LEGSMErU$zE1Kv>Y8c%ai zFHgWT#8*8qM2uB_)X!1h*!!mL)@k-CAx%Pm3;i0h)bY%A-cr%@6Z@QMMtoOa$(^M! z;zyiopM_=OD(R!#QeCWV(svQp$7N6_6!^{0n?p-ZkD+UaPO3?|`q7U-0h;R07tgnm9pRgca!pwX6y7 zB5{GX{%O7=-aF`iEqAYRr@8leTKb%UG$~zeLZTXKPIT-Fdl7js=6+mc!m@-i34DCR zSa;M95s9InZFkLoa4+b#WDR|b+Er;K&w^giB~+H@ij~DooYQMjiA+Sr?pvTTl#`Oh zg22OkR(5FJiIP-3#>ySwNpq(8n#FFtY-wfbYL@s-oRjHAE96L`6;47$y{|r2PpfI_ zJZ&WWz&fZNl!Mkv9QhPn>W@$-8mYa&-nftGtFO`>iXp$?41S|{6|<^9^}aW9$3gNX zDM}hG;@X8%fljCh-G{pEHJpxKe*_e6`vz78E(Px6{9F~75NI5r0t@_AQAd33KO0C8 zuS)lo`g#=Afn8;qY_03~5E>WpAu=xdcJ%A$Hqo7;J_6kQCS8$A&Q>F>)65PAFwHK2Fs6!n7c*L1|1d!-{#WFHQ!-+S^jJ(q=Yl_}oR#oEyp z3gz>i_GR`u_Omv@TF9DcDQ|9MLPo{aXNNI+5JilJX2(&o6SP)uLmQwrx;873>pwyr z!LJ@w$Eq!$%rppUve{~DWDwP%j8GpH6bs_$CD6C}mFx?>*85=CuLQL97chfAMs^)S z)J2715Ale|fMQ4n@erDwL%>w*0~7cRL=;y2D{`@wP#tWAib9GOhS&NI-I(K8r%9-- z)==YA9+vVQO1~GeR@0Ho)kS2l$S=@Em@BtJ{GBH~kj^37cqFBxchCb7*&m27#$P|-2;gU zVLH$cpvKi4wIK$YI!{m^=!c%hDj?PO>!tJ)&>E3o_uEkWUV}9`rrt!iT0I z#MDF<+7zEFK#8`BRvCZS0~T@)5Z`Y!2CF&(Ec_RNe*EX(xs~@=t#iPNFV=@a9|GAh zR^R8ZDK@9_u}YiRjk6^;btg=V&(+cps~8)UVLs%~0P%W#*e-GS7v0(&vEGDXZhWMY!+BR&iUvp2wYX5;Ty;E;L>*7>ct zl3(zchsPqUmf^JOK-E;#PF54+UyYK zPpnE)x+qp9Kz&6n+mt$vdJ!1XbURMe?uZQM;df6($D zs3}i_TJ?KqTGK!yzEUTlB9sIbnMJ6IbfJDmSFIDW@oi)U)L}MbRg4){V;0PqiqVln z-vV=QD&`+z^nakZG)haB!V1NGS+VlnjJe-d(`68Rd4)a^dA?kfMcUlB{WZ3SAPEqxu zw?>bQULRF8a#{Fap=Cmz+V|OJTANxbnR}Zi@=07jW&$P{QnVFvxabHh@eS}E@*MH3 z^VaYe7Jii0D;B*2S()C+9Ary#)zGVL&u(C{>Gt#;XmIC2Gw(SRR!5KuW-;DDfAj~f zI}|3Xp{7v~J-j*U9^`QX(U@95PhlkB4=n6{Xh>C|TTso(F+ghgQKS0@*Zm3gsnO`K 
zKM_UrUfKj+`S5FG@h)z6Qe4WH!tV=DOlgw5A#rJZ&6t-F8KEB? z`|X=;Udsm4SJq37(5nNzw%#3t|yP=7p zeL|i)HaeC$wmZ%`?l_J)vh12Q-4xC~AgU^h1Fby%{2RH~axA%5@={%0eU-#S%s9Me z9H!nD*_v$E>mTMr56+MA>zOJ5BXJ#&$T_WRBTETA>zi#*< zeV+39+UH$gpQP{4OAY)&ezk0lAPWeEHZuC_i*XyxvdyCE|rJ>vV;?>jQ~W=H0acK_@vC=8SSR-WsB(#=g~ z`}44C(d!c06#Sf=T`;3S!`L?A7p+s-Fk**XRmkzZ^`7>=@NV?p^E`F8ch7M*@?7*5 z2~3fSYum{q49!QGW+0{?NOjkzDEGtxfqlMB_}NG9UaqeBi*pKR9!q=n<=&_LAJ>1J z|Ec?z^ly*STW2-NS(;bfT_W&Y_3&k)QVYE+@k{9^W$#w_v%=xh!;0L8czIlSw(XnQ zZi;5_>Klc-E++SK=C1E2zSjJD{d>{eL*5Gt!A`X;4cidWG;(Rw>6nRe-Qz~b&WkM> zS2H#v(jSs#?nQkPx4YluR?5!K+?5%V)jE53?jdJc&vtKu|AH_`*-LoYhPF!KWux22 zZi%ZIzc=o9%$>;gVZYlY^EEDwHj%f~Flng&oM(c&Fz{q!JoSCA11_ngCK376KXeG= zK&7V>Gnt{8G^#J@23ns~7%{`=b(e6y$z7GxC8tBq1(usHM(k`S6AXql-N({7=%J#7jx{3RH_tiYgx2BHZNoY#z>cWU|zC z-j+FYGOuM%%};jE@KzPR$+22A(VHI2@3w5Qt#q6Xi;O-U_bM(gYJBM5){T5a`n@iw zMU)G|WABe{hpVbHJ#SfV&0H-n(NkH-RdR_6Ob@OvPO3GQ681xmb&fd4P)Aruwj<7d z!K||%(G4o9Cu+6TIOVl;O6)6^m72&e<&w%^^|{^~%Cn*DAaq<_1GUhG642#fi0;}^ zWiF!q2|@{fW$#t@AXg>lnfy@aXlJ~$QGScO{ka2jYvtC?U7g!AuWY_6zrM4V^Nw?v z`>SuOSdn;ab0$nH5}!Odx_87iWGHjvaZeKJ$8QK%O$t4lx~|0pl>Daomp#)0hXU37 z7Jp5Dwy(1AL7U3gaTE$m2C7V`QvTP6SdA3Q`W|jt)omQ8vvTdK~37td~*7^eX zK&#JzL3kvb6P}2rWeyQ}FSUvebvot(YM<|^m*hm$;OkQP^Nk(9~t}kLOo&wsc;|Uy)ZguZi;)U$iotcH94sbtkH^ zXF|T%OGJey&M!DN>2`q%v8x<~n1i|>a~P%6T7ggAtG))l{_Z}`bFSmws)0!PG+EhH z80h8@^AvLf+t|?Dh+iTrh0k+zv&3@8sSHfuja3JTr+kM!XWjc;Eu8!ES2_2&r}|zB zWt8VyHJp`8saZG;;5oKp3TgpU-uB6-w z{jlDMRMNC9L>W||6X*m{f!^~YQC8CEE8ee2- zp+1SNVt%zV$QPcOcUY^jxw0wX@K^NKa)rAlcozi91L-wUeMoMnR{-nM)3n-NJbYB- z>WI^!=j@BD-7SSJ#Vw~yBbgtyB=HXPjNkdLd*66wdy0B;+z&nF{Ds6$ath{$exTcM zBEQyD%^WcQY{|0hu>N3kS{GQ3m{cyF)u@(aOX84zNGpx$j)zhWRHRo)e)&%|U5nR? 
zAr_^{81w-i(}PfH_hYgt68OD2dVTG?vRIClh6s=ROMQdANgfGvANO1?S36e&=kNKa z^6NNHI4M`0Ymw8QpO)J(Ki1n?nnrM@wxMj~&mpZ$<;}$-`xHz{2}}AhZh7Q1`!`PI z?Dl&h;bxC&4J`NF!1`YH+WY(ac*e zTT|?pZF!cdz*Ouvxw!F6FX|-mM7yF4MyKK%s`kG_M>tiCMFy5DJC&K}7OVg=vI(>( zn^7k*JF=T>MHV2|K=HYtasd0u9N6va>TT?~=3b2nn90shdGUEI@^0tZ^SkCh!fr(8@_<7P#_b=Y_x-z4;A#8@kOw}B0Q zuTS>e@wOF;h{wE2{&Qz3e~3I26KTV#j_hmhA|GdNj=2EM@!pYdx7v%^^36**CtZiW zL3JW#DILW#fja&+-W8sE-o}1ErjEScm4QjpPf9nrJH^kU|5YjF>@OAiG2L z{5n|_)1sU7;aV#-8Z(x2g?s*2zS_RR-ahX2&MEnMc`NgN$Sa>WI`4X3le~Vp4f4*r z%KHiNm2!j&4eu2hW?e=Xwzf$4rEuBgV+n}~t74AWGx@iU)YwYV$M|IVk?>ZG3e5K2 z_OlWZ$jb}KPRKv&D-XrvC$gZ$kcUYZRgb%Dsq3hTnz3jv$P}VC5O39%$_%-xv<)~99yqQ!h?853m!&s0CT}N@2OvxLFD6W6b@a)@|7rCF$dnj)=A(2TB2@fsD z&L-k*;|in|x|&E8=#n@qHpH>kszohL(qoF4IwMOipq&%j1|EtoslC^heHju^2R)?k zcW{3KSD=W4O^7VZ>pGJQ3(_L?E7A@f5fhfH^vY$wdy(2M=d z{z~n^tl|$!IjNG67|8Q4_5Tf&*CPL~=7Kv6vZ4%Gr=pJMXcxo4*j@GY_zJV%O_;*-nvZ zNiCDxC%r0gD)w5~zK~DR{v=O<#$ln{bMhYXQL7IecAm05a4~;d_SW2hD-s?t%=_4P zJ`gTekrVWXbRs{-G|N2MdfC3yVRyW;pK#bidfWS2viNXr6TKKyWL4F-atTrJ74=%Y zo4g5_)Vtyj7v_qS_#NtP207n}y-iEP+S@DBxkNQ{m&nZtwUg-- zB5`K;Pu5JwuQBx#Mn^xiH|OK%e?WIITPsH_)OH9Vp3?4Y?+U-$SH?eFXoYI-LTRu% zhTI3$=8~+=Z?te$xA~EIfn_T?Oh1^?co$cgd5YQKS$b2YCW|=*0!-RAxTYjBZ86QGU!bC+V-$BEU|!mxlw}+88y{YJxZL zQFtXD*XeSG{7jySid70~0gq7U8wIpXraW9(4MpXn z%HPUUJo^>TenU5+rTk1vmHv?KOIy)1UJJz7DM1ka#02Qi(r0Aq@5Nry0%;Qdy*w~j zAEh$#9z1QFSiPT;iUYt|2cRV;yVW&S&!*%$qD(=250)d;xwd_niHSJVTm|;N4_<`-YiAqybR3o;lh1K0ahLl$v z$~HL(k;e+D9_rC%WSsw?pLAHdAZ18rrMl>kr%Tsl54aV2qb55?o`99Q2?WtKIT>p3 zNpdf_lpG4oZYoelE+G7FN?)XhK>OKXu`PhIS);5#ePE9wD6$Hi1C+FDk)z1+)HHer z^CSBsdb}rCnp?|N<+t%&&|l8s>hP2P#ZIs0W^mWh3ths6W165U`<}_gN>l_QFqa-f zmq52Q8Q74;&~ui_O+a1PfQ>i?oy)3FX|70|gGHnOBUT(b<*C{%^p+TOmn`TT=BQs# z=lY7?b25-=FHtjGpw2>FY>2u}J%N=OpcY5 z%)tS57*N}z@feH$Z`L-WrpIah@RO>#5jDcvdQp9cmJikHs?avB58Qbes)+|sBlGFy zpz6B~b);pud((lHy+>W4PEoIcefS+(+m|uZTn0H%J~f*z&nyFCd>EZWndx6>A3cD% z%XpY`P*QKojDhC3p*wyE*qa&18Fpaq+e}ONe;(>7HI(9kL}@_2hNAEi=*>=qs_IPW 
z%#J{B(Dg4f)IHQMt3#W(jV@|awI6^b`B96~Txtna|2m*fN&>l42DM9GTLk^^-dKe{ z)II7(tinKG`C6!>)cxvt^%r29;(_m(pnOFYI0`l4i^^T}6B?=Wu#RqE9G0nrpjYq4 zV;k0fGQQ7i%V3puy&$lwAGI%<@89}$b7C*vVP$Y5H%Ddn1&}bm1L6KB9_eHVRUhcL zIne6s^#%ws zioS~(`9Gj~{Q-6FkAw_u<%!_Exr$2fL(DPkC7wg$`vkBxCD9?60v|I6lO%1x(_(Pw zsZc4M11;g5m?!^TyY;VDc}vV|q@!n27wEa`%5!9JiOOrBjt0w113(C8R=#NZ;+V^?wEjR(9 zz*G@Qx}eY-27YTZ^mvZ~A$1k_lX6frTx#$n0K<6}^B`XB9nh$gw3gUa>DV8u)RVZv zX4)9kyvt%29l$P|t8PIZ)C$ZBp`B10qUtLu)q#Ra!--rE_3{Yd*qn$}25FbI=BUEw z0aZ{6?`bgRL=3j^{X_>c6RPIV$zRAez!+8{tD)0z8km94)O5_gbY#}y1dL?HVjmm> zTJSDahu)7hxPuuGk@$-!1IsXec0L^njL{9c9GDVX!)_K+(eySuQ&QaWktfpgdFH z9d@g?!K_eFT?{-%2jvtz;0I+stmKKj9#+x~$jt7*Y3@d^Yc|`XqptJ|ppyDCC783Qr8lHj1K(5*n(Ah99A-Eo(T}=?`u0dITziFU zeWQlpPA6(tQQ4lQMBtPikM5#?-dYt*GmXUga2HklvA}^z*psz zwZKA_0>8{Q#32WPiaDyv*dJTfV(KAaTFxp7m=Gd>?yrRDv~HLPd8Oq7H+v51@h1?& zJ;&Mh8E0EvVD4JL-|W<<5^cz2c*5W5mCSp#Ic7!gn=(v-DaBmWoM{?s!tl4Lp=pe1 zhN-UUIX|ASfU`IPy_NYuKAqsJ^9Q&h+)1_$`;6(syrl;KgS-_}R21;HRe@=*1yoNb zAaCwzv$f(t-_C|dj8JbV-IRRby9NML*WzDnU3EEHevQaL1LAt5*j_9vhKh{Hi;eMh zjMPsWEwu((Ym?YqY$@&)YXF^)gV~!}$b4Pm6!d?aD_;Un`RcqFxyE{J8s zi$EXb2p&wB%oJ>xJ~#^WSxsS`a2qxM34%YcC{Q9$I9}m`XMJprHtpaBuxpt_Mxf^dQ?DR~+6a`s341?{8icv261ei2bTxE~vM3IJH=s3C zooY}H^gTR4CSDsri2-i75qN9TA5rrFBL*@zJ(;0bDoijXP17Veh9a*o9x@-UFJFMx#+L2df8Hroe|5U?nY|iox>J|20}N6 zKZ)!TofXwN;`fkn+cvYvcjSsPhcP+Z2|CYzfSu+5@T1d-+PWFp-&pMk(TVQECUIBU z&kW0q1A;RRXxmowC`^v^p?)Rp=pZi!meYey@H*g&!_n0=={>;bwM4I~zriWkL5ET{ zI6O)M^-BQty#UDMspt(Z0Y1MgJbWqj3lPbblv(m=V9^eWOT@{Tdm1D5gZ+OKh6)*& zKiM1j2i=YmLX?mb_$@#OO8ZOtihHYh=DP{^T2~)eHK4PSTo0U`ot^U!Y)l1p~y&BHG6rgoQ@*>@t9l(9%NIs97&TVI>G8JhzqVeDKMcQ+mcZ<}Y zfcp2#k0law8TZ7d*xhIU&9}T*mjcQeMAId)HoVkP`~%&vet}W`T0YKu#yt!Dt`7MN z^ZdCza@{%ea`f!~qv}22q$s-f?di$0vl*6*Ai<1+2~-qOFkluDg~yD7ia7@q#DI#5 zh?o@oTUH2UO&--ovW_G%}y1K$$C)KG_)3$ZlHg$Wl zZhp2+tXIv@_8mJOw#Ujg=eFG3{F3GqYVK||t?jUOZ?&G&?B>!J!pHd?cUiV`s!L*I zLpfHw?;BHzN0a60dR8+Zm_8@{R(7E8x^Vkaf92w;B~@=!&MzNQ+OcGQv~%>2$o6nN z^oqZ|Z+(7S_Oo=0wl>$5MVjc_-FD|e7QdGy+ItJod3;WY2>lk2l1X|;qn&c 
zhm~CscZj5MNU#g5tn2b6xh0uv@x(kLGYw{N zgUm(2ci7G<+-?|FnI~HJXt}kfqQ!wN$F%yXRa1*~)ji94M#BCt^EH_nO;0v-B3{gc zyRNS7Q~yEZ`N=QS7iUky>;3J*L}z7aUP+&dtE$&Dd$n1p`mu_crCm$PS-&?gIM?6L zHzA+Ne46^MX=me}4bRoxS=*`hgWCP;pKn}53rr|*FefIX}w%@h=-R(c@ z7*Km(S0=~l9>YGcbw+qP`CqRkJjcC^@1^LEX$n(u0sG@nqtt9-AL4Z*oY^BR&o zwDHHfQ)}1nnpgW_{aJ}#soKoJ`9BNId^b8r2d0Mnv2^J@mCLGsYSz2i|EiuZzbSrk z^y$z(fl8-?+bw@kwom$&Wox~27hH!e)>lYPCg%sDSu9xh`&NZaT? z(Z*QM`1P!Azpt!K*{*o2l4+4`!H)h1-9h=+@N`HH0xHPbh);OQ|V5bo|?_>M|hv%9yH>X>s^7yU3owzD-R^o`nzKQmU zLgU}eB0kl4SK}Ft%^K%6ywT9L@vg-C$vZRY{8|3TBIRY%s;+NdQ4?%_V)cWSJt{6J z-+^UyZdtZ868|zXKd`}lIoC2%mb$#@q(mg~YvQit*z~j6aru+*MxW@kX0^?2fx|-Y zMD{BwEB&|hxzfS$i(^~E{e#~)-?&2x1M&lMM`qh)_Q!XyHhFpS*QU!j0?C1?gEH4+ z!@S&iIM5WFOw_8qLk+=H@EGK)MaWkdgx(8290>ad5zT1`@x?l@qWitvIr%ztq)~XY zc1APbobQ2O$gA!XzA?_R{#RLrbBKSe^9|7s1|x6Xh?Kl zv#ZUTnA{LIs7SI*$wW4r79_bxeWquFq5$;7de+#+v zJ$!lYE82e^AMy!>_OiQ^pZ221$dTU!5XlsXf-$Z z2lbt9-tGcLiKt-?elZfwh zTKwVIj_^x?V|;t($7dcULSh9L{H0Czrryo$$}ykmeB*Bxbb}vT^fq?Q(gc7rtN{ ze6KpEVs$v3RpZsc!_k@^56t)f;WYUA_&#NI-8WbS=HW;C309((@T$SNH__|w&6i*` z?TbHIAKygZ0{rUc`X-<$5A&Tuw9#8w?{x|*QSU)kKMcve3#-yTB*I7uw(>{tpGx9Y zv^O@HVaO0u+^5|u+&ze7cpaXLkL2U|XYnRUq80p+-I@&$17JNir7yFOB)BhH>5d-vf) zmFPrVfW^UQiB52P_~LLV{8H#x^qhl(rw8`(Z}rW0XJeOGL_~=hh3oNm+7q9_3w@vY zI-+APLKEzPmF)-3z(EsQ`Y*wknS<|Nhjc?~ zN$Ts=$H;csWNC6oVsYaW4ZZ3=LVn5bJgN4&hCh<2-248|qK}tvtFEr8su|Vnt*Wxh zx5~@PTE`EMJskNlG$t_7*PB>kRmndaFKJj_Kd)g#Q@_m9h2j2dLJK2L$99ywA8&}S zkI#(16#rj*a>+iiK9MECTIan&i|qX5A&K7_-f0-z_;%ug()Tu z(1P&HNF@4k>}nt4WjYSc zt|t32?Y2*$r*EErd+?d?Ly-xQ^CO|iW#N9I?*b?K%blZG?e-7egwN*p$gf6I9+N+( za6D^9XR~5rg|nwWhdtzYUuU+h`lan}Jm&)9Lq&dRz26YVoQeBDa+Y5s8A+3mZv+qZRDiy75R%8rfh6?oWv zINu}JhP5OmneC~X>l^CpcQicGbYu3lz|gYqYbLgSp!IXD%35}9KE3?h z=raG!1)|1gGMV++lUSb`%6^^MBR8=y-}jimd#E=0LFok*>nlT5y(_Md&kR51@8iCo z8<&}ts>d(xmvsMZzg$D^1|ldB!^F4R9|>(B(!@d0JS#Du39ax?b+_YJ^HTcb^l91p z{MSBzU_r1sYZsqm9tG+i7=icuO6ObRWQLp`zJuM7g*@KFE%En#%Kgh1!d~`0ob_C1 z2a#YO%GWXras=Aav8*P#hP5=eXBSfU24STroZoI{tCy>;jehDoKI2O~+)&>1L1qd% 
z{t=>?1b^QL|oh7XUt4%fUYGCF)uaGbMWVNkYzx)-b5AIdzMJE*YOZE_A}1^1JY zH=~!uE{@HK&WKD3_Xq_8(|ofF&oSax=Jp`I$dbZ#x8RErS>ah`jKa=Z;wi)n{qh&& zKF2HbHlnbkvO`!~Gy_Z2$;_BMoE?*Cn|?icW7DKWJ*$bnZ~V4#Y2!YLj}n^`uO$wL z;@{A?e`ARFoEJ8XW)|_>hH<&MrMI-5+p*amPq!&){d@_cA(;oj8u4J#Y&N$tpA z<~z+D$tu>*+)@6U1H|F}mWK()MBY0mvu8&KJm=Bhw|h3yD0thkrg1 zq)x|Xy~;T$ur_dD@DyVH93ANGf6sS+Vb9!gnZc}q9Gl&hKham`3=Vu3>=AAgxivB@ za(_4$`ji=z^1v~EA`ufUbpbT^8uws)M<{wMsyoki}@LVxU+t>~pgkrrb} zm`A{`s|thj#N=V-=c>#V>GM*LCif<0+HuJ~$sJ9Xuz^amrg5wnC{1o{`joW;>zXDt ztxv2&gSen>VdE(O2{ni8IeE`*?dG*Pr}fLteklnAp3Ti|I-zlRa&~@_|AxSePJ(z4 z9RmA@+J_beuLz$OdokX%Y_IYI%0@+(I(z3AWJhI(W?H1bOpnOzi68k0|C@m$1KpiZ z++%#Joge&5@PyvVoXVd5*3MMoZ!gMD$-J3ancbT20M6$Hj%7{nqv8JHe_6MFTX3EK zOlLSTlP_RSd^78D??6g<+I`hG$BFuXAhzXY&H`GfF~5{nn3kQJ9mv}KJBa>tEmp=2 z_{u*GeVCA)fo12M^rX}m$*#$E$qSN?CGSgKl{_@>p4}1dcpX$5b ze?aK0@WY{VLtDaKW1C90#7Da zOke))taP?IPh;87Gt<+%Fg14>E1@UlW)w=Dc7eNy&ow0USm>V6Ex{}Ohxw-Bp}RW& zVBrjRHXh8|-8-4BI|V-cCXtgr!@K=v{G-nq~D7K{bO<$5eJN;Se z?&OP!PwQW)o1dB#Eon7!k0tH8wAtC}ubR^;xw?E1$$b%;6@EK%>_cV%PIcdA#;hAubWHvyR@s-L%}vN%nZKB|0?)Aa`7NS@?ExoVNaVEP z#60PpeF;77ylf1r|5)Za=HuHzMgKr6ZcXId8dm0gli8YSK|JMSSbsA)wJ3ReQ)%Pc zx`xJEd{Zkb+MM3*fi}}xEvdPra((R8;K{!A*}Kw*WK-^x;MDNU(A>Z>|9z~9Ig!=F z>F~g4@7Uw9*JA6UgTqJpnc2(jmz&Lc`%Zi8kV{G6Be^+9a zpTs)+>#>8cXU_Bl;(WBk3uHQL-bbP{w9QXtg~9mzFRbl3GW$a2bnL++h{UlYGlKcT z*U>%?fxEm*TTH`o&CgebZUV zN3#8bqssf%e9&S=^JA;4E50i!#}8$6VQ_9v_Q?DI-|>MbgH3_={k_45!h3}G zjLeBl0U&>brU#b#&MoZ2OyxiM@x-jY+*b`(Ih06o)x;*A<(`QZvnw+xXW?b`sB=AY z)^A`JSnYp>sA_-vzh@rp0oLIk;y%FYldlV}Ank2&9`R3Lrf#uwuhWevSEJk!g;{v~ zPr93QkQKu(XWKLLF$HP%U}hC3=UOrkJD!=K&-0U+s~(q|n0-033txmu=?gON z=YIAbAKn!oTzOpez^bw3J>rSTmf%VL2YgFe2lOUr9!`=Jh(5jJ$HxthCe07 z!8PG2#H;!NdsKkk2}-a=TvFHzIxyK6XXeL`l}h_F%pqRSTDKP(XjcIugj^at zB=D(o50TR^M!Fx)8jusPi62hO&EdG;IgYgwXK<7`AF+b_6XxU(DNJEj`SScTwEuC; zr9F24W#v6tUx*{A42;5JvR@Tq?T5D7(HQDdcSDI%M}C5%hcbUS?KtU|~BFNkjfnqGEl;8(t`!#{1sPpG_Qze{(+* zx#V@?lw_DII}QYV#C{T8dEx`s_ngnl*~xfYY|q_{&YefLJHBugTKszM-CdlG{F!x8 
zOF&*db8u&40lJfQ(gA4K&ivlaC85!=!KF8qttySiKZy3lUwxDFI5QdJu!pT8D#CNY zO~K#s#CbV5J6I9ACiF?@yU<;s8NnL@S^E7<_f9+pRx!7G19SUlyU(~Ev108o_hd>>orL0#^{j`SL{?Cx|7tB|%ar}`u^*Wt`3Ee#%teedA# z09Iy=3jRZUvNzmo3!mUOG6@}H40E)P;%D%#dnD_=j|UA$7sfDW-qRi9{^XAEJ>pDY zuG$Be+T`BK2pZ4&xU<<~;$PqX&Yk!&O@li3XQq29Gf^)%gPeMHOgY~@nW##KvwpDy z8u3h4axLb6CU+BSiQYzcJvDn2*1Ow@1e3^iMwUN`S=W(xGtGm(MDoAop3aS9R&{3X zqud5!t{($>Udp#COk(E!LAcsR)++UeIvs&Ndn*xiPJtRfUpUv_H#{a*AAhjyy|Sv( z5wR8FpMrtFlgu>tb4u~F7)eC_Aw-8bHS}^Q9DYB1CJ_w!MP3e1XGY}~f0H~7+()os zzES8(+@3F)v0epEKn65pEaT`0c9EKnm(0t^8ix}*Yz6k0_gQl^!WmBOPh#fvDCd88 zcrC;WppJEbZ#XXz4=aO&^McdIS;uO|rAR2Rf{ko`4eefnmbZ@8fHR;n_s}}0&|Y1M zNm9YA%(qwq-h%rq&AI67z0nd6VEnwvy4dEqABns;nYsB}K*Q%yjV-y+`A3Ol@nEiN zZWZ#=>zR7ya#yCNr?1Vw6IdO4t!!s`ro5*7^Z4-S0HWJ3V(#dEUkhS5#qmVAf!Hmp z{p*QsHa`@PoE2RX?HOGW?jAZOfL`yOg+JD4=-*vvo}VE}J;;3e^`K)e`u$4A%$Z1T zm*XE^1!{i-wVxK2xko#L;NZ9UYn{Q)e7v2m@h!o6@(Xc5yZHZRZRC32c;+E5$KU4- z-xJJ7CW)3YlX$0nn6WM74u>cG$ZY+y`D3wPg3*~~~H%mndbn3A0bbzKbFhGb4o-=127q&7UYHFs_B zBi0gyDmRx`mK|5}U3h+=C0yl0R-B&V97%kg2Ip_zWZ(PF=Yfwx7e?x$?Msd>3B_t6 z`-M&ooa33{^^~>^E%JhUm9I6{;$NLt;lJ!<@7@T#d=k%-Dpv1( zg3OZjRYPI!L$|rZUyZ%)B>z=J(K?WI6w{rDK+$1XjK71c7kph=A^R;WNRM(y6)FlV zK*>@p)}?GVI5R(r@vu4fcJ5iErf~jXu+b*}JJj+(biYzG_fotTu7MK&j`ipy{O7XF z>>fe%(3R;1<_Yg30^h^T>#t8=fv4bCL>+l8nP~E-Uw8M6)m1*(EKohTVo7N#_IkK9 zIG<=%+wpGc^qx? 
zn%}3_5lwYLs#U6c@(O%h{7~E|7$*%PvP-5 z99~laSNe)sl_Qx)c_n)ZqvkJsejnk}F0(xSOu94eFqVBpI;CGACeZB^# z{qC=gO)PgS&Z}5dmWZET@?rGU$c^Faurc=ZFJ@l3DSupUQug)SaqbZR*TK`nm#|V{ zZDd4bpU9zno(+!;of=H}4|KK`KFjsW&dS8m(sDTm{k}gdQoH+@ss<|~0@I0+a2NLe z$DJd6rS5^qsq@k7zlBdF3faQFNGUD+!vn2@7voPg5kJE}f-eTEg98Fb`_Ch4?>VeT zAME~H__5FqPQMGkun^v1S_7=z24~|Vu!$L;v*6H+;lLLmWA}k84}cSw<{{y%+!4b4#9& z4U6@PIkESn8zQHMI|h?ZAKy=fv+@VyF?DBtjN8-cg0J8wNMG+D|Nj_T6Yhe&0VoH2MEzK)JK+$ks4P^IrCqT4*d zteN~ko8x7)%AMsq#<|Oxfp5`!{^Ri(J2u!QIF_;G!(*a3a?K?72ju(1nUDGn$^Svt zH}3(>KGeO`-QWEM+u|Kq8+W2HJc{k$Bz%~+!JQreGnXLCT+6OIvHT2pke>)X*AX*m zFFXe~5(VK#;xYfp%8y+7XuOG@$lROhmx-YlJd?lNe|_Zok~t;s#5P4MVhdt-lr)w6 zQ1W=}?8vg>F#aQE_U@+II06VdDT_kSH29jXtHiR>3SGdwKxN^mu@>ZQci zDiBky#9f!)L~OunR_u2ruJ`7`mB{>I^qt;L&Uwb4@Rt)|_gy>@Tl?oYm$M(yzKp|7 zzB`C@6lTrERJ_lYI)nZ9Kz)S88c$QR*M&b1o+OSr@eKFSDn( zCkFG83!?9Y7lv9!(y>QMCo*3@Dt>OPZ{&;6Z^T*~K@_`Ji7?rf=t5(eWBw~Y%H2+M z*6Gd@&WFz4@Q`DJeM8f+oqfk#<7H6nuEb>BAFr2c-w|$KV(?VLAI9f9;}=@RjLg&U zqVIj*JD(FFy*05-&kQC4j}h0ifLHE(*7x1yjDwooj0QD|dEB#*-*yzj_}VTaO2i=d z0i?pGkcju<>aWn7*~CxifF0tvd;`9;W0{-GKu0>@pZ_9OmY$i4OfJ2i_}v@QZ9&5o z*jX+nD#t*+PbSXWQKY#zGbU4?yTrddG9)%4{8-?D;8oFnrS)Y$mR(iat|S)yIebR= zZ=x$-9o&S6`CHh5=3)E0J%5P%5Hj5(cy)h>7PsDatp8Xv#gB+Ia6;e=qD~*n+J?cz zS|8xAaW)|5{!+-H37&?Qb1OcHdlsH!6!r1_;X4>>T%*&^e>=A3zwu8U890gP7VZ6` zoleNnf5WM|;``u2Kl`93w1@J#$e`R4`G`2B#LVxNWiA97t2>*1V!kq=ezeM)D(*F+8 zOIu?n`0~D~@>F&a{fT&=ClbeVmhTfh(8mz3wK~XF?mZ_=?o!zhbq*9{D9m>$jrCz0UfI+j9NzxZOcqnr*Dr_=Ohempz!z z1w^@ROI+k7@QP_jr7u%&0taMPXa35(O&mzP`JJxBPQS%}zkgKlgvh+u>G4U ztWK+U`$8qJ139;lu7|G|p91{nCT4*bp&1>9?d&=DWGOu1cI;(`=4$BorO?1(%+PJ5 zUcY5t!>{%x#($^G#`F*A)#-n+v(81*x<7qk`aJv($E1C%vi*~kXJr4O1)d4qPlUl! 
z11E-FiF_Dq5g!wuRnmhvG^`+41|pJt#XMW1|3}uWT<83P zel`FtrZb)svyiyQGJ}0CUORuegIL3|FHwIEk`xO8DW7#G42lp0MY#hpH_yn1x z2|cAHmWxGb0ykmd{ULizws*D{GD8HPOdXJ0S@_C1lL(9V`u7h^4Bi)hIGT@jEZLj=gqp`j#}0_ijLs*%cUiPL@_DFd zuobjwEpd4#FcZ5BU1=5e@viu9ti=AX13Pay{zpC8H}+rOWcc7L#`P`an+{A4{t|p3 z_;=uvKsQ>USpa{1qGSh{z51KTub1Exasb}c@1YB~Wt?BYT9I|o|BYB9KETU6=yu14 z^)x)rml7%WDy-{KY+lpRMP9}O>LvV+56JqmUu1@4+F%1+fG73T^fJ5{BAg$>n7zREjbvwIt1`y(^`O)5n@E-!qJ%ori|_04$rl(UEC0%F}BiA>ZVJ!}Hp;D_v{ ztRI?qEm3tk=0_97rWyPEC|cAK_ju=OJbnAJ-l!({UT|#aTB5riMLg$A@q1s+ZUa|@ z?+D)%?iGGHG%L8A9$QKL>AwE2oxPp$SmQo}Pgk*Os0COY%4(I%SrIXnKED;(-qksc z$OTU!Lw`x_*KquV1?PIC=}++I8pyuFJK*RSxuy7Xyu)1loy@vlMr5Q@XvG7GV|*g( zMII#$zuvFGlBmX>0@ zJOL}_MM%GQqLKZ^XC|I3t3g>atjK49IX`_k9%IB;@c0P<`9z)>6r6QlXXz$hZ>XSh3;7^eU8@8#dlNPWS%+Ubn^ zza6{l@4jbS5k|pxd7Mx5Kk=6|1CaP}vld|C}U2CuM(h!*t{ zZyC!@s}u0)S&UR%kF_-jw>*Oy-is#x4bntUEar!>`sYII7PF8_-{gA*azr~~k8dRE z{tU)y0V!)fF`6ba2QiiXeUBoV+ITd#y_x07`JVHw^mSmx;0oV#c+yN(oDK9{PggAI7?N8QxPDxYxpU7b3w_(F2cC%X6W~ zjc%@Rzgy~?iGB8PcP_EMu5jCe|F=jvf?CvIzrT%iN3mad0}}poAY(0@`z_WVZFWzB zQyu62<{pHNWHvFsMnm6UWyW+x;cl$`|GH12jUGWe52RjqAo-k*<>^i={qx<2IqQV& zzdLJe_V*2fX0)b`UD%=gM&IM`z}BpEJ>EB!=es!f@$QN6#$%8suEgr~2K$dsglDFZ zvxB}5;iKO&li7?`IgHzjnuP2`I ziS*o4Xhi?up*et5$NR>D-}k8faPF_B-A<#I2C^RWP^7!9>?2-5x&H?)Hqn~*Qmz-= zZ}4pUgE5f>|2HurrqU~YLCKLw1Pdwud06<`Ql?6$KT>ujd+fFZHSb|1+=snYuOr_F zeQSM}QQJqb01pH?quCSwOv?M7yDw7ZzE~57Q}YIDGl!D-kwrqqUDv14$L})o?m+Hb zN$V_wx;0_}n9DlI)<}PUxUAO13U&=WaXV{TFQqT%xO=nP?-+F3P2g@Um|5j6fYLwD z4B55d(PbWS5wYdJgnEWa|1_ve7bU)CzHdRudBo9s5I^Mwl;$}QIt__$D`~D__xP{y z7u_37T@LO2m}?WVedE5LoQxTSq^>%c}Jk zp-6WyezgYj1mDlltnRd3iV|K1lB>~IZlZ>wmgm!&d1~YuKN|MGe2_+#Zg?(bHm^- zOTf+t%r$)qcE?bomuZJPm|OUq`kw)kzJRjd4Q2lT3_i-Jd!KRk2_y79{(r{vpBFj6 z3Tpl_Is6K`f2Sl>)U6qGw;N;bZ+iY@P*TchI-PdOP{tTNHI-Ss9bo!g@bF-fw|z=! 
zZgc0kJ&N|y8de~WV$SMu*6!b29M3oM>k8sG-v%mu@ZJGnpcE=|4DD7yC+^Qotqj;!yHw4a(Bsza7sU2#sl3%wa3^N_FqRTT-AT z#M@n{gi`NInWf!Sb2Z7>*W7L?@gw)(UB8356(Bpp`&&>V%?0K-tEM!ap}m!~Z6#Qq z2KPM~9@(3`;*7Hh*SmndL&4ifs99HXEdyf%De(}>O?+_fR`Z@VjEoMnMlZhel&l^1 zx>IvMT6}Nt7x!(Tr?QNKo;;C*j_rVs)G;d8fPR;D&VZ!ZaKP^vO?l2;Qf#B#fAY~v zezjW+-m(I@;5QH&V`RBdmMvg(16cWyTJCgfNxg)TBx=5f>zkmQ>*1D*p_%hR_K)zX zMckc%yzm|9_>%MQc<(pd`I^rvp8A9Q|D>;1gZ2VPp0-Moa}zz*lCjs4x>nLQ5s(t2 zW;OJ@#=TJd0m%J~+^6yTP2RbT_bYa10?OK%C#(7F2UZK>t0}?4KyYU zH;t02HNDi1|54g1MXxyg)|*AyGK{taNT?>qJ;}jOe>c&#W#rYCCpGT>ru>^}r2=hR zp#4^J7TeHbLCJn4g+m-414rIF24IA}r6dr-obq>gb% zweG;%+wdz-K6$uy3C~o3J|EmRO?e9BBs?l7KYgW~^;|PrFG|j8TOaB5WQ-K`lyNIp zVmwjG!FG-u>i3q^6hB%%`_N88Y&XWm!F+e7m0D5fj+CVXC2ULn)s&)~qd7+cDQIABak?OfO{IM(lTT#1eK0-%fearC;am;=O=?kIIvq9juAagTy zXUmLY{bM}Y1!^IgWN+F}ZPu1nRQo9>jb_o97%eA!tG9&&J9MTJh5x%3q%Fe#jRYGH zyPw(pT!d%B#gFQu+Bh7_MV|n7bl8ZB7JFUeD#jJvQ!2eP#F^d~p!NNnS)TT+${@t( zoytMqW#FI!1XS>=q?mi0Hq?2XyJ1>GWl^qva`iDT>cK=7B&#I47T~#R+NLw8>sV|b zeRnAS?m(-xro9NWO^)TfLx=FHHnVn%@D?2`+4}E_mDYMf?-YtOT7;vfA{vD;p)SdP zHq_#7288L81BphByfZ}x7LM6!n`0BlS}?Q`6#Yw|>$A0pZJ~KPnB2*?Qf}q5h5MVJ zjT`>ov9b8XrXpIEW=C;^H1TeYQ?#MmFfO6C@l3PJD+S7uapm}>Up1abz?Tc+G9Ey)L`>##aQ)*X@9Zqr>|z zdLxSCT{Ef?r+g}-D2V!7eIoiITpB%53Z)UoluAe0f4}Xjp-BA}CYAjcesu^j!iVA6 z!=>R=R}JenCL+8=?@~#;LnF%@4<4dL3Dvt94wo@6oa@lxuVp)@Ap+&UW zc#6)|Z=y){vHlV|gbnqxj_m(C)cc|dx}rOx45Apqh5ignMumhKp+KWq_beynDlROP z3Rfk?)@fZFr)nEfxJpnMrM!AZ?OaDMiC=D_uhoxraK|(`Mo1@~BsoOVf#{}j7^9R` z+>aIW(fdXBggNobZR983r{nLUw6cocTLC5cqbQRZP1{0R$ziIWS|~)RN+`GJgpDip zg%F-EB2<=$&CoM(VbP)A(PVxgKEooo;zEwEi^n%b9=Wv0gI7YqHi4rAr4!GsfM&IY zT8U%s56|q5EPNP}$3aN>l117U$72BQzlC!C2Cw}J8CE{))6l1;p<90n7hV8IUJI68 zq^)M)ZZGobg|5&ax%wpZ+J4w9`XDDCLEhbqT)91MC|yC^)>o92B!j4DHS!D#`65+T z4{NlGG8@NJ57;4DONg)HsN^tCDB5Qf#ORFC8p#KSQJs6&gh#`--M8!FXNFbreD#1v zrAM9B7vh3SWho8+>KBz(^v74ET^@2(FXMOm<ixHoW_OG_szvq2M*3toMcP3hdYp1sk^V_a6b%yXYvf~x&}XA8#hpg- z(IM(4xmsuz=iF69wQ*bVQfV{Bao2){bsYNs8-A%nG)wXQ{{j1}`MsvT35P&ogGJXSGYBu{E*fMwwQH$JPlBdMUFh@U*ItEbhn;+|FXmi3_8 
zP}mTaupZbg(HY0`D4wXW=z(~%dPy<){akC!^Zp{@L{X&QB+)lIlUqG@lJ;nCNi2D# zXx0JF^0Y}c<7*c!*PfDWV@$W>xhA|cTF^rd{a#KDOXzj!2bH9i_mgleN^HDcWhyE5 zzSlC6?3^O@{Ny4Gsqa-cp;0{5P_17!7EIbyyTqx9q)z1}s&4qUR2pHTHIjJ5du%+3 z8|hqaX=A{XRW#Z(4otcb|5ppEy-b1?2E|3|pf|hupmYf1fZL1LcYy%)zYrkWoh{Nn z!?E~+sHh}IeTx>0w;FDYGuLv~Kpr|IW9q)n8~877Z-|xVtY6+4TMiWwE$m6^l$}AX zqS8wy5nmK{6V+91)Mgq3De@O37jIBKHgaqR`I0|HQLSf0AFLf~=u>fB$vA35wPktn z4Qe~RLw%(;nYcE4QZ&Q&LN((n#{NJKfDvB(NnR+2YtB7)muSkazuhecLc9fH= zsNHT3CrY3;lk}!`)w|WJrazcm@9D}$FHA?4gf1B(QtTZeS9MX&Du;*1Krtty$f8sM z@+~2S(x{I`zf9jS34S+*)FMK*-s1hzx#3qe^{{B|WjM6l^sO=&Z`3cfqPU4Dr*~bl zhw{@1R1MVvk`s;giSz1A|3&>wUhyb{(Fd;;ZG?EWRvoO&Y8umi)W;?d2vx$OWLeXA z?U&)zp3pckDb{3Tabj_2(I8K2G2ST-EmR3hhEkI?Jhbaw4MoanT8yZ%IJhKDkz`-mL5);D_dm(i#;B0!SyZgIWP{sH z3zS+^M>6|vuB5(Iuj{|sPjb2Hque!~#Vyo!q6z9hje1cA!>(wmuGw|VOV8Os7R5ic zv;K_Em}aSW8tqfA!m4pdp=tL=?IXl$gy^@vt^b7vqvpD*Gw-hct0YP8EPLiNUXFN)%78i1aZBaqVgW|&f7Nty~QM!|OxvZ74B~})BZfo>l z(|}tQ+fl7(ttB3@8;7c~dRyaPBiu9%>E8M!jxDMxtFF>%)CP*$qRxfO7_HWX4Zj&w zy3v(Rxt-GNNnUG7s~Lf{=tg@WA+9HFCGuheGvYB)Yo0Ze#9?K zqm@Q1?q(Fivt6lQ^}LNhI}-{_OE8q1J zZ)zlYV^L$!G;ZZ7ybH@l6ICm{U1bto5*;!O8NU@)tZpXV8U0cXEw$O$v$U$EH@k{i zwxxsYaHi1ARtS31c&lT@AdMn;- z(xFnDW@6Mxqg{yeBqO6%>O0X9qfs{ImC~M3TJ@PwBXp?O?a%Nc8Oi&zoOR}1)X%BwV1ZlOk$v9yRbaay%nGh}DAoHTsN z(UM|xRl2@fO5;b=(zPGB+(Kj7?Dw+SSPPj%o27Ib)h5~Qrd%d<>$+M|G+a`$o>lwHilw&Fm{g0Zy{uo9 z%KAlb5e+o-{$EaRXjK^vwW1#;TdAC)n)aM%mB}V*9gRfMHd&6Oqqq3a`qHYXr!iHj9F?^}*q>1RdQ0CD{!>Ycwa?PWK(u|efBuJC2qzjwuBT3n4 zl@RHJ0*XHBPxufQw6Uu>6G_QtiBpRTWjdM{N3QJAvXSc0{D^GRn!9gT{MNb6k!Y@8 zeW2&fGHXb!{BPtde~o|pHaSTt^lr1#iOQOkq~5Wa3H6qDcoLN4chyYv*bY$;QDvi` zM$J4OSCma-!%|45GVUP0xO=QxDqU0iDJT8XeUtf2E)^dQ74fGM=#J&gd5Dr{p{v`phcvVG(gKX13Mo~wD6~mF z(#R405MCQ8_ZDiP{FR^PwIu^eepIXK8TGGebdurBf^7PnT2C#hmKI`cR9oBHxvpqb z>nTa?>OHTg)I+94TK{Y0+g{4fRf)Yy2utPZuCj@8|m+`|m#V)DQG?3#_Q?v3JCfvv*R{EGdO>NoSJiMEP|^ zz9ho5G!O9}A=BhZ;aZ3ne@PU1gszxXMjX`ZHI-E()a-*fQp@i{(nNsz$hNDVkOzzG zAM$7r0)!)te;dovAv7~&Ji^oXB-^PE4C^M#*_gH8rnM=B`bB4IOOvV0R$`pluwnhJ 
zKJpNx8rYapxirftUMN0jT-?4*&K4$=Rw<1K*l*K1BsHo<^`!2r#)by7UFqFIpYdf$ znuZ9ICxug^O&XJWQYDl=C9AjPq;o^E@TRdQ44N*nTPHJ`Z4$KEs)T>x%^P)U{oRLJ z;Qx*UJM+HPhpMIOqyAPM)f%#dnA~gk?aW%p+Dz?XsYQ?Ugh?7^c~P5LPDX8YU(b2A zL(fuU?^3JkJyuSWwrq3?QPz(3U*k)DIMO$TbjdTCVKn{Sq^m}*s@5jyC^xl;+Fx|c z<8_i&giuM&#__B*jUO4OlXb)6geIvQjZb3N-r!5=fb#nx%G+hFj6=X_D+>ddbaqK z@gn0-LY%Y>c?7F`qIQy_gg(WpC={lRR;cIIb7tWdN`#63i$`HXX@pDTfu77}DTN%5x*3-j25rRY zeTH3~t2~;okgTB|H62Z8uyLyv63*=0xV7FTEzLN-dWYz)+2w^K(FXOW{76j}lN=_hAq$#*CBTU&hkR|+ z+y8K0g=Dv!Cn|h@vNk}o_sfy-YnVO%p4^+WDq}4k=|R@_{7H#xIn#{dR_0-A(Q7w@ z4b7BE%au)7mgxi~sOOh-5Ly4sE4H3Ho5@*cLVuDp(u!o45T=AGvu5PLo!Uk1rn;+b z)g!7`6aR$EFR(>U~Kx5sUTI`x?>R-$a;grYH$pNulu46tY86U`DXOmmRV zDl4(B$Yy2Qv-q#_lICeMX`)Kv#YXSsk6@CYo>3kiEl!YL+*$85Nl{c*y0FRTInJcl z)N#ej!Ki8-d77*sE^oFr;ZY+>-U#wEG>fFX3vI4Nsbn)WOSmYrO0G6gD~OgF%@>;0 zDxxIfD7$4h8~LJPYGI+(#-&QC_o)R{3zb*&Q`C@ulu8stG$uj6iJFT~ z*U|?$u84ZdH&3<=v-a7%f0jP0`|p`Nsnt&#=t*h0N++r&%~yyM1*?bB8UHoApv}CQ zl|=SKk1GoYIy|eRG+~uaDATt_i1@6vt-i%U#CuIo)G7$c(k7XxSJb-V9475~Z6s-0 zeXd>?tuflAGfN@Fh(4+vZS;#e8ub+3L_358PulZjNkfJAHtPlTqe^6{RTi_wS^7&JXiMtv<#s5c9GZA>5g$!<)ku*wwW%h zu^}9p)TTNJU%H}0H8pudDO5W>Eg8rtq5ZP5T3zb7r=*5(!BMxdck_rLx_6H#=vegsq7{_>5ip0pAOUQgbk14dU!HvUb(8z zg-1^hQ4UrnQ5DNi=R%&+8RGQY@F5A zPISoQh-y`%0!As-&Y}_?GELGn{$xB-cRjAPn|B$v)Ehk=#OR_?6T_l}z3;t4Pw6>*Tm3DS#*bP{vO*MO$+Jo_s?a0;qGx1- zQJp38s%;cCQE$^IG=0o;Ht|=rgP~mgqd(DSqZ~qy@Z#yWHU}x1;Za4SU!HA5|2=wU zbkXM1M72aiMQ0?z^rmIh?9tx9N`wXiIkl&FM<4&#&7uJV!bv{XOp3racPNUu){St0eR)$P*Oj&Pv-^NvSN54e%?8CqR ze%sS_raUZn!;|Q{p0IZEIES7PvQ-*=EQMz;k*|9 zBn`tP6}5(NY8Y3EJlV*&n0irHbeO&%IwTJ!(V<=Ro^X>ZVj_doCT}QWO3;fNNms`# zM+OvaAz!UVDHJu_0#|FvO=~W8;sGzM_FvL96t!J>2h@^oJx^(koY?_|3`4N^y5{1f z#aLS1k=!7BNg9>iNF@+jjVhYIxe%bbYBiE+5a#nI-}P$dYn#(T!ilw_MxC&s(JNi9 zhM8SyeBw14!NQ@&z2@QNBO^`E_>81Mjdke+W-ByXtZ^N+xM+>Zf@V{+be6C1r@mGi zy~SvvuBmU$CSo+tIJ(lSFKxa>crk9Op0*s6laMRP&ipvV4V0hJyxsRCd6{KYm=%Q= z#WH*dJBEC>3F3(2?RI!LGYL}n zC8?Pvt6#b=iP1_S8mEtyPrTOH0}MZYYqJ(h=z zv|;&X$^Ii-qx?(cx$1er?kv_vNLMXvj?AZ);mOcvm=Jb^D7!A}qG+i+ 
z5=2X-1xxRd{%gJwvd74;L>>#f(QW+A!?@^?XpLzGp7betV_LG&T%o{_>fu*+g$P5x z@pR$V8v))}5)M^*;X-#+GL25zVrq(QN*{A{)!OUC_~Ht zgEXa;{)$#nTf~{xGSx#B)p!5$OWap_v%DE(*|XKb^3jv0s`)9~%3%46ng_kDT=%?J zg>~gF%ZWUagmmdcqH^LGrjKZ5M;ur<7PU8yq8=ChGP|)*A}VO}NQQ{!|M{Y74Z7Ll zG_uvATC=H@U-Ephm9V;I);y(=&zn-Jhdgf=8~3u3sVBt0?U(qNQDLD$$kVrIvaWk_ zq-okhg2|xjYwK_0WWuv(kf@A)>02pv#SV`Onbu%3nMdc8&iJnKG&xqXhHQzZZD|F7 zJy}V;R4Z$Vmc=yXyh(Il96&yS<^gDaK^`qstu<~uyQcZZSpLQvR8ApJblYg6Y38Eg zCjFV@XT5CpJy9gnVvPohO6ZUzr*FfB&e5}Buw*kS%-am8Icfv!Cw9g9utayqBVA_ z`L}2R*nWIbmg$yM;Iy1XmN)W@@|gd*C7jUhIMbE(bZfHaxcvj6@1GhyP@a> zo#Be~C&fO|*fAa}ON!(?%~I$JgRVI4OtLa*(c^pSXN^)@8=yLh)@d!BdHY$Ngns$- zR2D~DJABUMlctCy76C*Sb(4h^WkbkpNm^Z1E14D|{ENHWxG~$HeoHE`wl%3#+(9(l z%3;7%y7UUnF9cFk#j9f{5M44pC)y&4ru(LA7|k+XC2nP{uGQ5Z9a0bI@Fu zex7B;%B7MSe$?KkZ<%dYEg-Cl=kKN=yZbL<}J;xQNu6A zxRB3rDgCeb79s9S{}vrpO7*F@opDX$!4`K-@OQxcjCU)G=Me<8p)r0(dtj@`J_HJfFVT}5mNZ*lt(fkfHs-$~MDv(TI zoLSVgk(!!hqcLnUuZ=rS{-ZylW|FH-nzk8Q*%yp9>Ri9PLo`u`Ntk8)wjn!sKDGkcA;EQAIKJ;GW3;M5U zT|$nioFT(JQ*0hYND?KH_UHW)vb?zr^}EgO>aK@L{kK(>=1njCQaF=6Md|G+<)VCa z#V}&uqDD$@D=^JhrjECpmTha;Y~D>=N+Zbli)3+)ZH-xtO(9V9RHZkbuXB~HfxNf! 
z-)vywV&YHMqOz3=8N!gsdlvu1YftMhjZ z{x*G7GKA3tuTH9+aVE(=#x>3I#*N|~rBo{KOwy}HmY%YECRHR{5l~=#i8f{pOpjbKWicwuU`V@Ukx{BzZsI?Bss*05&Ing5PXhn^%sBbTNk;b4T zN41(-RJLnt1C`7CchoB?q37i%eZWd7sbjb7<YikymNxms=z-Qk3TK9S zqrOJLHO{1cn=h)@qoxUE*btbvk*M41c=_FHEjXEhri1A>|fKSG@PLNr_7-kEA`vYLl8ZvXdZ&v^3-8c(8= zdW$|*0+Yye)w^bQJW0*wD=bD^fSR=7Py|B7;!#W?A<#If(PAM-_Cj%AdCsU+Bwv^< zNxF$F2x?D7!;yVKb^^7V=9_G!h!Ps774?#IqmOE@b`VXmvZ{WH7H1l$(Jb*HPZL4; z4x*^uOyt@B5znXxy;M?^sw;^k+7pTq#AD4zs~pAAnO;j!ilrQir}`Z+k{;(h`>{Jk zbE59=M~nPWc)oC3;qtBKZ2N2K$q>~(p)d#>*Q=XUl$TH_n&3}bJt9f7~u z;pVu2%dUT`*#~I0|1keu&SEff3sLWHr#(L*%Kaktm21u(M_cpvv5VFNL}yw`ROpQF z5B3cB&EFw#b6{HF^T5mOUOX@G8D~|2Y5sHl+nh6bG?JZD-ekX!t=T|f zHoIS}^iLzo>~jCgQ19r{=$gpOS93VDB7vufm=A`|=+W#s6eVy&-=XyK(j>!u=8KfH)WnNiC7p*A<4b%f(~t0Wdi~ zGZ)F-kbN(+ESa8-6W1F4m^x(D=yGD@s2sc{|!C+BSA;tY`F=(1HGO z?iuU=Jgo3$eoM9~y)?Bkbv66`4NUJy_s$;9?j#9zXgY%ZBNrB~b~hBh&Oeh|z*mhdN=Y(?CN;1baniRc%S&l_}%fA@fIb$qOowdpzH7C zU+z1^{g$0XC*|6()4jva8k4i^m&QIPjrju$7qf5I7mWVz*{f+>VGjE+y_@ZkxjPj} z<`SugL*~is&4n4xionMp^R-ZqNIH6FNw;{<__HN1#imAw zMt%vc3H<78c7MnplFcP&H{M@=UftN*Uc1KZ_-)&`tp{u!v~}6m{kN5FtJ|908s6T2 z$NM{jwU5_MsH>>2tv|1EV&ci9+;vxWV-MrUtk2l2bu zpQ${*Bzs?Ga{9c?G1;>-zob`XuFYPX*_b*z)jU;`e81`83%M!yAUnbTk zW+x^l-bzeJ9Gv*Bv47+ChHD!;H$9d5DRX1)VRpGm=J$0!bFK*fFI*oP7ppJ%A>N|2 zPw7jgL(A?cpH?vt_WpDEpt2K7UyiqpPbygx`#jn^@^k3);QjvFd>h#{rlPQrU4zzS z-=WQWr~9*0O&omAXW#d$+3EaZcBtvePS+O{P9zreGWLFoIGz3d0(S+MhoX_Y*put$ z@R;!4?AxuYOz|yt*Rpd;4|W+`Ti8jB6S;P|mE?E`yFx9`<}&B9_t}Y= zx#*P)8 z6SGI=`se=61>KBud+=0hd~58bl5z2CN{5&AEO#nODvz#OQ+0AxsB&3(vh4D*=4B6; z_K#m4dyw6th6FG2-|UOHdlatBzml7p9iGXgvdMYLmZ`^6b5cL0@~IQk3)B7Cld4m8 zYj*Ga$U;AMh?tAcb)D~Y_6~eMaA@eL@U7un!WEI4=(OnMXz%FXk!#^h--ezG_6wZK zep;iQrM?&34-5OT@ANn9zSKIun!RtPXJ5;{nK>hUT6#WIIGH*;-8#KIbyMn{1Q*qCKo35NnV%QkgRWduj!PgvlAT? 
zlM=%clm0)Bt^qu*t&5&>HHhsZO`RgO+tjx8YTLGLwK3In z!ranpi1!Ac%D#JjC;GMZujOCJ@1ReDcfPrV`H(5a*vSwi{^nXx16jukYonkUxfiOQ z1Ld_i2@klNd+I?Yc%pJtNmqKQ0Z?yy!gfFf?iyJ@DOUl0(J1KC>_F1M!yn|M_!rzF zcn&kfv%(F&uTWXqW*BNHE+vR%#d`<=>xvKfwOkMG9MrRVlBZBi4%E&1?!Vfm+Esa_ zoS}?RugM?W7d&U>G9IHl$-N6ovx+mpEx8Ljf8_tpZ|j(k=Z}U(e`Jf!%g8_AXynXw zEs+1HPA!%ZJ&~3ZM8k4ZbFWOV0PpwSBYac*R{7ubuN-hXU~xb@|INNepQ~Pv&4bLB zOt%f+h34E5GF`9Eg0-6J9Oao@TVC(E2+I=W&T%I~TkJ7($+DDr>RK(C`9rtnBAvqp z^F5&X*9#iO;lfRU35$en(Czi-7IOhYsMuFXh7#L%s3JF%W{97JYGNs=qZBMo#giZ7 zM)4>3YTSK2fkm>}(339*_2b%XoHj+xfdX?YXsC|xB)i|bG9CBwqV1Ah%S+R8!A!B*2r;v3@yuP45x0|EnE1hx!{4_O-e zF?3bvtB`}iaY5w+%zlzjviYV_mLkPboR@A=8_V52o4bCv zSIge2k9I@*%(jx%+z{cNxKUE1`iA2M$rxz-V3=!&G0cZft-^Qayy$fJ&NI1ATqjZn zidvQEcHx)Mo1e(}@E3)};zwxlj_11w4soWK#J$kFVUD^$@uQ4ZOS zV5jyn^EfLX=W$H-i0pk?2drJKU#y+7E?N&-&RH_DuGs219=RIIuhqtEw7!>H zVL0v8-gl3`7&IvOSn#LdSs^1sD~2V7Ee^{Ic@;Ry&*^=@YpHpGF_>Gi<)`gk7f%AH?uvuIQTlZcnFFh5@O5F^328MaJ zaQ$gC;;ET>Yi&FBbw_uQGdzE&-I7}{_hHVi?3LDYnb$G~X0*&Go^d9w)uu}ElTU0{o`V`FJwC~VoV zi@OR94Zol*w+K3u@1Mm*2cZ)5;l6Q3`b*m&e{-L6yIjj1Gws%##o3A3yRyDp z>=`s8E~9G3;Pk|_>#1YX)bysAi!#lb-!m+k-Lif23cHe(UUa(nUeYDWeARDbaIx?L zQM01`qCI%#PZ2F5j)gZ0j|p29Jl#LoYq3;V_{t5VgY-Q0yJwE`citv@UHd3|pS;ug zCmr$5YR-wyyRHfHSItDH^Vz}yF;XgNcx)_Z9^f_6JIi~dx7GX$+IX5cUu+^S7anuX z$P0}r1(geOh|)qa$wI`mf<|4)4B9;0`oeW9Fi>Bk| zcHSPJHNMe4k~z$$LxDqd07Xt+vND>)XqAe zxgq1<%SN9R`Wk$eq(rIXasL;lu(DRWoOhlrJSdm>$S72yOTTx zI@BSaUhZw4!|HE6f(u97Jy@6^&XK|lmke&>K=Wj;VqX1CgAL!r3!=aDT6`|ZTzh&= ze+^B}Ci+<@j!lAI=`Hx_A4oK`M9=W|gml42SjT;*OK1!?f^W;$;cU7;quN#FoNV>D z-9KGs*FZ<-ysNg&x!-d~*qYfo*hb~{$l0B70_;s}H*#*-%6k5x66X9Oc|dDQiQa_*?gifr+aGZ!;$D~l$%6|0H4Py;{3tK2efBT{XHp!IfNe@PC~7Sx5@%w%Ll zs?%27a;_!Ui@Qo+BFj~n?1i%24dgApvDxZ=C|mo<6FgNtOWj3W-JHuE_41eHh2}qS zn4Iy>0Oxf_59f5}ELRuTTvubLUVrsWaqo5S^H6B}u7i5(R!^{JlIJDf_c*6GCpd38 z$I4mSJu;j2CSz$J&l}d7dwTcuZR@w(UkrHfU)_I<-$?&HfwrLBzzY6JKAXK_yfVxm zO&yKhVIw!3lxE&gabJvF*E8*f`VKm>yVY%4Ble7K)rXKA@*Ju-!$=?cg3ID- z3&(khZ_HKaK68nDYrYRMHGBC`z7n^XM$#&j=Q3$^x`3R8n)*xj8oAZ$dTDYGxv!s4 
z0y+e}(0kBuTzg5Zb1|gs4D6*^%ayZt0>*o`_M@4 zq?DF#sZ-(CHRcV_HXl#t@h`*|Vo9hxb~X+&wKPe_BHYYvl z2IwqZ)UQ(``=TlI3NngiNFjig!_@KGYGhoaF)C--UiCVwrhiZan3I)ZI~joo+)4eb zrFd40r_rpmW+OM*b#|WY(p#zTNEa?yYf4Kar?!kPQ2)@Y+%{zi6q>JS?V#dWg;j+P zaW@jCm!p@V%h#6M&wA(uFfIw?D%87nv-MgwR62jKi=>JE5398G-<~!;A@Z8fD=hey&fz;GCIMufcEhf zE{r@Nyii^bghpdO{XTxTJvW;OY$DCTetUq3wvql>D}nr5E%sb*g74KKFIgShitLl` z>yOAx^_aey{6_Ay8V%GZFfTHQt<=XLSDuaMnho`-W27zfrGaD=xr0o4FgePqaH4*S z&EnFuD*8J9HuT=hQm599jip_otGYw4%Z_NsPO`q*NZ>$IkXOE>w@3DIJ#-R#AjYuj zgV{9QL!aogwcfNDnXEBdj-1jWkwM-I)mI-V!j+((^m5!-I+M*M51|D%mfDe%FNu5T z#%)DS^l(~+%%ywiP-H^iK<9aez6P;hO=vsb(;Gl*F$9*z1O-3Jozml35dEV+VT<`g zB#8~>YLa8Djoyh~gLYeet~>JmYl*^MWBvH+gD_|3^<37LrP3;-q~42rgPC1Gl4&Qb zlQy&uJnOP#IvWPt+zmE}qd#G>N7H5+#)0ppnRNq~qkU)FX+Pr9rsM94>+QLF@Y;sc z`Px`AfuE?yu@dw*jc1S9VtSZ;*WS?M`h7(vtM$oRcYcaqk;QN(L?s`%G5Qp3IN*p@ zYy$UGm)L$947E*f?5^)v3A?!)upRs8NPLx{%~>jJNgoCaftax6->fgm*Y+a%zClVL z$KRe7WdrmK(u}o&La;x^FMkW#p-rKn`jMeh2pKA(iMMC7ug*gmM>o`L!>bv$QBlCCBhtub{d;pREA8^pZ5D6QHlB=wscch9c!!=(m}0 z>R$(LGo99@xePcCvg<~=hW5fqSeX1n=OC}VkYvzJq=8Jdgf8Y9+JdCPp5#HLa5ydfe;z`r-krX~8FCj^fnYDoq&k!Z!w7>y>RX(FwVVxRi{_D}zi1y2r)1sC zy6KJZ-Hz}vz;TAnF+k06J#egF)Ss@>H$h=rV~?Q$+5vuuH*y~D;A52{ZJ|`S4X5lM z{UB+MIZ1;8@h-g{R0|J6fA}ZXM+kJY6On03ffnc=U`&1K5Z%cl$SYXyIIuXO!U)yT zSNcdM(O#sM{uQd3i&!SE^NKyzdg3bU_0eo8*a^QG)N9#Pwi+5D|)Kq)to-XeCE#d_&q*dslM9AWh^ zE0f_nOr(#XAv;I^NwVlk%*c0In2aTz=tyYw{-e)?uE{4hl9Z%ovJQ5B3R#Z+Vx>tx zy**ins}y2sdKJR4&#V>J+GpU2zTmXpfST_Bs1wK26kvnF$QJsMPSDeZ zXJZ~{of^=EWDogC=R$9=HITIrP$gca?1%XB zOJ?yTiW@+)uzDKOF3{Kwry`k)m0t9unBq>;P1rS4aAf5GM(5~F0-^FQh znze;u%V*ff571NYs4rysY$i_Qn|P{=*r~xdjn1=a*wZok5{%qo_MA;1Kj=>V8!Hdo zxEj#(saVAVtEYFNB0a?ZXdm_3WCx4FOzeT8a|tM+LhG3&f(3jL80~d^7>glh_%18{ zYCHoo{GfdxuSiSnHET}&Nj^&gCOHBtYO7w0#Ote=O)rQ2HVjIeBgie-^a$Mzb#Mpv zn;kpFSBum&(ga?%AN;C8z_!mLFLDKV>t6bhCg|_U6`I5jVN^N!j{ld)E6TWMy*hJ zK7%Y~4OlN8YC2cwJw25bpu=(IpM}3TpWel(OSukE1z!s-=N9^X7E5$CS$}|W`H4u# z!^WXAN(2qTE}IXf-dXT-rjV2Lp;i=DtRU1+6Tvij5A9kLxrJJa`mpEo$VE~J)+8N@ 
zv>WsmQ2b5Ppr=P(V8{Q}kM039+>1KVXQdi;axipP>tT(rWB=-v=mM;|&2&2{tP?t! zW&(A$=w|vA_rC)_^^P6G*cQdiTd{+8VkNDFH<5~S$fnoOqmgY6#_rw@Yc-mjf&KM{ zO-{qz`N8((Va^uP%S6H)Ehcl=9@vK0ut1G~Xj9DAEcjy`peVZtnEN!^38TFNdqdR| z*#Z3vJ)ys3LvS{=W9{_HbeW#ccxbf0!Ra=ZECqH@1gF(V_K=mP&-Hv34j(H8+T6on z%PW!u_&c?+OO4p4P;O>T$UB{pd2~Eh?p4-}4x$ZV0|Q7RtY$Mh66fDly&j#+6`(#k z&%L5ww4W@2`%H#v%k(~UxW1Nc1b#9OD}K0s0WnTD_+D0Ms$a(m`BHzVH6f*FXPgwb z7#Q;Cci0MZbQ2c3IXv13dR{N6+h_p%$11Qoi^w)M4g$_GGx}X$jn5d2>_AEI z08H@Ne?rMTl?}#BjMu9H#h68M@obOu6Hw3PNo~49-;Aq!(+kk+Ci)FFhFr!D{LadO z!C}#ZxO>1>TEKq0a7wJw&#}hfGgQYI6{!p%6p+9Gb5!-dfeQjZ8y(?|X)%|;K_`Dd@1Z}2aG>(Na7rjKfG8L!h zS*)~iWCWL`cZRiCM18rQ*av3fAU4(l`}aMog%c`>w4yw)l#ApKqN4=(EtLM%>OF>d z`V;K`4z@-ALkBRsmQ8=qqWUu}0I~UDy(1fe-&hQrI0etO1X|OLfUktYf4RwSfJM>` zYhgXq=H2855T`n1n*I$aOcJ;;a}dS;WkGz0M)`i=8U4UFUBU|Kv-R$J8Ty3|#2z05 z4`C^*kKXWKupd6)x8Lf0h?~7)Nr)P|!RtRsm*XsmfoIYV=Zqbed^5cPYxx-&g!SN| zbb$}>7Cvt%mEcid#x9e!VetA3BIc=ppNJy+u?t(l&)7jb(1WmXqltsffq(44DgFVU z*#&ED9cu(XEE0;)y+|X(6Z6Pe+^dKE!HyfGPsMt3V+3}C8B-bx*}bWSRwFwRD-0lc z7`GUB;Wc43ROqu0!I-v$+W0EWepk{G{#$uiu*Uiu){jh}H({mwV?8X)Y=AfLfK7)# zR*l_YYuPpI|L)ie^|>OniSCabu^ubL3C8yWwqI|K{je6h=mSXvEouwNC%s8i=r6B@ z_qY&oBgfup8<;md(Ilv>t)csI+TN#|VH2xEgC4a6@V`#$lHQkX1%|O5S^6XJ^j_hN zo(HdO59iHq#jJ$Tt>Cd-1VUSYC2IrN4@96naMHGdZ`p<(2lr@=J`O8sE8??i*ctcW zQLoaw=y#wrpMX1UMgJ6Uql;dKQu01bqetrxX5+ zg^kr)!4KHP@>nsPZ8nn4l35wrhBjjf`ZM@e57-x+ubE^mtHdT@tf<#!hDWpH$O=qy~q#z$dTC*#9YZ?fPx(mv3 z;hK$Y1Mg}8nFItol$!Y)WINkJVt5PTaKgUFDZG=p^?n%1Iym)*(L3`rs zHiCsC|5thLgV=K^-2lr_6a1=cq%AE$0+2}^tY1Wa6rW@ErC77)Tx2>-}otEV0VqNak{lG(d3!8Zy6!sLY zgq{r6+h_e9m~b5sVQ z&rdj!L%4u*`xExTXyiDq=(E{Voc$xfS{j3_S9h(E{+$QIhV4V_av$gHf7}lJEpn;J zyp3<87uFd2LMpRdts-&{2jtS)Eqs5WGG7@-?A!s)L5g#IXcyK&-^LZ?Ut=vJ9>WO` zL7!=U>RI-NjK@qqV!g17)@WliBdx+WBsEw$>LLf(do7WE;#0JA?VVVkmsM{q3BKER zRzsgd>%(VUNy^h(Y`9AG8C+MLX}{@1l)H4*Ucna|u4iaJbvIv@sNnkjr=RA|X-~B4 z^ayQ%tlPgl#~GlKK9MaauMrn5;9jt|+9JI_Sq9tZkCo=otmKq%kjUyRRvF%Ry!sfY znHPORnf!|^=4&7#pQa4q5=93XYSZDZ-H@v?L0pS`#&%WG+wdje%S7p6>?t2f{wQ`X 
zQ0T^%Xmx2ie~tB)WinWL%nZs>ek=DX?av)S}3*OgRP&kL1|TUn+okc$E@&LN{c=NJ`VFrxNhHMr}vxDu<~6I=6% zp51a4;WU3(xuLY?W(X1FfLcszPZJFNNEi1FJyp1gD19~A&#uT%s2~4IU&q~6lGMu5 zb@oIlPDisOZZ$3DnMcn`;WS1m?p`R|GQCl|tG9qJ6(W@+yS&6FL5W^1k(piM@Qm`~lS>c`}yyfO= zcW8=oT+0(Ks+;s;d`;Nge>?@~Bl915jN8NQ;{CaWT5nHhnk*KF<*KZ9m;0Ef3;P|j zXbr9{^N17KF=aMb8OU`2sHJ^LM$Evw1M=#O&+zqhh{^dVt6}4ZS zTYsu~A=V66Ggy7TDXGbZu~keUrQ_^1;_MQLsu&o!gWa#%1uu%IF!M_IyWuygG}IM=tXw zSHm-bZ_GDyEg-Y`<=h{_DL=?fI*yLw)@tAN>iRTRUf9OfQLm~o{1d({d9So&ufz}R zoE9Kdz+Rst1i|l2Bu_O9XAs_l<=C7{dD*;x=`J$6k>fukss%dQ!fc~DX&~pXKy*{Ds>@+*RdxjN zu$wK{>T-|B9rZnZ3T03Y8R0{`B(^6Lm8+EV*3}wmv6#-(hreuh(UUA8<5?+3*`5&7IO!Bh)&b`Y1I%J+Vps`KxwXY*4OLj zF~>&r0=PPwRu49^AW^hG>S$(Te#k!$MSf%i{m9i2O7VS>zrQCIFzz?3kV1eo|1mf8 z(#_4hI(f(Vbobuj-Op#cx5ulW`M1%_)Xa3(bp=RW1xV~@@5 z=WHrRxHIgHbK>)VyKcCTJ7V*WJN|JUbaSrRdCvU)?sK00o;|LmZl>nJ_qYUJFfx78 zKJQim=HM|QL7|&Mi-s);zaIH6x^m2@=wDIyqLQOVMv7tfzySYjKZpMd|K7gT{8DVm zHwDVtnsi|a+6{1iJ8F;A2Ff1K755uYJ!O@;R^6vKfFCsh#?;R}2F#e1?uPD>jv9H7 z>__q^IJ(*oX79+Z56u6Qt(Pq!XKap`lbGd`GblGb_j2y`oB}xyvkK?bvlp{X%ITD| zHs`+0J8xdz)%+#-ykmi@r2Mb)QtQN5_X-Yr6V^0hS43vSm8j+g*Tp8r9gI5=w?8f} z?m*!wF`41Vg6{?F_Z#oi(@Qkfm40&z^;%kYu#;yX4le|+eKxX(v7D9OqUX6iLWVTk z_{ez8;1ov-^Z8HQIl2t-;Y00>67A{fyq4d>{xT;ydrr0|YgCrQ(!kQd+Sl^XlATpG zyKBz8+|{bbM9J;(-^(;8Tff}K^3TeaEA=eydBGCVzrvj%i-I2e=Xw`2 zxb}*S+t)Ykcj(0O(hi{&5LEi!1hs_^MnkmC1 z8gKLUSa&o6JeD`jb}y$y_F!vKOI}7o#_^23nddB5tW~n2v!v{OIW=xZFOw* zZR2hAbJt~!unfptopC0u`kyk%6Mql?_eOYx$Wxd>&lAnsyELbI~NoZ<-k1yv{-B^RKr;c?Et%;*QirDK zKl8r!`c&$}sduH{U3NI!z{0BK3ET_{arj1WKl{O+REj2Cm zcv{Jf^O=pU_p(^FlCvt8vz4{I&E1pJBP%g;UuyPm-yfa6O!_$Oec0PNuk>e~pGYs# zzMQl*F^f^J3$H9RE@o$e@kMTzURU`-ofD1LG%3(vclF}s$`|h)>xtB<&A+1!9vj{#Tnas&G2s>8WEKpGoj$6f>J@Rn9~tALaqfg z_Wf%9A~ol(YtKAsj$O7KYj*mw)F*$mKaEqjrTV75N!yuTHX|dWilso-;q2=UrA2;-uQFJ_q$*BezEx3y~kA^dOvZ!n{53h9gWx&+p_SQm>n_43SBN< zEx}fELZgArVj2b25X!uY{SebA@^|oepG#6lZnEA^dFa@hGbyud+U}Hn$){3pW-W1P ze`C5apn@sILji-Ez3dc!K`iB2JFtZx#nzhW?oA6kCk8gefat2>WlkN 
zraTC`v;6*ux4Bl?SUWm5ZZ%dhSKxYq+_*#K&emGd^mEI*O)Athmnk1xFnWL3r+^<` zR`EFSm!s~`JTAL+rYWsqviGm~$vrGJo!R7-;i6Yl-}h+p5fMB+?0fXD!c&XJ7yVGE zR#aAqF_8Io@`^LqIGHt5in`j{*JTG;`(%Dg|CpNb$DZ0TL$S2ZYLI2la#&wjuUo5T z)y}S#vnP9{rETh;A5ow0zbo?k`19wFC*QqvBkgv=tGbpNQbeQ>_r6H|!dqibMy%-fc$QzI zzjX_C->fm2^U}xuDV;ndrANkUTV44nrKUx`Wddsj)eSTRbq{?Ql~8znk+res3q(b{ z2t4V%#4t}N#GeAXQ_>5i-AbLB zTI$cbU+2Cm?~A{x^yJ{3l{W(Ku6jE-rGIBGbZE1A5+TF)UjF1od?-hG0A&HK$+m6 zpxA)Qfzw0NqAwJ_5j&~SrzmU4KEL%QPP~T`Cz)8ZhwkS2rEMp2#%FJ~wzmwitgs%< zx(;^yf~+Or8+OdxoVNAP(v;w2zn@3He0(RoJoxbP?J4)Vyvem5GgOL9i9J;~E2dV& z^bj?yQQ@8CH`eXnxN`k6mA4nI7_r3vhRG@zIRU6tQ`~3G>;{(HOlO8E?PAK-KTRwd z`QG{%V^jYTA%nt>gzO4@7N7+mis~1;w`ildA_f11welZf>M3|=PimkktfZWspPieP zEoT3~7`Cz8wT!dwx9+oQ)|FWwtV1#rQy=`Q{Jq~7+s8TY2EDxZ==KBpGAMbsIyqo_ zfyPCe7VaO@I&56X^{BNahE_|cKO(VP)%@ZYB8K=qHVFK4_?F}JLdt@?_SX98kJ3nb zFWB~Fsp|cD2C~z)}22Syr?Ums_ASSy^l^We@JS^KWW~=WK$&cI2cB|*r6m6wC z&zYG$G=rrL$_UGNlv?Re$F#CJeLQWrC8h|!grHu*D*|fxD!#Kr_(BUxxJreU*jSiG zZVo7GO68BBK}nX>St!qnxU9CSIbE`@Sy<*zOF;JQoHNKOw>s#we%iDC{)Ojh} zQ?@2g|JCAWLUJSPc6Wd<#Y^t8)!m4AP~$^M-~K1UBI;*P6QcyQ#hpcvm?CQ*D% z2Vj<6Y=`3I>XP@!wi3Qa!`urw)on2jizk>Jq|xFE<6SfLs%P$Ic6o0L7#zASVngKJ zh+$!AK^y%JcnvpwGX5}LHT)6Gw7B-oLtS(8zuNoRHT%iD9eE7@DvJ{9Vzxaw1+#l) zwa==NHOiWrSvw;l{Y+Yq^t+iKb4I!L=!K-orsC#rrjMo%<`AEm{#Sz^hx}(gM^hnR&W;5)U_QZOMc(!<2$eglKF{nM%BdSwv0zQ~c zo5+IobEv54gG^vCpqib5rfg!%ThiTTXspHfvrJS-@zJ(f`TvVc1^K0gfwZF|<=-RF}r3Hiw!gzi*7;2+QGFRR( z()+dl+rXrNGQKv`b?Jn7MEYe6_iFF`)~l1LruZN6(ON326pPvhi26DAP?&<8ay5FGTgmf+mjDJfc&QgrLA+UC3JtGyKnwkWp*7Ol zs(+M2$|q&K+DmK7E&>xXvI$x_tvML|6M<=z1lsK(CoqB+z}UEhPhAI&Nia^GOQZ_f zqZa|Pq<|Cnw>ozQP~Q+BeCya!)DP>(?Z5bct!^-K?D@!elp_D2+IIn2@t2)~I^E*H z_X?9Xz>|y7@~9^+P8qWLd&vfJm^{W+vVnr6+!a$uA;0Q0INsSXV3 zZwE$Vb@f^30#giptPx;U%tdx#7}ywH@j2Dd7a$avOHtJ3`XW31 zS6?CpJc^sZLr)>c`xq5T7ZHyi*Z1n1!Fu@te4{Ax&$WR+MB{ocFfM+9Pn8OEMg!9J z6$su<@W##q(>RVlui*b51Cc(3-(L&1NoOECZGg~>(T4$HZUcNc2;3VEqf{0-`j(id zQmDU<)MNFM$T79kMmW1H3{}{J339U}f%XGi 
zVi+p0yVFs0F>*IMX%an3_tP!-znOSV0VdiK)dVTPsjnm7vMj{v@!&)%T6Y&G^sJ1`GJQNtF=YT^I>Ww+RCb{~k$b@qUz1N9n( zu`7cyK1%+>*I{7SGO3F@NvCDF-dsPf6<3KX!Bynia1*#y+-hzT*NhA1?$c?s9u2{4 zTfht4j^`;0&E+JFQz;hoD-4*r~p5nhbfQXr+vk?oxi_R(ZA39Q6Ez#*Dp9W_98dS#rof7Ma4F?#2L z_wK{jG0gTJU{*sh6Qh6)_0}hW<8c-Dcn=uq6RbHqc6UkqZc9|UkH*TL09x#MA}SEcp1PRIEocA1+zE{KRFR=_7-6zfQDmz z`hopr01A~uGJtA-#ae!c9kLB8yC#VPCn_GSzw(&fx?sX|!FLDXYGd&kLtxwLVz&LU zpYvg(UI9~FhclozR&O!Ps2MZrz|MLPHs>91TOZ=j>+BS)^nP{}f8WMv76Yek8&-%1 zxbz&@Uo&lheR>Yu_AaPI%j07CM7}3Kjo-?j=CAM<`TzJm{91kn-<>bbr*WIGvo(zR z5LyI0ll`!_f#5Suft~YUKWzt3bO^?-8|#9-+YPUNSXaxjzTPtz=CLAdVL$MkmSWBC z#CTrBx<87YISp1R4*a-7V9%S`MQwzZpk=E&)p+%ZvR0Xg%FkQKZuVBw)FD9hR%=(Z z>Ufe9!1=F`;`9LR2mX6W;kJ-1c!YRyBWii(NZX{bQhlki)E?ClrKR&?us9vn(uerU z{6_8*m|Xv%3&k1O!P)TqMrr}tLv@N8u6|UWDeu9sf2^ES?kOInn|ezP)LLpAQH^;U zOdkjMyz597c$+_H3vM&_fOFv5yZA4BD*uW<%unQ-@)7(yZXUACx3O2gWA;9Qd28qg#>MK8^ zsxZG|=JOapsS0PE#B@dtW*h`Y#l6D9~9g<-;f!VBRnDjd6`RyiH(qb2{6s{+T6fw5PSl&*O1tyWgUMbESJes+I4$^W23!O*{iUn>`j! zRe6-WSWc4@)l&?GwDc1=1s1N0Fjp)fjgjt3%M4EAaC4lOzn9(Y#RZW=L!m6U&S1zK;;8;FGn6gK$ zk6sl5JHQr!>2jC+hJ}lRRVe|>b)9o_|6u*U=8vN~^9H|^kLS;Eaoko~8+&&1iOpl#eOvRfl>} zt*L5C9rdnyR_hMd=sNBf_lzrp3X}z6WvPYKTKbL}Wyv(iG~Tq@bkme%`p5L%*wa|V zm}VGYm@Ewv7YY7CkZ_D&jie#Wv8-T>47=ut(;LJ z)FJ9+wW20#U)glouXayxw{+KacXCg2pK^b8e?g6Gf@i(wr)QU^G_aI* zp3|s&StE~C-l`+O4!J?z(-!U8 zZ}V)^U8Bp;(C`Z5u}6G>P6w$RMQv${eim4Dpf+B0D$|tG=w!hug7Qh;jU4?xnJV3s zMalt128NrYZDEbztF=KVo24`d9EoLIE;pFJ&Hv&p@bnuCZPCr-8Q+`d_(NQAZVo+# z(`hj3CaSUxno}LF%F0S5PC16>{oq-L9wi3PE%(3feeNsnPj0t66tUP)&kD@#4^JVv z6Ka*O$rd>pW3*5?tK=y4)gLOy&Vj#h0dqTzuOW05M@qR;VZ#(dYvT-4FY`}xS+Cw+ zv%JQ5757?+%BAimmvNTyvf-0dOgfBi0O>+aVIbchtL`g2xT1P1c2ui^JNuv}sdLpq zYG1XBT0|wPpW0P@j)-WDc0nrvkLeAV>^b0Vuc92cmJ8tf!k(w{iNbtTcx@Fn2}^~Z zf?1fyXLEfy8}QH1@O*db0bn#aw2skK^A4_E#L51|GBRcI}pl9oyP zq#cGBQ+>1B?Ctf<{N4P+yxsf;)wI)$KE^qy7hNKr#+(cmVuWD+U)lq^I6xoI{sZRa zLACi~@Raj$B9?-+xTx9C8R4Q*1|4Z?%QnQ>?>yf; z_dQ!ZQ#~U+{XCuVH4E$6;wdJ#mB(YY3oEmfeabeRQcIK_*m1uUP93E(tsyYdHoVN; z=lb#B(}%|kH) 
z7Yxk}InowMLr;KBxX+{fC2j}Z@wXmApNh&oUhANSsI}GJSnbDgf2#aTwj+!4RlX^Q zDL<5bsz3aKO6VQ1k@>>I-Us_O54^V-bSgD-7q~cn7QYG=`abYcL*c33#JUi;%ZQ-* zgCX6EoWphY!0TM6F?GD^hV|?PtFlFIDErBuJo_;Fojh$})z@J)eDmabeDKa#?Dr)3 zk?fG;mFYNHexONP2yA3EwYl0!ZGnDF!_+<+7<;57ErAH-CoRWs5eA8u#OvaC>5?J9 zRKh&MT+^IyDrVkiK51@ZUSN7=tZ2Mzm~Y6JN=c){Cqhx74L^iiNAH8z-A{kYBG@Qc zK280pZda?Qx6l`(7p&be%)t#@eX8oGodfH73wz5-!1E75ghJ^oY6rg|1E<+7bYv)p zl@N^;mdm^OY*@nSsBgT&4d+5RaPts#90%hyiM3+Cw4s_+U7(g%UnoN`7N_KCa%((w zJvkBH!U%b_yhlDK-$q}GeA%cJR%$4H(dFc|;;R-_Yv9@Yse`d*yQ(G8wPYYVW2^#y zRX{cEP&yk8-FW^Fyou$)E8(b^EA=t}FbN8fyA(Y;4Rm$OdNEf!cXp ztR?mnX7R_k54gu#;3Kxy-?5h1ud&)fH5aQeUp_70kp0jj>9q1u@mBxANmo!CtNp9x zYExJ|vJw_@n|9(pa82ONzCkCe5<*vDjqpK;5UYwl@Em#wFZiqGyc0Vx58{bPL_ zdKdG0Z2HgG$(UsLB&vKlelfR`oIy0akey_$Q75ntbsIR35ue7Q6B{bMzz#ePzOp}f zuhFbA;+pzw5UavkYhBeuSfZh-ul9*8L4Dp1WDH)>mRv19LR=zcN`Z!0!%@i~eHVk_ zCx=MC#W`Y(P?5WfzHDvu4D>)7&Q54Ma1yLk{1qQq9Y5^A-AWYtpRDu5&@f~9sNmU&{%!Y>m8t~>iXi);b69a$qr>bg zDkd%3EtX3x+#8|2v{7m(m6M)IGYv-L9b;3|2V;H1HgS@$i7yOqwmkP8Ea~D}Z}=Er zkvDsQ%I0yhuN*8_MK6{=a-4D=e32xK!X2D`<>WHx`Bl|D!g3H+Eew4dWhYpXZZ+W zCHlaP<0Hji=mqr7uwQy5&Jvr6f|wu#@!wHBQ&hjDja1hvzUTlm)id7xudA-BBl--v zT~}OFE3|ij>V4DK5Lz@^h+Y^L8+6t0gzql@{edk5 z>UkeEyc1gpk=$eCw}$Bi-C;g@ihGv33wox?iOLVTCq}22@H+IOV;|r(;OznDqtJh(RG|12Y5oU^z57bV`7a*DN%L+$CKRj=(|N&N8_DsLb%@!J?zrhZy_oIf^rcLHJ^Lt|jV=LoQ z;~aDvlf{>aiZ_9;dIp^Hg8DmTh8t>Sa0>L)7Q>c)f`3sRJtNyQk!fJ^q_a?D(XOIu ztR(X5+tBZD5j^FQ$^-eWXM=kws`n>51D(AcBl2J7+3Zv7{p~5Xv$nRjJLre-C-+%y z>s&3TY3|kBAGtSk#$-><5^RDJ&$Xd9<%6#CU^Y7Z`URPT3j0m)>f{?8#ueBZQz>F} zNKW9Uz-l3%!!yI`2aNN|F`YoG&vw!(u@1kDJ&?QMEN<-C3JcuaRnb|(vDvZNImXq` zo$1M!AIlw;_S$1qCCJ<~Aw~=n=ZXysKBmd0Wu`r*fAM!UL|PAw%?%#05*o}_7qXGP z+=HsI#>kn~0W0vZHW>I&9CB3gUy$V0 zInNz;9rrC)1=m97FGpR+`ut+~Px8j2H&ebn#lFiPhxtEjv)bBWH5{~8uvb7^;*zH zhR##PU3?rroi1kM5!a89U%K`?+Bx1iJ~(7Yc_2H>9NQfk&O7e8vH{%Jn_3;zDUGKi z_~k-vk%`l#*U}0@hSA%6%v9Lul5RuobvH@7sMGE9)Jhy#TL@Zm2buhf9q z)QajQM2QpR$?|#mklY2`NlwT`5Tg`P{{@cJM*FO7WG%tcuZh^<3Ta1sAb*+0jYXy_ 
zlbOY%EUNS{Kv6QX>1u1}CQ4o2n@95U~QoE)7?WVa7SYkoV zgzUpKL}+DIifI2gPLg~?2z!*xh-LP{Q+a`Yk(<=B>Q^;fD-A5EIWk`>wFBCJ+FETE zzBX#hwc&{OblB=)IEl9@7Nw5Hqej<3zS3ayBP}AV5N3(}q_0wULtUe8EM+QVYG>+W z@;99^9ycyCW*Q0^wo65%KH^>>0TyEqH<%tqh1(`nk}Bw~wnnS1d1(ej_lwl3@F<6> zOVl;$b(NyiXfM3`7}@xitS6g@{uwX9>Xh-7iT&?Jje&yv@OS(>74c>Se1;CFI|)V& zQY_-sqIv~nj*FnCOavAZub0Qaivj_mdM2~79Aub}AltW(%|pM4qAUu?gCDXCe#`@Z z?XQi&e;Q?lSOs7KRnR@O50HwN$Y(ysu6v1ncjy1eMMI#vLoq^etO^jQNx({Ka4Odd z*~-u8kJv$2B@{!Cns(Aq zO1bm;Ok|u(BJ)uQ6_*cyQJlsO--Hb66?PBz@Bnvz8`r;vcdp{S|9~|9ediO#hGB## zaG$Ekvsb_;G{WDN^vbB!t&f_!MErk4j8iXghzG;6^w7H^FWm#UL>qYBz8Jl5JrAG# zAI4@g#%2cc@@vqUc09Vvj%5>&OC5pk8UryJquC<7<{?YHmTdsPe;$5n1UjmQU_^ca z5$ z8tq2GUN+#FDggVcgJ*7qoj4R-0G8mj2AJ0kR2h6mM*pq;1zm)5z#RVDxB0Iwfj5v% zAK-w=K;3@e-6ufV4j`gig}%5$QPnX5+3qGdfh%M8#i3S_*BQp$f|d9KuMa>B9ss$z zglzX=jLbZsyV9sUn0IG%_p;B-xu#x5%8z0mwT>_>z z?{B9Z)Kh)L>bMRJ`T%BgFWUnT{t0rb(bykFu)3-O8T<$6&nzH_JHeMfg(v+0r06kT zzc3%v3f=!j<|t5d^cp=7wOx9!(;(DABo<|7f@B5j*7X)6g880 z!alSUBDML*3jPDcwkmMj(x{V-MGt5@sx;rjKl)qwW+OlTk2oIx|0r|>?iC9ZiUGrV zkNaJNkr)V6;x5|_Twpp@^CaBK|A_hu@FueF?PTKKRFb+w1t?J5b%BM&-DPnWcU#=u zS=?C`S=`;-tt~BeiF;-;lK-3T?|VMxNl4OUD!S!ydt_MjX|OT+MB)+Wy8-SNP{+>w>7L2j2 z_;j6rCeE%eJk&whAcq`)1|uiX;v$)SqF`PoWg^4f$dlysHS--lWo-EXBDV!?<~X2=6zHZxeb^3_B6Sj|8eOxN!CX zZ$1Mp+7BM69Y#!ZY;`eW(&3w;;BV8AyV?N$=q6nKW?cVz_@{N~qYY@Qu^3*Ai&0%zZ$QZ0QbZ09#z*$SNdYP7>CT<25dCrgkYvSHNZS9*{Cs&xhGZv|>1sBi=V z?PJCFvf*L8=!d;H$_n)QDfD(YvSAsxLs7WDdf-E@!=EJJ{(TMp>lD0cF&7E17MS@c zaWAijGj#i_z~WkCBu)V;AO{)HmSDGb#>%g)J>c~GAfLC~y9mFnTAF{966>M|13gYcq%qQd{mqU5CA@U$r zcDsK!JVhEtT?BmPHbj=%B44@uAFG@J)Btb%1Fk3^dF?*j6huB}p2ze-toIzE7^ReuYD9kbZoq22q5Gw?z!B}JVvn<}uqZ5->{F@Z{Jx^yh#Km} z1EsfQyOlkHx9UfQ%XPEEuIdF5Gj%((UDQ2Q*~)CiM|o@c5NV8{1v86$N-YK|YBD0m zR^L9K1S;K6Xf=0|eaLj@ni2y=XGCAoHV)wxQHF$+tyNxAc92h%bQO<+2J&~o2k_Yb zgm%diwhgOcee_V@-%wq@!N$S@?!q_tG1I7z@x1^FO$PQ1GnpC8DCo6hwma6f&w0vu z%qelMuxqUGrbxq>3TZ{{^3u{nB_+j6iW?WtDSBE^mNz)NFl%zjQRZq0FM6S_P$SnK z(NBoEo9wCCy3xgkqiW7d-I{tbb#=Txc$H+A=q%>YKlrWL$KDF_sDi~g`oa`TPxpK` 
z&pnaMW2X5Z^B;-RREb(9c#!T^bXa2b1R`>ZcDizkVyR-KTq12QJkIU#j&}U8zjIyn zY;wu$D{NNhF7hZH!H$RGd5B=N_=EhDn$)aQak59!#_~;2Jl`xj!rRRJLjCTMxvDun zSXY^TuXtLfD!)+9l+`Y^6sMG=lpZLZQ<_@Zr^H({u#hRJU(~ahUtGOlQm*j(Z=X~0 zOjO&LZncD|e}^?t_tf`Io?l&8w@=gW&EM7)Rp}a?7d0^=NqNRs>S#bV0_VBCXPaq7 z$@QP&A8iUU?0X5fe5driu$rWe`b21J{p0ZUQSR8L3Euef*mIE;!MTz~?0B-P?*m)G ztoL-XRyBS%B%ANpTDTiijp<()1Gs4m6dg5b8io2d&AH&*u&ugN~54MS^VH`A}?(`Ko8k#V4*t)b8`+3>n-O7XOU$b8k$ zMY+RrYJboD)Z)#~ce6@zN=~KKG~N*Cp*j9v$#j@nVjl*Ysx-*+so#L3=J8sv8vO;M#QvBI1qm^c1@fk!4l6z zp3^>&@VUdTZI)%0Uz|%k@0{(-^U5m>7JD3dlb*nuc^^e`Mfad?p;vTw!ukdWX@;rG zfa$BMt)X}(_{Pm-tiBoKIJepH-WCQ@O$|$km2YisE;amOXklt%&9tAeO|ZyJbqom= z7t2JY=ZdoO`{vHe(tm6HW#K37Tgxj`_D8xn@p-+qHM&Kw3lT@l(mT~1*wo&tdK-S@ zwW(vm%QZ^XAraxVl{YL2GGB7_a8@&P%MblAEcbI!ye)yB5!^mHJZ5@$cG&ZXKjNK9 z-xK=99Ey4zwJ%~=aCgZRrnPgo@qB4{X%kZ$$35pgYfTenUg1LB2;N`Beeo9MbXS4|;sHL7ZHnPsY0c6;29*OfAgG@1o%Ui! zPpUD0nq;jaRig>n93}{h2!5v?rEDZ$C+R547v2@=L|=GC)B)!co7_Iok?vgUjC37x z)o>L#{;_YfFGk$P0G8H8tKPQKy3jPHEVpoc{vSX8$dzR8|MK(0t+!`iaUX|O%uo!g zwik@1^U()&YvOuVn^;fNG_m>YrqeQ)Bzl8=(!N43d(aVG+O|MY=Cr1`RJPD^rm%0( zsfu&1fr2X`j`-zOS0^=y(M9%(o)~`sP^~GtgpiTi6RKEo3fs<|Z5e79UB1!K$3BI; zNJscuc$T`CQ+nPa@m#q=wMH{pdtaL#^d5TF4`n~aGX#C0QrLy)z)xfMd1IYJ?El!O zp|8@NBb=?A^Bvvor)>&*BYU{*fVr`8ks;SG#c;1|PSJ}zfA)-TqdqC#&40E1gQ;Y! 
z(3AkKTzb=W+aS43=r!qDB9%p)w?bWK8uf%!#6e8n?U znDY&J(Wjukcusi+Q5V=`qPaL;USHK)T~BpN{+sl%I8OALXbnWSnbZ1Dv+aBm&k@Hz zHiuPeJ7#-p|L!n3UfSK(5=%=`7zoxRpwQO<4t7MjeM4M{5!Iz&nJ1N1E;hD3bU?F_lC?HA-%j*|F!wduMZz5T1Hx2cCM z$UBMY&dTVmQAQsolWX<(?$lGc(ZqNl`7WZ%;eh1$cF`<8mL9SdyL zt^Zm^TbtNj4#F8{A86TOx@hccs8Zg#WK~h~qKu-ff@gWvvnPDn^{qqkHabn$pemhq zBI!+JV)(HLas0W|0X3uQoT%Bi%7pOWRcVrNfr>45J~19Dk1)=%^>HP;Mb0Hw9{5i0 z>^$a`?7q$)Gbpx4EHy{jHcPXrCXHf9B`1yw*7jvonhRbVsXe#$TxNZ{pUdwh^kR{Mc(% zc-6^Ti)v-09*g={Qy@JeYR_*)rP@M_J&d)i6C4el5sorzw)v20x8sK&0ZHr95+b(@^v8%Mc33lTBx<~>a* z+4lV5qEo2L&>y-o28bws2{STd|pZQ zq7?-%f7Z_Hm>Zfq|A#&Mac;AUb?nqoU&7}kGNxG15C0OSPxPhM$Z*%%T;prfWPKME z3%=A7?y0-nOc~ypPT1x-rr9@I_89FIn+!T@4f2U_a?pO=OI<<8)!<8^+w_eha>66R z8GTLth0t+935v&}2YwN?#hGb;>e%QpkrT<=?o{U`hrl(_bCBx7_TjG*mO^7+B;Jge z?rqTw!5pkXto#9lmv^1LNIiEqbNp_dYi3N7%$eq)#>VAUN~#v+6!ghomsj}1l*4B0 zvPb9sQ!Zd++GjDh;|@eV37e{G7$r}%rY2+zsufqGeR7M4$w4y|OC%z`lH6d+H*c{f zIp#ZPD`lKhzNmCz*$2}~&v9a_GD2GtG+xssXm5yCA0P20VnC!Ta&lxycx)(NBb2oz zHqv}=v3tMwF7Q&NoK`jwAL)OkIJSVsz5CZ47Sws=g zBci)`rLGlrk*%wBon^6UYDL@9n?+>>y8Kyr?Q*aGaO5QB{K#Eb#*nR**6_~J{%~*D zc>Vn7g-JEjC)HeCdvHdds+*$ALK_C%QtT5Rpy#`;+6OrzoQLf;^ZJUPCBC8|CCNs& z>!p9AWS+dSe64(<>XY_<*u?N1k^7?`#E4_YMefr*)jm>u6z=1CLd$mmqh{pP->x6F z63ZRSSF7Ifhg(G5r`xj=I0Lk@`e9{s7Yg~Kgwf&_;-SJ!{sGoaZS~|h!yOVZ(bCMj zDsGl6EX@AdKeuJhkF0H3nOOtB@6K*fG~IbwbW7VOtVYOj?Sjz1BiAQ-(>~U$kxA6- zkTN`$7akFML^DQu-QR{=9;@vq~q#N3ITsM`}Hl)o3g1Aez75#>Klb#dLaF_try?beg_O|Ee7cJeee zjQ+^pg=WnpC~^)FPL=$WilzHRH=*$Ml^N>0iJBaQr?#`cwXTsVOD$S7F{U7DmHEV)eXm^EbBHJWvOs&Z1oB5#T=TuSRx|mB5lS8+w)Z%Wu znQUh|(YwX|mnpC8YSGDpIYkF6hS}FZ@BgK*3saSMPBc|IWs0Bj%D`lP;<#XwgXx~+XyRPuEOt(Jhmo^=IrM907k7YvM|fEr zDw!m1BU&w33YC{T+()*Q9!65mZ`RSK*64|}LT8>JSMuXWc6t`?Tbu8){0^3p+!yId zRE~NBKF6Q>^YKm7|EztdPDRZoX-$&sar0uHhZkuHMSbx>VhH!0JnNWjYFE0Ypj-a# zqBujUP3-*c80gsJDx%ChrF5#A3^D6tA}2*Xi_VVe6mu>zO@AzOaL9SBP#q+l!#_@^ zdndRuoTTHRqp3Z?`o_H2!rFc~8TSG2E~*aWqw_->sEcH^MaQ}3-lI0F;c&_R!m=Vm`CIdP%SPjc@>s)G+ad}} z9$9_$AnoN~V+f)D5_LK*Dn2V_OVq5$sEF2K@6=7CwFT$=DoC%kqQ2qoR@XAtm}J^) 
z6*==(kgE4nI*M!bH5c#craFJf8xmhUdv-<@U4F`g^67Hr9j z&3*o(EJu`e=5y-jH$UE*_i}qg)dkI=MX^Lq>V76o%J{9$yjpuwmn5!CJYRKVYEbh0 zsCyx$>Spq1f@{oJ&m~)~K~=WD476MGHp8xx_@d#ZV=TM9Ke?F#xpfDwuKFV84_zDWM1@+| zi9Jewa)sJ7CYNE3vAv;f>4n0HMGYz%*zbAg)4kas|9$>h(RM{@h%~Z!%#7$|k(x+J zRNJW45u0@rLc)VSDXikz#7WNO^LvV%+Z<~gX^u&@J(g9Lt+o};E}mDU(YK$u%9d~z z-X%e__?tLbY!%iJcEb8?m_Ly%@kNpYT@u?cQ?&|h$?1GD_sfsrIV-c?eBJSReAb8Z z@4nH5lIu^7_VpBX2-PKCuGX#Q)oR<5w!}V7=#jpu=DTWp5^m_TwXamOrTd{_@~iKf z>x^ZDG0U(LEKE!p7dX^9p^SBW5W z>X4n9{)!e7hrrES%aT+TuiVW#p4;YIebxf|JLdtH&>i9_@iy^2Wajx}h-hIW(Jav) zqAXDz@eYwts3M}E>>ES#$mgzg_BhL7LqeIa=$C?zd6#~K=KPtZ{HFg_<7YK1&9>yv z@hx%Rr+X@9M*UsYUF~GGB~^FDr^TD94zJmxc4+#&*a)3T`$C-{|0!J0(=cZD4{L2x zd&Bp#_r;pxgS;vB^Mp_vw6l(x9~qqHSShhy#W=h9}@rfPjk84`Uqbe{GU7;8dN65qnS^o+69 zHZ8BnFYR1HlvOj^#I0{NhpaF^UPwZR%`oe%Pmo8qrsxLL!262SaXZ zn+6?H2Pu6LBk>z&@jXSIybE&7$!IL-YUn>Ce;URs1N+o}C?8 zIKdeulFR0Cd)!r+D9yh3U()i@=cKex7#_1T{*P2o_2KDtlMX~p30oV)R~CpnKo6E? z29k&D=Z(c>7fWm9Y@g^HuRk-HiXu_%&$G+D)-}`l%yG!M**(JhjNIlEu*G1W7V>5i zKLu8yQ#4k5PSjuc00`mPgb`>P6BF&5=Y8aEvg*^&? zDJcJWKCfNg;-cY>P*FeCVqqh4nJ-egF?LtVsI}0iniWxZtxDKck}v*WYTH!KUH$g zN_9(3R0yM+q+cKQHDsywi|UEIxBR<2S2|kA=P}G>-xbsf3iht_d~B$ShyV712yNSGRp#WGoi6K04%6;gpdE7_ZQ|wZJ|Tj4qCP&XtA$1C7?8< z&)os|k0H)w_BPf=mOJL5=0vmDTwSqi!PBm^YUNm&9NHOYNZr%s!1o5B9@H-?U z)vvS?oh<5Wlt27h_{^xLk;g*jYlz^#!={BYnruagBo4|;huL=ANM;$LxJl$0@1Gu? 
z`<}DJNx4;2E`5dFz$I|&ffq~W|B8A<{|dW{k4m0MW`J)SCh09&AUq=)B>4xG$odHf z5dT8cX*qJM6S&J@LtWy+{0ZC|<|ddoC2TchA{k`VE`XIa3^Dh0+z$3GdlYJ$wV?~# zk6)XcL)T~DNI^7({pPu4)aC7)-Z zr_7V$TZlTRjhN+?C;aHAe#kb>hGyn;%-`xE*8Cn7GM6xoz$7!%-9RtM4|w&i9OpW8V7hyl4Jmp!O5^D~Ml32PA2d>Z0eO zO|mnJs`9SVaA|$1R?0~Gqq@>=ti|7mN1^JX6+DpB(3S3wJX!*;4wnH$(sMQjSWOjh7NpYaNQg0s~CmD}#1 z9&dZWL$p9ELP1Pv#4alTZ~*?GxXVg)XI7&Y9=}- zoGYFr`y`V~S4aj*`${KD`bl0$UW>!MoU|u2IgUjQ`|vRO+<=ji;lnxLyg#F2-qWj4sb{sXxsT`D3trnsvI{At9-?y30`L+pvI)GoP%-%IUj@8* zI9j`b=%=W^sK5B2)GOU8@ksWAr)LmP6hD>pmJ|z{3nz+x7mXKGK_6N8w*yQRV0n5o z?dcGv7gLX31ibMRIuXYU#nrB$f2U=Pls(OM2Q#_^-nW!@1u@=9V4KVplnY#fWZ@%W zmaxAt0}Q>Y!l?oeQ36EEPw;#;VtWs4MSv0C4xEo+P)d++D}mw6g$HiVHbvfZ5m5D? zz+5U|wlVF&!Hhr!uu-&_KIv=XyG_MYKgliReeV}fJ?}B{OFHOX@Mc7;UFlZ%xb6?v%i$&>An&J`sH@1qtT0kruwFjO}1ejwkx zft$rV@uh*!a@UuN>XP^Art}Hl7`htjH?KuJWiWe#tvPOlHNt&(JOrbu zhHyA?i6@0+V0DEHcM1L^-ayTN2>%b%HF^kK4bR_}8w#Z6Q#KNJCnxZ2d!Zolm?3e0 zILXXl^vo@~7Fb?&fLH87T_XvyvG;F8Pfoi2aZYgTvVArVsVKG#^1SuA*uR9$of{lV`!UNnQ=;)w#e}l_qGS1{+|~I9o%58`tpOPVWa<=LR;p?SKs`V8G=ZG~c# zxSjB@upxh|uiRA#b(9qz8F`ru@)fcT1T~~Na!$TkR#$RFR8_P?;PLOHKO&4)P zpA~gT2hqahr2st2L!jOsxa4Y-nSd%a>)p?w;y*(a+4^OBa}1xFhUijP32e(wzh^Rkve4R*SC2 z#UG1)8{JReNmh?u;xgI#TFzO!I)>ZzsAku}GSt zf;Nv*(e+S$ax6EL`2_y#N#AyAiD!Yco1=;2h<%8yx3$0=Wr{F-FL#!CO8zaf=Wong zmi?tHHn?}qPIc$j8CTO>w^!Z$@q;zzi9w#*rVS>IuM;oI`LTF$iNjv#HQKWstpBBI zM#TOqeQNK`oSu{#@>n%U-kkTvF~P9Ef-t?Yo^y@yTy(fB3a5t|BwQvvsyZ5~h|Gvu z5E&dkLeqjsq1KXF^esBu+t1U28q4%%KVi;j=kEG-f;*Bo^6TI9ZJ%fyXj?=d^WU%g0BLO}&_5u01cr{({1m_s^U38y~Esk zZ{(S%(c$xg)8(ksL9TE%@v!tS>{@D(r#0D>i3ej|$-BU}ij?5)WXi{hm+!-Q*YKr~bx;S<;+%)*-6TgtM{}pv`N3*2Y%QH# ze4waPVa(41KgND7ElQ1;+cc{An1=tRE=fIJ{dB|#$zblIr_`S5I?jaqKiZTzqrOE{ zjP^OWkN$hg^u)V0nq*{Ddzi30I7axIQTXTcmXJ#8V&i1fNy|sG$k@mD-u9Adr);O! 
zM%_ooZH2yeaFXJNXbiuC)%#3j2t9zxb|%}JIZ~;7u7sZ~Tq9|wn4_2@bqZS%Km1AT z51L|r0l&Ew-NmP6Y6Fd1#t5K-yM(vWf1I(R*LHg}?)s=9I?ye1*F&$>cMfqpu}F-f z@`SQCrJqY?7hTF9ofDTkOWd*k_!hr4=u)*f?o{fxxPGdof*<}f^nPkS3xx`2+Z_4F zy9I5@n_%@6$~VPMOS_fwThg+a1)-1R9?@9&LG@|zSMOods*1;k-4$=kbcQL8y}Y^V zCAx#*qx3mpQK8SYCe3~IRfSTtn2V>oQjc7REd9(k9UA&i)Jp0s8YjIbn=Rc84&p=6 zcvM2Y0W6S>_4=INhU6If5(~9#_A2Um=y*#|6L!4s36(*ndpf)OyNet?%Qdsg9AWy^ z@VKm1nX>#-*>A=2{0`Z5jhfiL&BrxcR((Nat=RCSmf>HO4`hp^$BDL3_1{i)EX;rR z^V=HBTK*7Jy>1(PChBe^Kca19r-+zPkv1#rVk8^9m4DyXq&&Jjt|HA4X4cx(i9K{|T)|6empQstk3w9?jNqk80RbiFr zMGb^{0jhE^QOp&#A17kNXqFlPmDnd_8FdwN!vbhJr+V9a9=f(UhT67UUs@#QWrl<0 zwaXWkttdTKoLw*>cbd6o)RTt7dJW>AsGo;Uiya@eLT5znT)T7(9~vTVGPl>KZTah* z54k{vkav${Q|`BJvFjqLZI&QA-^6saTu|PjLTFfC;V(N| zY|5YF&W+txJ0$f@@E7UL(EG9Z@ktACXh3S`1KVKObU*R*8WcY8{``+cLJj&h3o&U1-o z%IhmDl%G_g>Nucy9*PnWJz#;CtHtTqn)Fz*)YF9=O)tT`EtUHln7nqt^Bv}uV&ypq zh{Bfs*34I|k>V&l)ywO1KX*-r?@cxhGQKswG%T%{Y1ml6b7S+YdQYAVEYgYM{Ch1$9uoJ(!>%D0)Gu@%xZX}EZ^e4Og4 ztgmvq{(SUn-2}}Pb*TC`XuMo)CC2;Q}lB3n)9kF#fJ))sO+)@Jcunsb%9Xa z4E1eihz0^FeT>LLr5Z7+2Uch0nE$6U61sxCk9FO1dIO^S3QA7xA(xW%sCLwKWc1p( ze|HDFR@={__UU3mriwr?iTWlADB6RitK? 
zY=JmI9K>r(uJv3X%iS@y_KwliDf$Z~@hPzONM`y8IOSz^5BVMG9}2m0sl1o6t>&8& zEOWsJ;ZR|oKa9PIHEJ&X1)2-H=!H}{xssj+zQP99$V>nWW;tqMTm@pczyCaRZoYxr zGm?G9gkl!@5^7_7a=1t6UgE0fn&BMpsBdSj&8=>W5jAW_TT9IQOi@OEh1XFcXszaxJeNi{4}(d_O;@+@@Yu*@J+fCA-`!8 z)F%|Zl+#rTd9W~0*RL+eW9P6%^bT(=cP&(MJx(T*&8ca=oxW)1lm8>)CK5pt(H5{w zM@d`C>&u-|qx7PTkPQRFzkzs{NuQ@gOf z8wvd2Sg0I0aBb_EKbX$UUswm7LC^R|j_gAwcwZrIyvGr5f8!iSkLPG#xVMX!$E5ML z3Jj9IvhAWR#2)c*RjDRl{Zuta*+y|o`9+hjnXeScSIWwzuO-(dgC%8xAZW(T!%A?5 zKZ5(nG=Wma8Mc61z&1q;XFk+sj&Pm9eDHA9{3P!Sq9t9huE+ytz6W1Vg!8vxtvnle z*!AFHsJNS08E$1n>~Lld9YGiSZh;xSlx{`u^z{J>veb9T*U6WQx!o4(I(3a|iB;oQ z?=bHm&jwGt?+5dY38yJ)1O1MD=2sI}1>FUwQAhK$FkSLkvH>jUd16w0PWo1MSgMzt z7H3M1Ne)OxOH#zcg;xY!u-@w(^_9^&la>S0HiU@^}J8WI{8FQA&VOnD4Uk_{k`^+nb zWkSHj|H8~-rZC5tm&^htj=6%FcpLTw*T%m9DuYv5ITWUk0_!^xu}~KurYhVkY$-e| zTp{WvmP_hMMvCu=>Wb~C!2Cd*A)Y0=0414ZQM4#scoAAU8G?FXTfK(D%6q6qtp&1} z20}ZJTMca?o<9LKYJLOL?q4<$yoxPc1-lS4NgtwlgTTK!z%E5B^d5A?Zlcn~Wp*E4 zIlv=wupwX>jlx>~ALu!~fi@DyH31s93>fqGV9Ib@Q!ss4t~*#M?}4RWjObn*Q1uKi z6O4yQep|30K7c)=fntD$Xb6A!5Ik~?pb?@*X9Q&sSIme0k4?ZAenQ^$H$fEi1#*cE zL^yF0x+?AXX{c9t6uA6gUKnunTByt22BJF{`dYj@qCvhAPVn9#8B6tKQ4Ii^t4!n{gVAxrxSfzs}PzC&nMDW^%LSbqq^v0%xNw6Kv zg0oPox(@}df1&vH2(L~8ojng2kB;D4a-a>x1CK|IURVbdbQ)L_ z9l^xd1cdxqFjKDJEH0z|(0Oo~4&&?smDL*IT4ZQZ9lTLMukRy}{((9{A!y%3pr?O> z0$Ufh-vV*v z75IM}cn$$hOHQS77zfoY0)0S%cVj{y2=UsCGrt9{#}*vpA+F;Mj(rVB+yiDqKD@aT z{gs2Q5Wk|q#t?u76S$&LywN!ODQJ(I&B zNAOwT^B>`F$MJqQI2oI8#T&rp*ob&!PqZhn7TDKm^z%?e>xP1{F$OAUEm41`11v>= zDwfttZk85(4<(064o*5)CMxKWRR?mvG5ReM@x!X%i8R8iK>vuqH*nx8zQb0sFt#4T zVvd5#wHK|k46K7AXw$&E!(aZ-@q@+VD~KclU=FdD1@EVg+(;Qxqm|BON93U zy*vfDBB`+WBS745MN91D?L+&V0;l5^?EeKU?+9AxD&oumm9{sPt+yXam1n{G=!a{S zV3XrP<5(hGaWq=02JEXQ)Gp^jBk)hy-vVSLCW1NBANJJ@XV4N`V|*hCZ4?EbPBJta z6S1Y>lQ`^C2dfUPd?-;{!;Dt_vsWMc^9&wa~y8iSuiU(a{}sG6z9Rt}|XYL(61mF#_BX21Rt91O^@qK5gD+`^{dU3jE5=(R zDB3kg%lAbK48?O}{IV*d_KC2lB3S!-c&umW#Z#3dtq64y^V!|`O@;M0CB!;!A(W*bd#p;RNQD;1N1#6`jUbTkj z2rwiE;9WoD8pgn{tf@TzO|W+j+I1wfO(%edxeAJ1%fOXc1FPx2 
zwD}nH%8W{m*nEuaY2aQ3^eQLAR|I-Iuyw=M9z79ISVavKjP~yE0IgvIz0rsDD{Ztf zykcEg#4pH!)P#+s;23qVN@)WRS{+*|`bP@8tAbalu$e^IR$xp86jxL6h(SMUU@`Id zTY$YML!Sm$T3~?Z6zEq8Y)J>((pUOy4caDP^-|bHfN`e4uQm848WpV>XhjizA;IU; z%HJz+w!L6aePL^;mxb%-f%m;I?z-TLI$`SwyK4;VtBWhBfoo`ptEmM`51e}yv|2KH zq#S-J5B@0&zT+w4#BX7dKPr1SFy;bB4Ezh6tsi601Iw_&dz8W3e8u)1-z~uJvtdi` z@XPO&Ugi)k5o4!}}PF z-8g(F7SA%=2l6m3%HgpK@F>Gdm_tR`K#!`>b|P$nK9ixPL4QI^ir_7!cmy^LtU`_F zz$fC$aT4fj9=;j4L!w$AHV&Wgu?GlVU@w9F5ZFtgH%PSZdu(s<_=Ic!fOdL`md~&3 zou4?Xz(_SrSiq+Q{0*=2`n{EB<-jkz@I`iT zZY{W8W2OHoz;j@pS%#|#T%jM|kf8Uy_=X?Xatp0igjUN(EB-{^72!D#)>DX2{$man zRsJm>R{Ra4kZly;90fRMHO6*SrJW?hW}-0i z1C|p|sue?DC!OWUD@)1yI`OP0@nARB?o#V;K~2{G+^HW>kL?C;8oxe=%v7M3A_$?v%tTA z7xBZd$t(B7!{>p027Vc^|A18odiB5W{r8H+l?C*)U9b-(@F)EHZ(Ias8zk-uDcBM! zJ#GU259r2Lh35^tPQXg*J8G!g|!>tF-q`Ufh`}cAMjDu z$}!@B7m?tc0#_3F7Z{b^N-MKsES3LntH8Gd-~aD9FgN~xuL7SsDtpFNX`>WgyKyxX zJ}*YHz!D}CTq%!_Z}5%~WOEI1F}bKr@Xj#k%xW7 z!bbFXMB@92l{2(>y!vl;l>i$KZ1LD%2-+mjKVCdMxQf7a1bRyZ8`9wD!SI72@WSC3 z@$vs__5W-qFmDXlg0Rvv2Ie|}J`Py20Iy}R78ch=;aypJ$U_UbKu>ijm0J+ixN_R(j|6)}e$8SNr6Uf2MIFFUd zXWo6jhu@eV!K>w;3N;^83J0(97w~{3Tmv?YJ;FAF7h2{26H$TzP>n&GVZJxazXoa) zBY+>tgv##rhg7V!XW*O90 zXVNxb2;G?;NvG2HeOrB1eO|hZo8+&}r86Da)rk175&j|+3hojA2-=DNk*Z`JC09jL z#WkcWq_ZU(Pr%;Oh3%LHn{MYK*eqzR3X=4om~t3=PBG~;BUIIIf%nVGFR!dP%WGSrM^SZ;hcsR zfhHMHHW}2fi0xhWu67H_x;!8~nTM{S?t{FeKpuDnZJ;Mtk60`nuD-1rC+#h|BfcPC zt?I740}Yv>@=MZk;VxlcAdd{fZ^Rb9fLlU$cX1D)n`f@Y{iK9)BC z9Ql1v9(1w6j0085dU4xOsUw~_MpvP~(c9R4(CQ9iF9QRxi^H7`D%w+^0a)sv%};_d zZxw+ET9IG)ouTyE6ndju_`ktFAA&WW#u%LjG}~eJ4e|(Oh&Y^L9n5ED8vC4kiOAhB z=)6+g0p4QNr~Msqz03Ue#1rB#K}%tUU@KxdIjE{|0ji0Q1!@5aMX@ztt=C7L^X2|q zTnfjrdOTXO(M(q$d(L3f(_^7XHiK#aeZ0QV3OzzPy#u|MJ=Hvs9zFdRZzZQDTf35d zO9WNL7GXUi&_;g<&dctqM9M*uaN$#-MYc!fQ+$?okZcm`glU4d;#HCqVG^$b_=pl_ zK2rzyfIPYdwaPol+lTrcLBGt#y^Dr0GeO!=8F}=>rlU3EJ%W~pioc^Rc&8EpAy>Y&@O6%cpsHX+wZByV zP4Ma0vi-5mLVS@yFDzwOvQG4c9?Ym0AJM@PfDw9F`6h z$#=stS^|H!k@piTqTz@VorH#aYbg1ZLmlY}_VtN1KryHR&>hFX2b_QigTvn#YoLF5 
zYW_@Ucf8~0Vl0XY84(1|jR*BU7Qy3bpl^H=YxJs!2W-QNt2yFR4#c9iB09AhZG999 zDkf-6RK@GlfX_Txy65Gi!=9%9Yk1G5b~ zelri4qV|Y0Dv>K`jn_?3Te}DDT|&%f#-TdKZK$Vo$M0`K4`CgDIWY@Jw0s~(dBklZ zM$iYfM|8mU%|INX1pNNKyhwjt#Bi%~PP8H)dgaN?Mfz`ycrD$-rv=)qyDuAPGpBdF zH-?<=GqO|sKiHKt>3hkZ=RXqmhlX;Ju$N$fkP-KmmC8Cu6U3|F5lhA4;``7}Y)C`_ z?WX}pVJJHkShf4~I4EZe=owTYd5#j$^`QwB!cK)69EF;fU)_5=q31F_zYO1 zTTl``AP_;Zw5Q-P`1?)J4g_y7^wmeOeVHJjY;M3$Wz)TZ+j>H8Vv=x2*~C)pW$vv1 zHE$+Rh%cZN>marXoPz4YkHD$56*Ly43!X!TOU}Uw9^tq8Rj9{&>w4|WXxTPv4`XM?ij>4Qs51;x$oGrxIAcv1=K?t zVZVK`w_R9w=3;iEgr`oyJXHak(PDO`!;JI4eXK|1br7&>^$|@DMm$-M*-0(D9|R=i z65Ll`Vnt%dTs0Bvg_c;2bwizuftZB^VgaqO=8D3-z>V|yhO2s183}&?Bx`4k$0>*k zZG(QsJ**Tf;CML&ccG3+eftC-#G6JSjC^gJn0_R z?w|2_K4y}Ed8-|-CiA?NvF{Y<=$L?69nCL+%1kQi|9;`+@JAAj;KywIZqVF+g1gpt zMAkl{B?EOuMxmX=z}H)l(UhXb*OBwS2FjaO@;^lhw&wZxiw%<)(k{O^<_^2W&NBUPYdX$Pyv46^ZS<3jTtxdh<(O+{8jnMP}Z*r zRQW>GU~VF~gS<>L{!YZm`oYSsp(@HO%rgU{MdNpIE-o3ZXhsiIz>mD*H-}>US*V0{ zhmOrg==c<)XDgr#6p!rDYOXm~429vbxOX;(`eA+GV~4TZQA5NHyy$xUvr;u|KpzOc|mSrF7bv6Z%ASk*VVznnW0ZZUxhr? 
zF4iSkG(F%S1Wrdm@g$SY8#Z0!vpH&MR>V+J)P{u04)(I1li zQlac$X|Y5m2@yXLrV2*zZz8967Fofz%rYpPZ(ts?FS&iF0)K%2n)m??|4+hF;REQn z3_}=6=tUEfr?H<8{7m^cLej1iD6qKbNqlUV~dcTn+U!2 zhRhD!ZKePt-hw&+^+%((%*%OcughzO!g76b43GpX$@5U#kD;zmv+3^K2L3c*6Uk)x zMb+LQTX5shdZEgYCfcSNtFo$GCt(DC@DKQ_ae8(jQ%-NDW3cz>&?WD|tbjUp7Y<{b zPzs-lCP06Cs=T9OCTeF)#Oi<+z97~EA?BfVzHG7)G;BwcF7h&U+?U2AaD3ir%r$Qd zkBBEpOJpD9wG|x{jJ%%QEv+R95?v?S^J;TznMk?_+36Fcot#f~_tl}z^c7|bTL*qT z6OrAj$c24_GTmVADDv_r>0Q3nsMQ?qy#bbPedi3vPkUovSR!n z(_mAUX^rKQ>m7STbXoCMdpU}#@~+ye8mFqSO&gZ{I_8k>o@$$b@V#)vTPB*?n4TF2 zm{`jUXM)#6E~Hbr1Kdw`4af1O3&KVB#0_POlu_EvVRl_zUCS_SXt?&RLN8YEzK{m{ zbTea!uBcmHzrtp`X`SraPepUf_)UZz#TzAGqzmQ!71{E2a*1-5+OJs{bU5gbpcR_8 zs%nbKlE*|2+magTzU7Fqx3P_~$sPUOW2jE-dtR}ik@&o%pR|@VNa7UMBzF71vs;+j z^bP7CC}$r=1&~#wmhA8C_DnvRd@AWed|Wi8dlQr*tHGb=+vxt=5dsy6tJXOC zC|4XQqK~i>c>|#K*B+H(yih)zAzUKtDT^F^Rm)YAmB~<@9ws=)&G4zc zMrXF|q(y1Tws>vhTuaGy%qQL+Q9pSdwKeG9;MkC!!6UR$LC;l7Q3sb3>_z2vJ$IZL zirN8peD#43--UVrI_7ue$=g83b5Z3i_$oLvN5GbNin-Mzx&b}Uw}o0jPV{c|Ty+cG zU0fTTC62zRq4?R})xODg+8S@|V_9f+nkJjPMx!y#+{|{<-G(_wgveZ)4dJQ;UCQkA z3)MzdtC7~YN_kv8s5J7mhjI%%H$2$OwoSDu$4uxZw z{iN-q^`vDG5X4SH4MVDdGsK&3*iGJPoLNXINzdLiVL(LN zpbwH7y#CaD=VXh>I02QDEykvn7dF1@gEt*)$pyU4ghY5%*hN%LoFW-5Jq$hMJ&G=> zo|@A^r9r<3&D5Mz@fA-bPX!g+E2^4%pgq&_x3QYxN5u_;!o1G5+2tl_c7fo8v_g3_ z$Q;r^S66S;kzrj!KWd+;e^=a=V4cN#z^ds_WSUp(>Edn%Wr@9>e%@x}UsNdF5%v&^ z+GjnWptcX0phw`5Okx)@)9Iz)68uY^^*;A_+){U<>sM!gM_YSyTVLxSO9L}!Bn?#! 
zpDRWf{xWVh2iezpBH2v>S~g8v5-~2pUG-{u-5OCDWz`*NldH6h>!&9)jU-?FF;t8* z1ANvW<@GAA7`oOXdGZLR@e*~<`ml}=U6h29V{3njZzKJeh5w1XN1=X zPu26ooZ8OdT7$VlByop*72q;49`PKBZxHGo^2xs5Tb>g47cl4kb*!+**ydS2nOYjZSL`d#Dmz+My?lK|chd;_ zDen{Zq~K5apb%~J%tS-gRq2T}7S>>@Nz(cy?}(WhW>IB{@A@mq0(i))hMpAH>w6K_%+*7OMJtxE{M93C5fS~o3p zjW$#rBhL{X=DS&&Zv}bO6AfQC+?DR`429~eWTtNiMg}U6A*MDCT6T>whx>_@#5&wR zdoy`Zo?qZQL46}>D6J=Y(%lu#!HzGs309TmvZ=qZwqaSt%8C|-mBw_-OUG>TIs1nA zO?F26XM{AaN>cTd%ygofAe~B?pL{mI4=vd`i#f@Qoi zrV90o`=mY6`qJz(O)wcvc^20Gz+FfU;2gw4Nx7m(BMNC6Hd(hycQ&kQXhd+bW|m@| zq+D=>w~@{E9U${PAKi=HSKVUIVozW1FmgOK#W$B81YDaMGpgsTAIjU)*(A&r?gA6W z&R4OSN9pu^zla+5n zMn`OpsYq~F(Wb_ywo2JqjC?A z=T~=I^v)>yFQysy-hUq}uYFJuKPyZXPnYCL2goldTuP1lwtA-ecU6w!i)^H1r?4A; zBHN0(>dtifY}dhRb()IJtF52y8h1H)n>oaLCj25DtW;^v2CdUR)o#^33aU_dQ!bW$ z5hn^~@`J(smD4k+ljJe-$p4jf9&l0>%iEqavpKTtvg9B+2NeXAprA-lQ4~o=lw<}K zMZ^G#Vh*5)*()N7q6mnB0Rsj=0TUo93JPpalf(DCQ}_J$-uvzO&FsvXbNX~wzg=Bj zRb3T)o~%37!OKFmp@-R3R1oQdH`(Dx^XO^O1<)cGJrVz>4fr~(WG!7A9Ur}oT&ZV; zGJ|jXd(}N(Q$$pxQ598X)umHPN0mNM+P7>(#mCj#{0qVv@mlZ0nK=!e_$& zt%COjmxS(&3?X-32D?=jdzyL2`_`u1k~TFXka=eI=Q*SDy5tw-Ps+PK_w1Y%S=}-k zr*3hU$9F^uLs$BT)i$VkuKMNbu{D9(34zkklhNxE*LWwU{*it(b6j?Voa=M$%t_7J zoi!kHd3s%H)0A%BCp<$FzsK&5UK{y|J!a|Qg`pnoLmEO(mf_^XeUnTS=Z9}5w)d&* z2pd4=k>$}qbWp54-kf)_Kkog=E;J*7V2fZ;Ai^x$tM;~!ccGoBRJ8yc|`QA!NPg{}h z%M4~dnpK{an!PYa7q;}`ljqpDm=n~40oE)2x80#GI z_9KHqtF)=?>OY0Z^n-ofy*oT-CW!OQj=0AnIjnlNl2`19@K^X7t|j;G321%K3)LZ8 ztspxr@DD|)NZ}tcL4FlcC zBrrC(3Z2iH(bchGi4o4bz7x}4%Baa2o%?QtjITEbd0hFsn~{$Xqc@i?!b_EnL}NOi;?X&AjU`c?EKv{4=LZ#;xG<{@mQ zAuMQrqv!aXHU1p3mG;47w~+juDeMTyz)M~)MS%VhtmJ0GGlyu)cMxxRF2^mj z;~sYIE?_^=g=BF!lT4pE=&qyi)_Bapcd{X|*88IkyNvrDAb0W_qLBZFZaf=F&0**3 zk7%Mcu{-i%%9@Q2-?e0ixq>})m!Yk?hSFDabxX9bUnWk#SFjap>tb(MFw$Ca5F!lLFeG#rnMnEp8UyTC@Jd3z{|8O#FXVpl$gO zjg@pP^2Ly4QkFVdBWrn=?@wLQ`Y+3^tnBgvsKF9?2z#1*j+8fe4gIeyvXA4Vcn`ir zH=#eeih5kfaV`Ejx1%>u1nZ4>@cl)Z+33Me#hP^!hzrYzldGi zbIDJ&z`2VE()V!O&Hn}HmFJMr_Db|R7ZP{+Y%+A6gr2Yn%TEc}fY;)qHWwYlIq2RR 
zL$yX|rCOjhGYfb*7D9QX$aiT!@4rGZd1m~LMxz?rxV#5*xvw31v0muQ`r{*W8pjay zRL5g=Z;wVKo%YMxD~}BypJZ7Lara;NS$s+BUPWhi5BkNc(4dV4$-{||d5rvo;P?ogyZcE>@qdaS>P;26vHnPD5&zz~*}lOYja*`wr`iMR1In=wYX# z0lo&5E(6J1&^hh_C-T`jl(Z|$JLxFxt>$j^xO{hHsg^!ez5-cLKwdYp-5)?-`7KzG zuhO?X=NHb(_brcWYS4F1{{TLU;d}>=wy)`Q^x4LOtG*dUV4s zAg`(R;7ZrF;L4_a>O@QB)psr&<|_OfZ^F~|u4Ej1%g}9fxfX0qXFi#l)Xq<4Hkq8X z4#*F39NPNx@Yfmx-G){0CS5$xEK4KVu2lfzO8e{7tNutKe2IB@g*9 zzs{T``@kBw(>ktrn`0fb(0gEO4aZu(%a{6DtTEzS_cOAE)4Q039%hWc%oWNVu$S6Z zP*W$_9}S^=BYHp{wvMNUI(&u?;5$5T1*JTRzpOM8FYx^>X4+%SiSooOgxc+(Oc(k? zerxibY(rnkf4O-Q5$gY(q(2s5fPV7h)*)X>9i^Z#l$?LOT-TfuZ2l4F90=u41a%>!9)5G$`7#M#@AZ!7r={szvzfm_Djl2y?=>B-qNLVfn&$Mp?7To%VqC}k_l#o#>Ao<|@gO=9N{MM4yLeHwIgpjXH8wd&tyu#M2HIdFVCg>4rp&F!}OzNY>Ft-Eq{UDObv?%MVq{k|>bZXC*u-ow~N< z`kwIbLGZJ2$=P*Ea!#GZx3fW_P&NoE9nV$rgEntL?L3grzuGPjLE{Xy%<|%C@&!Er zQuczGgGuQi$T*fyX*o4Gmb^-yqQa6m-`?bv@+Orpp*(O8C1=2DIHl_wSV1|$wy?Ml z3G5pXxD~{y|K*MRDaYsF;VTZ4F}5SS>)6eiFTloLUYnBqP`vTIB>&t9Nq{^<#YY zLz?Rd!jzpze!GrX9oYcC0RP|8p1){SRnlkkAI8f-Rv<0$)4G&Vb1yXtrW9w-&l;TC zB;|pORhjM6vVCK)L%s^+x8=gru0qYq>cXw++-iU zb}fDzv&q+VkQ_bpSzSydlg}ki+?nJ36W@&@-_JyN8s}s>d*RNPCmv@#{7w8q*5w;P z*|&HZy-7y0XV6u7dgN4zu=%QFUJNCB5o3grG05@#R>aGCkeGa<8 ziR`)@N<^zpWa79W{uwcn(kWvQK3g}%|BBCoLw*lZG{=3*xO$E~r0=s+GTVC$KcaQ` zrhMl7gFoSGtoG)CwFRu19DGjtdR`2 zqh}_@$CD#$1<{M{BYVpLM!3SPz_>0+PH=kA`b)SR}Ln##K8sCE#m|~EPCzePSx}|UF zq3&cbzYpEgC@Aq`ya{spyKsyOG!wUzkEj_d(C*$0-=Dskl>T_v?I-Wy3E6{lGIO8E z{U-Oh+^$+|hipY~$vz?8qdpAeTPf!yB;jY}TS{f&%wf03i;A$D!%$G4Cx zCW=052AQc#A|H{B{MpC?a?qa;X-H1``@|5Z@zd4LlO=8QbJ(k=8q> ze?x!qu;w?lZq~MMyOOrSl1`l_K-|Em0~qEAI` z#ibQbRNP!Kk7G#1r{#B+pI4q*?k~NibVkLSwUyzw64QN4vwTIjwLGr<-cFml&FT4C z&)2&>(dn#q6I&E8sULKL(eyy1^2^d|N|%(qR`zcBi0ZL{0pu1>i(Zhp&U>5p5W78J za&~(Q$ZXgotuCW!c6H8u`HLFfUG#NPS>c?9lN$8TJv;Nq6vs0q(k&3I`GcC>QCUKy@4Aex5eM^Z1fhOuN;-0n!bh{!J|{J_Ek8?IlVm(#-EJNjQk!x8d`w8@)ZB5 zx*DvnPW25{b1Sc}___So^4y9)%ik$~wfwd6H_AtqA1+%`wyQK!GV<7WN4u9*RWAbdd6jiJ6rzIuB_w!t_OSk(WAD@Jsk!f=Wn_zzgy}n@oC|QYG;(MId);m(b6Gh 
z>&sJW?j*MGy6{brZSmiU?lCp7BYu^q2YjhxN_pDN#KmfoUsaGO+E@H=@!dsx3Vz5t zGkbs9erIp=#$ZM5HPr*E#t;Q}U(Ne<4+Td?^E_+Ge7q%fbb9lQugD`mo(!M^Q-*qT z@R*uST(XyA>xh*1ROH-nJh(gXs{e$#%9=KKT&$^jtFlw&sg(;VU$0zTIid3W%2t&t zD$*-XDj#09x8&2Kj~$y;F~PqFY<`)tJa0@hU;7E2^SaIGnb&()j~<R7yVRk=X&3HsqyrjoZ9>; z4ND6bH_C5ZQG8`lqXx6HbJ9n9=f~CupQ+ta<*#_9;-|`iHQ~BvgO@}ejF&j~5hM1h zv=`GJN!yotFvUy0W}oknvkeW9A6@t_iHY&aM7vxZ*%4|N{LcSk-Q-$gCf8h7b0ayp zC)UiYxvFM1Hue38VXG@=RkSK^MRct5j-7O*|B=(nzNn2xpLgy~{UUF2vy%3^yA*Y6 z(qn6nLEUa?A8t7qe}cAY-J@ImU5R;>Qa-6{VMS5Z_Nx6gU4v&tTSd2qx`u{D7RTns zmPfyd^>O}5nV!)iXIF!(8-3NdW0U(DZ!XShbXLJb*=z33mm#D4!+^WtFp+%vJ~EFGkmej|JNY z*4FK+J+XF8&7_(SYF?_nsjjN7p+8tRqi$L4)|vz4mu^C&fbPS@hj*R;K+d7#k=`Tn#?@r#0o>fWmPp4iIW zYx-lS{<@}@|4yQo-WG8D0|J$ybBTf7+;c@rhs=NFE-1LPQS+u5&8IiJtMO-rU+2%s zZjsi)vnu>gom11G`u*xNYggAj6zCbgE_StN4Dkm)^Zn|(i>zN;XjK9zHF&(BHCdLhW<4x75w@X9gArItTA34%+fyzu^zT_PQW8|w0EvudZ+9jbe|_JNv>wO0q8CVKA){%`zqL%&7CiHCjZ z887F&*28)iu@QYvS1X7S+u52gB!kE=Zf1lhUwH@%gBcE@<+2@w~!AdBZYC`qJX3 zhi@hi+WmnW0$T$qp>5$Q(Yg5YJx^xdz0s=ZYq9)98M)){NI95#clw~rGqNI?mt*C$8Z`H+?U;#Q1&4~;T^$s2mvUsN82==;BRW3vcqkG$=%3<0$$uI# zd_(?*!409yBGa&6-4SUUekfS$zq&4l{PJMc1C@uFua{OFt|+gVUNNNnnz8{EFV>bu zFG>A7yKzD5;x)||wz;xhRlCf#jatoYa&N=YxyNVT%07{e-YK3DvDw6!&J7L;oL<+W z`e=EVvPa8$SN5*02zuhbc^^oBIlFQG?*(rc_9*(ZVTT3}=UkEbRQlIxW77JkzU|BL z-sU+0JL%NuVtRHto&sa)9`=t2{T+SO^SbW`dhz#+Q0Dxsvdn8TW~9B4GTT?@4SO5= zVt8n`!Qb-D*se%^co8^ywQfW0C-|*w4!j#`9QiDAEb?@uU*xgyvqW&28aU5?bKM`c zt7>1WeXO=??Zq`+sy9_msmLp@C|zGtb!>a-pz7X{KB>!cD+`}$GQCB+)&tvI-FkbA zrcECz8q;82&f3fyGJ0k#z?=5nlnI{Mk$!>wHI1w0m5(burKGOJsaR7pF62p6_ztC? 
znU$5jG^-@@tIT#;3$n&%Rb;Ns%pw~1@a%W99?IyL+R14ido|Q5@H5^Y{~(XEuDg!HT?=x!rTJa~5Vln7JzT z6wkZiSY6|qpQ?gYN2{mQPVi45BgcejQT#~aN#`f;DBqdv0a%)nk={RRdG2TV9U8RF zAD;VJmXom}b%c)?@Wc%`)=1moP@Bk4(MUYc`HbCPdwfr_ z+vR6+8c%ZudwL}15?f^)d6l1J&&ZXrM$swZ+k!v(KdGBSj)7f7FrUge-Hjc*(!bR2 z^f=pAjmHMSR6sDf#IQ7Ztu$*uQXC!wvbB@cIK_0F?>3e(wJdx;S;a$NCg9V`l;W5#-W1Zr4tXiJK zB7IFX7c1-;i7z~_uy5_EwBn2jnZ2^cW^G}Y#=^8e$irSqZ06P8_1=k$5<2@u_SzQWC!|z%h->!h@A&>*mZMlY&+}Kj*P}VQDlQyMNBb+*PsKa3%3c+ z52cZx{{7$|!GWRILdD^$!jFa@3(p7-WFJXpxDTJxSi_`87ZVMrAoev`!Al}n<0U>C zkAN2O+sIdVed-zMGcq=0Ov!jIeJ}FXX!2_hP0!ERpK)JiII}}mG;;={s#E&5)EOzm zeACbeeCGV&9OoVCE%e^v+~OIQsK9@rN3=2$iwq;C;9s$|>@&I_z2d`IVwxt(*ekF- zUcoAMe`19v16x>%vx3!W*TjYK4cPWCBS(5)avh(GO?EZw$sUPsv2LFcAHvG`Hf;7) z@pI6Oe}y*S8?-OWupQQs5&0QbgnL-IpULX-4s3nOfqM<>(GkfQHnJODs9h$kYl~T# zp1`WLj8*Ha=yOUEJ&3(<(9@4yjE}Ng=N4?4W7z|9V{+e0jZ@_9f>-`P+INb#Cu^b* zdyc+$cCx4C3)b};Sko`Tn%mIX!ycmh(UV-mTIFi=6f?0y3`8H2jy7JJ@}p>-{y}@T zkJav5NzMN2XtOp_&-tv}uE$PwF2`l8X>VXfdMSSE1F&~CL*p<6Thzx%o2zu>#ptNQ zNiA|2`Y%Ol=!5=mEUWGFS)=zyv)Bavqjm?kLkBeyjrC>hShyHF$+d8a`RskTmmML? zi4-#zt@a4)Bd2j)7c>{`&>jv(vv?hCm`Zu)QEoq|)e{ZDU})AoX+_fBt>>rxQd>V z9dkOZo=nuCbFl9Z!v5bKt8jaAYAd7m2yA&XvB%w>wDT#O@e<0qkDiuwV;PnW>8;;E zqp=-LjG{OEj+W#$ygSBIrfh*1Q~q^4b0PXg#ke^hTJ_+i-TYn9e`s%ZDjL9I`b^PU zr2Un?SG%~ipIZ?t6#YZ_%yZGE%HooPrB6F}W9SSO&p`ILg5<9)=!fp;tYs-}j5e|% z--gi}XY#y3SmhNvpf@z_19fE|F$?OMTyX}}9zeZhIq67UWCJm)ge=uj^d&XPCq}7@ zEV+%qa!WM5UASHmJ`}NF6c&pKXlce!+EB{s$8U;_B>iwNjvkyD#4jf&pL9yHKC%#Y zqrTEt%Wfx)xmwW`9N$Syp$D-1I)LwP?v!qT_iQZ-{&N{T6JN z*JAsfioR$2F`h>h%PiNBN%> zDMq&3`_XUS0v-l|u#TXs3;P*6qtQK%y->yM|LBh%Wi~ij!q|Kj%gFkqwL}(-k1137 zYekqj2FTgVBN+La#c!_kkU8_%mq`hrL+ul?cslAMP8 zBXEllnmt)IgvbWqSr#xiI)Xm6fc|Y52ePXuf`wwgNQd5lcF5Ldhwv`!%etmr?WxIe z;^i8B6Ru@X(V<=WHdbUsQ~U#QD?L{hn0CBm5k4;YO?&Tk*x1P7dRg>z$hsq&UI=}? 
zW%`zdGn1B2#@0I%U5UI3uHu-^aR~?;pS1HD()%*1WN9<{ccg9F*VTi=Y>;iJgX}|Q z6KX!Up_3uOHzqUnI4@vWRWN zauJ30zhEi&jX5S24)`4wy&viSH^J#EyjJr58dzS#=eM!Lt^`{;UgpeOT(=forYvwT z!p+3rbUeZ5WuQd}S^Q0Zzt|tGC?$EU}v?CQ= z5=r7s;zo+@P)UC|ApSgf!wyfmCx?0cR-#3=@cv2!fSSaI@onT%n86E-(wd#^GN`;t6P zJkN)f<6HXSO-hnR*$YZEYxV%?HI$u5jD)i-f$M!q{4nHdXeMiW5oJC|Jh8*rKJH}{ z)S}Jag$=DM^XDSC>z8664-2lEf zK(Q-1+khTe$;`W&akGxPYyqoRU@y5GnjVC1+t{J~3S3JW=J;U#7`zRnFM{+-lnCSJbKf}l z&aa6f;JY+2m#ZrhkAvJP%ohce@Fe?RZw9g7GnbSy(q2IBDgbRO;CO!~rqfFa>UtR? zs0G-bhK*t^y*eL`-@~Ily*ue=FC6)LBqv#V<)d?$F(Z3!F4xOvZ5AV;J)Unta7m@4d6JpF==b3@e(tgl<6H)xe^29gb%>zO3cgR9} z@r(S6pk>4f9r5brNYnB~Y!)3C~IWM{oYZ4~b$7aq18 z3J(J7$M6olJuwMSvewRS#&8|8-)~qmhBHg|r|cg+Un2Q5qAtsjgqDzb{;(%PI~K=t z>G#!%F3dBhlXK!9k9<&jC$`56oNdnaphJEi_duzZjL*`#_=!l;fZD7<7Te-0L(SS-Z`1NT}?jO z?-PE{!h{#w;cnzp*<>#Cl+d5gGdE`=ySJp(-#Ozw@5Rr@6J~Vc7<;bEu+tol7Za!B zYxef{b!r(Qr+PN9A{vFCYG3DeXJ9-n_965+CGj#*KU<LnAJM=u;xqxVjixWR!!EBw_$F72&o+ZpJ-JHvvkCFebj{9ScSb1E;Uag(ZiR`s+ z1(I~~@Eqr3@6UJ%Zz4MDtBDA`_-NtHS7 z-ocA^3lSKWG2+)bT^+@|`2@LsmS-9svoAPrdxON{80)*-yV`T9()M)sQcf%tnw%e_5uhSLqq9ZQVDO8vHHjIYGGk`c3oqZ88Le9vNU4H}e1@s06E z6XQMez|JYeys1pQ<9P{x@h{lDJ>I*|`vKQc5oWtIc+dsNq>HhT=d&`~?_e!FK4{q_zmeGR_zOZ@lvIXwRl&+$a4D1k5SBHsX` zBR)Uzq2~hU1Gv=5*!fQF>zQO_O3-|!HF zp(he{Q}!pnmY4}AdX14ZFA>GI9s@^%@%1hA_?-PtN3c#NT4Los3Ex@m=|udF4tUG1 zr_cY6-54LA*oaSlcOvcH?AgRR?+Rv{ogis6v+Fd^_edKvp;-@Rszq?XhneF?aCAeO zd=%c-2Ax<4-rJ8^wHp*|O`N1hkk_-ByYHZmvxvs~T>MwYspN>gFM{4b&QczESbJbe?)L0QNy`{8--5Qp_+=BCS#<}0E8B=}!0^1>=SNf*M= z+OitUL_%4IyyY;Hoeb~q#TAN$IT23Zk&+aTVJ-EYN89#+nH9{@0pguZqy$NB5Ar*8 zgWLZo{3V7V6*i(bhQhhGA>Vz+byJzKSJJX6$Rf|eJ*P2i^h1-2t_+1!Rm`f#1o}$dT|h*cfjNc|*H`D?*P&R>U_uTYOig6cE4qvXp3QyNp6C z@&C%6oPBTBU70s$e3afV{rARMg&@G7-bZY!PB2M2F`5jI37Ggo&5giom zNp$cQvAlQ|lJqoUTC^w6(HBmM6DB9lSI#~n%XB~=v%vX|{yUABG&is-VqN@OEON8) zKJmxW;uGSZutR%YVgT~l?cCYS+tyn|mZ;unf){vKcxQWk#E4o;Kd(nW&@%BAdskbc zhnf-V8q0|Vi8WBiZY*Cc7eATi*c$ueV{>z4EU~E$hwox%dymkK!H)uM0=M|TC7;AS 
zb+`J%fnniI#AWH1nw36=mMtq9DYp_ws=YS?=Q}ro_nL34@38kG?@DJgQiumGvk9(oXEYdD9=R$qA#!eH8vYIryK6dQRp^j7 zN0xY^kN@S|<-N)`Hf19j2ZyJ8hTZE=?+h}SO(4qGNuH%d@%WUzBv-_n5kX;JEG0e^ z&z8@S`r9Bse!(bx3xD? zG~#7n#;%mB=-+Gjj5iYz!@iEDMB7Hr4Br}hKiD)lH?Yha_1^1J#-~-MZ^&4Zk(zODdQtkEv@=q7`|kC= zL@eeqX4ZS7N69VFo&0Du!AYU&&=cW?WCmXOa!%qRd?z|5J{VR~660z?1L#@GgEI|}+ z#qmDOOn7~~S3H%hh>OU}w<>ya^e9=VmVlUH*u=MluL}RnqUds z5M#0Qr>swXE`3KvcIL+!w`a`AXr3`W{o=GPskOe<-tkT^&s65~_VH%1 zO5%gHiEJQFRR*3~hhr<_4Cv(?>-Y;qh2O=hX_==H-dLlF`|&N2$}WKporxt>?0p<6&V`38 zVW#{5ydB5hvyWoyVw-ugA3DV0B*(>jkR7lBpR8H2RxXP%xjBG;QcDQ9{13t5vhOVV#lYnEDy=D)}r zWPX_#Ya4k0PSP^GDpC@?i5;;wG2*;%^RJz@zKJQ*Q#+*HkTxpqtJG$x*APGHdS7RH z@jK^o&-(a)Sbw4oyc~IkQTsw^I_#kWMa*9fyzJ2hpe4z4s;&!tTLe@F4Uq#tY$Z%6P+xfZutcGjMALF*4HslZe&KSV{N0jvWh+% zK%es&YuDw>wu+V$<4NBmeZ0(_&r#0ac$D~Mefez$xdN6l3+_OZ=3}kX9o{CL+ld@K zn8|aggH}b_N%k{-*`Ko>-opy`A6D-X)`8l^FeE9ZOk!@4$FhUApfPf@e1#P~JxIxl z;@Bs-17sF@0_nD9Fc;s64&_y(@y$s89}|1=2UcB2Sd|?^(mTXF_9L_OMs%)AJ$JJr zxt4X=XlhiBB=9Nrj5m=4)}SNV&78RpUBNM)T*S(57W8|S)#n`U`oL4^ah#*l#@~hkcDTTZvk=ae)GOeG`cj#W|8v{?UPsH53HqLK;!Ws z`m9&b;CuiLdm*20WB*l})70t4?y3>Q@;S>HLr&Me_<`=G)l<QEx_?ECpo+XzZu6Ci)3F zdK|U?1T0Y#$ZI^B|Cc&f;4#MRS<*$!tFem-*PB>HYn(q8ol zdYu>99rqrsc@8P}9`tAn_)S)Zi|Hxtkxof`ShnSe!Y}C5}+J^L2!wl;7Ilp>9cYe?b>y!_{FB#Rg5X9aGRkefcecs>VkVQn9KvtpQvOPwoX?~3}+Hs_BYms}`@xCf)eYl$y z*=?+o9){*m@{)deDbISA9($duRi3*~>1Fg?+I^%*`(`y+gyrNu?$s`$=jpq*=*=(T zczfZ0RZvRyfkJ4c{bjxA2U!W*b7)^tE;_{zK*!r?EA^Ds-2EOS`7^YNZ}a*S4ZzyRBzVv+8^yGjy>f8UZgmPAxi8%Kcn)dzrR`M|{}Sq>-LijE z?lJnU4d|5(vm37-TrVqFcV4o4Xvba)`lt}jEXzmz&Y&C+q#YrOOfGAP?G2L6!)$Si z1Dyc(u4PVFnXN^;+!TLWmRjvG(Yx74^0`L#6j>VGmtun3Q3bF59v$@8NN$^%PkyBS z`}p=LJm(i$qg_{FDAkYFYcA5>zBAzbitv9P+Q-pQa5%s9Wu+$zU*}{iJJZTm=)H8v z0@REV(iWO%Pe{SPTc`Ng+7}fo z(52AYcC5$-pxsNddhASEM)a;I?ux+wF+DD9+Sq>sRLYvCnD^Raw2|j+f*Wa{()#36 zg=!sU=X>Yh*J@9aBHMq>T&MWRKY>5Za53}=$}MAAU__U*k~TDPk+Gc5LF=!5Q3tqM zc^~#B+aX)LS`|hIb~bZ09%OKR?X8)@p2ACbzl38F=e5^sRMI-9ceBrpNj_r?=g$C{ zr$hU`&^8Ai=>s$3l-f~~2A#Wr&;f8&q3=|#9gyt3)8Vz+uY3}HHUxaDS7m|i&Q$}r 
zR=uX|9NHVCoh{9h_B8FJ)DAbrER>yAdy>=XN!gimd7|RpYoA^fs7m8}F}{A#v+-lNpA}mG$-0VFKsbIjb515R*kkm=eat;G;0(LyKkXj-153pv zaJ>nXHHw*W7BwD??)zq17o`URNeWlO;}o^N5>5Zle3F&CZ4!~~Xwzw+wl#G>jgfsm zz15Zacjo@~(5;wv$MY395wCt09mK0(=W{+iC#^i3>^=HRT>1c`K^_hj;4p@+TymId z5Q1MjwB#7ywSzN=rYBClmAB(Z@V1-!tcRz5%Ejv~oza&luWy((>OYZ?%hAKf~9qjoT`@tSo2x018Lq^Qq%;U%C3+1{vg_*IL=VSUwT)B>Q$TP@KO?gf~ zkb3|--Np324;kV@O49yltT#MsBv!rA+);qtvNX{RjGYU={Fdhwf|;!#Vt?XV=G!tx z?`f2ghCOE#D~Vp9=LYmwZE1N=>fIi0J(;^(Vqa^E|itUF$T-%m3LWr{K6XKUJ&4TgrU}~cOC%~ek2vOp%1HxW%Qk}?xB9g z$y&$ZfU*WRgu}LB_RGhfcLWT+3lAh-J9PVik?{*{ejiTh<5>wTMP>BQTBNWH&j-u^ z%H{PT$M10C7m?Dm($K81gV*~Y?^o*JLx2A{Bdm<)$FZWEMEexkK^YEm7-=IJrTLsu zhJ?Iiq=lcT#}~;N{R^o3C%pDuO8AEQyhDBS=;u~Yz6+yTUMs!0|2TTA3FnKE_~hRs z?w<-4j^~#YdOm|IJ99-p=AXWdoCGrcNuWr%N`9o&SE*|?H9f?W-k`2oaIO?^)PeUh zu+$MWr14bQShWW`!4-;8k;=0wc$Q*qXr<&yN}OKq`HkZ!Bc&x{B?DyX%r2;#MU9$s zZ#k6x8VVjp9#KY$MwC+U@L0Ce5V^BTZ=`*FqL z~`9KkVbI z=BYaRUip^(;+ia4kjr(4sLyUFTSL9`x#DwV`4>U#PD(or?fzo!Rm9yLd{b7KhLoeI zDq(u7nA*vcs0Y+<24!=pue_Wx89mzF`7L)Uzr*je`b$dQ0`8UftBMgLZmkwuJdTD? 
zLtZqJ&?HZ_0x#WoX(nt;i;JN`2TIq@R>dSpgL3k{iqlHPNjU(`R9oe=I7Xe7iz3R{ zY6^d7&E3kD(TK7YiJ$`|Dq~m*aB2PVe@;OKg%4T$!w#YkBk#iJJN3HvfR}JLh*1Ye{&s zs1~kduParqJwcQfh1H|VuNDK%rO-%nvC&aJVpSY9ph-Bjz2X)H#oaHiI18eh(NSe- z%$iS>c}H2a1Fcmh?I^F2Uh+y*-WZ*;m+MO<>g&%UAE|D7uAZ)Sjy@^hjcP9B3t?4B z>R0`n@|FdtB-vAL=?-@)|BqI;@)5LH5!&CbZ<1AQ29kf6O4hhkn{1_I=+INe8H84y z(QkSePBb8 z(N3|gL=oke(QbFkFC;oCKacV^xtu^4Hw1`7iRu>rNcd8QD&0wdX6~~QDL$xjB1sOV zXQ?b5!m8qB2tlcw6}K|}EmR7fhCuxnC$MM{MlIcK-_`5&f9gLWTzzIOvQl*3vMY(M zipQWx4K}((AIpDat9EOh5N;S$jwhi`@fkG6GI-Zm5Dw+rD_=*Wq3~zkhZ?)$LdJuX zS4i1}l)Xs&N3q2eT}-qQ!mNeT>!{B4BU5@M(b1))d>YNKQCY3*lN+f_<>_Te)Newf z8(+aVqPyCz|HoktL#so z_4@BW-D7$bqrUK_@9v+AJzXcn8fxo58Ga3gs;#aPh7G6oU!UFUq~p?jqIgK6jHsb^ zi&~?xsW>5$1oVrcQ)Agis*O2OPPjDG33-}7Z7kV;tAR@kJzt+i9ph@o`-LlcscM`W zVw6wTMUtUYW7g$?LZL>K?lI)L*BIioiZPxjyoo!N@{;6as5c(y&i*C*YT3Xv8;cWK z>;UOFt#^e+^BJ~mSjOLtnr=HKZ7FVyjZ8&NF=`2I_RXj)KYSfVb>pap%lsr3ZEU+U znNd`8r|2bQ*t}@i6P`p%9jb?@t@t(8^Ln?l`o)kUOzRrMv)yUPG<><5yn2+|ukKUC zUDbnzCu_S9WuwKOW993K>TS2btz_M8he{Ae6>UdU)mS&$nLHs%>M$N>d|D-{bd_ND z8@IQ5hoDy#`H;`^Fy#gX(bjOh?|ggir+a3yp}g44TwGY+nEZaG4lq0~yW z`B>#iB6JVqN-l4*=jof~Z2fm@Vl^SOzVuk8G5Y# zl1;4zns01w)qmq^%HODS^|Pug(W>o+{Qn#UHuiMJaH?^nIoOb6ZLn2=_?!@ATurUg z;eNAr*?WB-*Hb~f(m17y=6Zf<}kFb_H5Rz$E3}=#_epBi+(QuaPcFwSRcBd z+__k1jZ^8r;lTZ5_qb>EoAFYszuI7PiRx}GG6~!`?f*Xxjq|wZ7aok~3h&~tCPfGlJ$|$%8icn#7{Dlq>&;ZpI~TQN^e zYE;{}b~&E@yW`#+@#avHcWI(?Hb1yHSDCuIer&k+=&WAuEMl~>HIKEto-($sFn(xqv943w zG@|uxT+T3UV?s0#Z*X63eN|U?1{T-S+-FDq`c!DKZ_+G?cj?f4YrNCAi+#44Kqyw! 
zG|jc*sru&5#V!vtsnM=9E@xL8dTo|AEbCeBGt@gSeML>R%I!s))wIqO7cq3&oa?^Z zIrWNkXPSRq+8Q2BW)`LE55uC#kGfiGJ^R(2kwtsA9wrZ}X7*Q)dErmJqo-#uUTr%; z*XTJq#1(`y^^%q6atK+DtsUwGcf_c!E_brl83!}Ip%RVXso#tmYNN@Gs)5>UrRlSt zpl>dhv;Vr@&Ka-PJ*tWOr)Qb`>DJ#JDaOO~Zf7*|G**NxVZ9z>DobP0&}L&oEwSfV zt$1kisn$={8p#m3tU+b%*t`$tEVN7&={^D4f>Mh*q-D^xtRgPFp)0 zq7C)NXC;BEE>;8UBbO5xPf%+_3-zohZ8MoZi8`8fg&UhOY|Of=V%Z|Z^-Nk37c^+ALR|QCY5j!5!n8b=}#%zPAjCLiGP~KwV>p^`|?gY*y1RcHSLl7D3la z5u&Z{T?DB2^=EueXKif$Csz_y>=(mN{b(@M=x&=OH0F%U880*%+S;U^i@W1Wb+zZY z@1luY7xj?KwS`9aPb0xank&r-G3t9E!p4|8;%pw%IrXy8XTKY#F*Ld;Gv4VU$yPBg zFBJw2Px^1@a{o(e(`T2<+8nCqX!O{#ReR$P)-Lz6ez$UTzi|pZ$DU^Mx7#D4m*x!@ z5jJnt>a%-|Va(p^?>DL0P^Y`}i>PeZsiiKr(|`A~&9(LGGM6&O z1H}RAQDR6}Ir`O*ZK!u^sb^{)*5UTLaV%j_9In28#$DX{>A!I!q0#*l0@UB?1;eL{ zP1UO&bH+=J>Uy`U>#tCq)DAo2_Kql{cDVg21gJgr*wHFP$aU8qZrOUW_@SukqRvI0 zzKgy#e%&(N|2Dtdufm0>s6TfdA#W(7oQ+1c#W3@qNK=j7Khes1+tA{2aZyoMx%d6& zJ$8+u)~$hipN;$frMBv!KkFUi7<$*?*2KOU?e%oK$9@%dj8-<*fnZdJw{zR zxsPo?;p2xJws<*O1U&tzgw$}N4WLT z^Q}(S%er1At5@_=%XCg<8bVz(=o&j~yuv7JW8X%L=<43(mZy76Zjpo``NidVqMd2_ zjowC0lS6DC{@)Q|yg_v^?qxNv$C93<+KUUCTxRp8N&RY-4r`(D5<8>&496})U2bU7 zpm9^Ru^xfeOE$9#>o&)m6e(_NbG0;nhCaQER#pRjGI`RFxz7wLuyU1f8%=%;qtd?CIosor|p+U{aN^fKP9{;)o= zI=Z#|PaFxQHU>=Y*SmZcR1eWwZFB#ut#-Fz-$tzAS>@QDjSNGcjedPL6x-dpt{#1o zecd&e$-0IzcO@t9ZFfxD8ci~y<`o+=_AadI8LGF@!W|8Uc-2b&1ExDMU7lep>`o7h9{VqQ0N|E(z4V9p)jk~G8^qZdKu6T8yo~Xk#i*}W9K^KQc zTcetd2EEG%Nqu88uyJxb?~X9_h9doF42v>0dPQHYG~Bg;&boS*`m2OcjeKz;<4(d~ z{k$)1i5~jXyE}W>e62eSw}z_vcb%=rr+bZ+rurJr4Rb=Qtxr@7^=9&M@UQx@pgVPy z4y%!*0c*J{U+7LrBBFr!s;<-(Do5q(UX^X-YJ}SQ%|@wFLO3&OxcC!YHHTQ8RF0Le zvJ7YHF)P`X(%qR!vz1vd%m(J_P7DdAEwPnp=->Jo7hT4MOj;AN41xd4;|!s~nqESk zu231Swb;*RliPGohl@j3m!o^#*;qVA&$1S&r5bfM?lju07wX5HYb~}_tlo9VZZ7M& ztitjZlFe6|X&b|uRcu8pG}e!M;|Zd*&D4fbaTrN4s-rtTT$_gVr21O@Y_xD^OCd|t zv)RD(AvQ)uN%3~|xA_9vSQ9$bMr)_btqn)+TF6DT5U5(HMj91Df}zj-WdB`mtn2mJ z#+E3c)|$_UYdtrLxxPoDn65CdM%hYSKQr+|X;)pOYRn0%s<-%`tvZAs@hs8KxSgx_ zlOw!e>u1=MURtF~uWhI@U8KAX 
zs*~JL7?j7F{1)ViW%ID=V9(PC7OxU-*Z7GheK6Hpd05JuQo96{4^6%+39b;v6-!H~ zk~gTZARNeJPdYyNfQlOO<&+q-@ zR-N>V{cd Date: Mon, 26 Jan 2026 20:48:56 -0800 Subject: [PATCH 6/9] whisper works --- backends/cuda/runtime/cuda_backend.cpp | 210 +++++++++++++++++++++++-- backends/cuda/runtime/shims/memory.cpp | 10 +- 2 files changed, 206 insertions(+), 14 deletions(-) diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index 09001239a30..c0f39fea16e 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -75,6 +76,124 @@ using slim::c10::DeviceType; namespace { constexpr char kSkipCopyOutputToCpuForMethod[] = "skip_copy_output_to_cpu_for_method"; + +/** + * Print SlimTensor debug information in a formatted style. + * + * Output format: + * SlimTensor { + * data_ptr: 0x... + * sizes: [d0, d1, ...] + * strides: [s0, s1, ...] + * n_dim: X + * numel: Y + * dtype: TypeName + * } + */ +void print_tensor(const SlimTensor* tensor, const char* name = nullptr) { + if (tensor == nullptr) { + ET_LOG(Info, "SlimTensor%s%s: nullptr", name ? " " : "", name ? name : ""); + return; + } + + auto sizes = tensor->sizes(); + auto strides = tensor->strides(); + + std::string sizes_str = "["; + for (size_t i = 0; i < sizes.size(); ++i) { + if (i > 0) + sizes_str += ", "; + sizes_str += std::to_string(sizes[i]); + } + sizes_str += "]"; + + std::string strides_str = "["; + for (size_t i = 0; i < strides.size(); ++i) { + if (i > 0) + strides_str += ", "; + strides_str += std::to_string(strides[i]); + } + strides_str += "]"; + + ET_LOG( + Info, + "SlimTensor%s%s {\n" + " data_ptr: %p\n" + " sizes: %s\n" + " strides: %s\n" + " n_dim: %zu\n" + " numel: %zu\n" + " dtype: %s\n" + "}", + name ? " " : "", + name ? 
name : "", + tensor->data_ptr(), + sizes_str.c_str(), + strides_str.c_str(), + tensor->dim(), + tensor->numel(), + slim::c10::toString(tensor->dtype())); +} + +/** + * Print ETensor (executorch::runtime::etensor::Tensor) debug information + * in a formatted style. + * + * Output format: + * ETensor { + * data_ptr: 0x... + * sizes: [d0, d1, ...] + * strides: [s0, s1, ...] + * n_dim: X + * numel: Y + * dtype: TypeName + * } + */ +void print_tensor(const Tensor* tensor, const char* name = nullptr) { + if (tensor == nullptr) { + ET_LOG(Info, "ETensor%s%s: nullptr", name ? " " : "", name ? name : ""); + return; + } + + auto sizes = tensor->sizes(); + auto strides = tensor->strides(); + + std::string sizes_str = "["; + for (size_t i = 0; i < sizes.size(); ++i) { + if (i > 0) + sizes_str += ", "; + sizes_str += std::to_string(sizes[i]); + } + sizes_str += "]"; + + std::string strides_str = "["; + for (size_t i = 0; i < strides.size(); ++i) { + if (i > 0) + strides_str += ", "; + strides_str += std::to_string(strides[i]); + } + strides_str += "]"; + + ET_LOG( + Info, + "ETensor%s%s {\n" + " data_ptr: %p\n" + " sizes: %s\n" + " strides: %s\n" + " n_dim: %zu\n" + " numel: %zu\n" + " dtype: %s\n" + "}", + name ? " " : "", + name ? 
name : "", + tensor->const_data_ptr(), + sizes_str.c_str(), + strides_str.c_str(), + static_cast(tensor->dim()), + static_cast(tensor->numel()), + executorch::runtime::toString(tensor->scalar_type())); +} + } // anonymous namespace class ET_EXPERIMENTAL CudaBackend final @@ -320,6 +439,7 @@ class ET_EXPERIMENTAL CudaBackend final // Process input tensors: convert ETensor (CPU) to SlimTensor (GPU) for (size_t i = 0; i < n_inputs; i++) { auto* cpu_tensor = &(args[i]->toTensor()); + print_tensor(cpu_tensor, "cpu_tensor[0]"); // Check if input data is already on GPU (skip-copy optimization for // inputs) This can happen when the caller has pre-staged data on GPU @@ -342,6 +462,9 @@ class ET_EXPERIMENTAL CudaBackend final DEFAULT_CUDA_DEVICE, 0 // storage_offset )); + + print_tensor(gpu_inputs[i], "gpu_input[0]"); + continue; } } @@ -349,6 +472,7 @@ class ET_EXPERIMENTAL CudaBackend final // Data is on CPU - use from_etensor to copy to GPU gpu_inputs[i] = new SlimTensor(from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE)); + print_tensor(gpu_inputs[i], "gpu_input[0]"); } // Process output tensors: create GPU SlimTensors for kernel output @@ -368,9 +492,12 @@ class ET_EXPERIMENTAL CudaBackend final DEFAULT_CUDA_DEVICE)); } - ET_LOG(Info, "line 374"); + ET_LOG(Info, "line 374"); // Run AOTI container with GPU SlimTensors + // NOTE: The AOTI model may REPLACE the output tensor pointers during run(). + // Our pre-allocated tensors might be deleted by the model, and gpu_outputs + // will contain pointers to NEW tensors that the model allocated. 
AOTIRuntimeError error = handle->run( handle->container_handle, reinterpret_cast(gpu_inputs.data()), @@ -380,8 +507,7 @@ class ET_EXPERIMENTAL CudaBackend final handle->cuda_stream, nullptr); - ET_LOG(Info, "line 387"); - + ET_LOG(Info, "line 387"); ET_CHECK_OR_RETURN_ERROR( error == Error::Ok, @@ -389,11 +515,26 @@ class ET_EXPERIMENTAL CudaBackend final "AOTInductorModelContainerRun failed with error code %d", error); + print_tensor(gpu_outputs[0], "gpu_output[0]"); + + // Synchronize CUDA stream to ensure all GPU operations are complete + // before reading output tensor metadata and copying data back to CPU. + // Without this, the GPU operations are asynchronous and the output + // tensor data/metadata may not be ready yet. + cudaStream_t cuda_stream = static_cast(handle->cuda_stream); + cudaError_t sync_err = cudaStreamSynchronize(cuda_stream); + ET_CHECK_OR_RETURN_ERROR( + sync_err == cudaSuccess, + Internal, + "Failed to synchronize CUDA stream: %s", + cudaGetErrorString(sync_err)); + const bool copy_outputs = !should_skip_copy_for_method(handle->method_name); ET_LOG(Info, "line 398"); if (copy_outputs) { + ET_LOG(Info, "copy_outputs = true -- copying outputs back to CPU"); // Copy GPU SlimTensor results back to CPU ETensors for (size_t i = 0; i < n_outputs; i++) { auto* cpu_output_tensor = &(args[i + n_inputs]->toTensor()); @@ -403,20 +544,39 @@ class ET_EXPERIMENTAL CudaBackend final i); } } else { + ET_LOG(Info, "copy_outputs = false -- keep gpu tensor on gpu"); // Skip-copy optimization: wrap GPU data as ETensor using from_blob // The caller is responsible for handling GPU data directly + // + // IMPORTANT: The AOTI model may replace the output tensor pointers during + // handle->run(). The tensors we pre-allocated might have been deleted by + // the model, and gpu_outputs now contains pointers to NEW tensors that + // the model allocated. We store these NEW tensors for lifetime management. 
{ std::lock_guard guard(cached_outputs_mutex_); auto& cached_outputs = cached_outputs_[handle]; - - // Clear cached outputs for previous round + auto& cached_tensor_ptrs = cached_tensor_ptrs_[handle]; + + // Delete the PREVIOUS round's tensors (allocated by AOTI model in the + // previous run). We must delete them because the AOTI model expects us + // to manage lifetimes of outputs it returns. + for (auto* tensor : cached_outputs) { + if (tensor != nullptr) { + delete tensor; + } + } cached_outputs.clear(); + cached_tensor_ptrs.clear(); + for (size_t i = 0; i < n_outputs; i++) { - // Move output SlimTensors to cached_outputs for lifetime management - cached_outputs.push_back(std::move(gpu_outputs[i])); + // gpu_outputs[i] now points to a tensor allocated by the AOTI model + // (it may have replaced our pre-allocated tensor during handle->run). + // Store this pointer for lifetime management. + cached_outputs.push_back(gpu_outputs[i]); + + print_tensor(cached_outputs[i], "cached_outputs[0]"); // Create an ETensor wrapper pointing to the GPU data - // The data stays on GPU and the caller handles it SlimTensor* cached = cached_outputs.back(); auto slim_sizes = cached->sizes(); auto slim_strides = cached->strides(); @@ -430,8 +590,9 @@ class ET_EXPERIMENTAL CudaBackend final static_cast(slim_strides[d]); } - // Use tensor_ptr_maker to create a non-owning ETensor wrapper - // Note: This creates a view into the SlimTensor's GPU memory + // Create TensorPtr wrapper - MUST be stored to keep TensorImpl alive! + // The TensorImpl owns the sizes/strides arrays. If TensorPtr is + // destroyed, the ETensor in args will have dangling pointers. 
auto tensor_ptr = executorch::extension::from_blob( cached->data_ptr(), std::move(et_sizes), @@ -440,6 +601,11 @@ class ET_EXPERIMENTAL CudaBackend final // Assign the wrapped tensor to the output EValue args[i + n_inputs]->toTensor() = *tensor_ptr; + + print_tensor(&args[i + n_inputs]->toTensor(), "args[i + n_inputs]->toTensor()"); + + // Store TensorPtr to keep TensorImpl alive until next execution + cached_tensor_ptrs.push_back(std::move(tensor_ptr)); } } } @@ -455,6 +621,23 @@ class ET_EXPERIMENTAL CudaBackend final } AOTIDelegateHandle* handle = (AOTIDelegateHandle*)handle_; + // Clean up cached output tensors and TensorPtrs for this handle + { + std::lock_guard guard(cached_outputs_mutex_); + auto it = cached_outputs_.find(handle); + if (it != cached_outputs_.end()) { + for (auto* tensor : it->second) { + if (tensor != nullptr) { + delete tensor; + } + } + cached_outputs_.erase(it); + } + // Also clean up cached TensorPtrs (they will be destroyed automatically + // when erased, releasing the TensorImpl ownership) + cached_tensor_ptrs_.erase(handle); + } + // Destroy the CUDA stream if it exists if (handle->cuda_stream != nullptr) { cudaStream_t cuda_stream = static_cast(handle->cuda_stream); @@ -508,6 +691,13 @@ class ET_EXPERIMENTAL CudaBackend final mutable std::mutex cached_outputs_mutex_; mutable std::unordered_map> cached_outputs_; + // TensorPtr wrappers must be kept alive so the ETensor's TensorImpl + // (which owns sizes/strides arrays) isn't destroyed when TensorPtr goes + // out of scope. Store them alongside cached SlimTensors. 
+ mutable std::unordered_map< + AOTIDelegateHandle*, + std::vector> + cached_tensor_ptrs_; }; } // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/shims/memory.cpp b/backends/cuda/runtime/shims/memory.cpp index 7efa7e13e3b..23511ab3b14 100644 --- a/backends/cuda/runtime/shims/memory.cpp +++ b/backends/cuda/runtime/shims/memory.cpp @@ -265,10 +265,12 @@ AOTITorchError aoti_torch_assign_tensors_out( InvalidArgument, "aoti_torch_assign_tensors_out: ret_dst is null"); - // Move the source tensor into the destination. After this operation, - // the source tensor will be left in an undefined state (reset). - // This differs from aoti_torch_new_tensor_handle which copies the tensor. - *ret_dst = new SlimTensor(std::move(*src)); + // Create a shallow copy of the source tensor that shares the same underlying + // storage. The source tensor remains valid after this operation. + // This matches ATen's behavior where the copy constructor shares storage + // via reference counting. 
+ SlimTensor dst_tensor = *src; + *ret_dst = new SlimTensor(std::move(dst_tensor)); return Error::Ok; } From 5e9f6549d8d9e798ea5d07cde02f3bac0f21b970 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Mon, 26 Jan 2026 22:33:00 -0800 Subject: [PATCH 7/9] parakeet works - 2 --- backends/aoti/CMakeLists.txt | 8 +- backends/aoti/common_shims_slim.cpp | 4 +- backends/aoti/common_shims_slim.h | 1 - backends/cuda/CMakeLists.txt | 13 ++-- backends/cuda/runtime/cuda_backend.cpp | 102 ++++++------------------- backends/cuda/runtime/utils.h | 78 +++++++++++++------ 6 files changed, 95 insertions(+), 111 deletions(-) diff --git a/backends/aoti/CMakeLists.txt b/backends/aoti/CMakeLists.txt index 20e8d83a36c..cd992d24031 100644 --- a/backends/aoti/CMakeLists.txt +++ b/backends/aoti/CMakeLists.txt @@ -43,7 +43,9 @@ target_compile_options( target_compile_definitions( aoti_common_shims PUBLIC $<$:EXPORT_AOTI_FUNCTIONS> ) -target_link_libraries(aoti_common_shims PUBLIC extension_tensor ${CMAKE_DL_LIBS}) +target_link_libraries( + aoti_common_shims PUBLIC extension_tensor ${CMAKE_DL_LIBS} +) install( TARGETS aoti_common_shims @@ -51,8 +53,8 @@ install( DESTINATION ${CMAKE_INSTALL_LIBDIR} ) # ============================================================================== -# AOTI common shims using SlimTensor (for CUDA backend) -# Uses SlimTensor for all tensor operations +# AOTI common shims using SlimTensor (for CUDA backend) Uses SlimTensor for all +# tensor operations # TODO(gasoonjia): Replace aoti_common_shims with this one after metal migration # ============================================================================== add_library(aoti_common_shims_slim STATIC common_shims_slim.cpp) diff --git a/backends/aoti/common_shims_slim.cpp b/backends/aoti/common_shims_slim.cpp index 4a3ba43381c..79b6f343b1c 100644 --- a/backends/aoti/common_shims_slim.cpp +++ b/backends/aoti/common_shims_slim.cpp @@ -59,8 +59,8 @@ AOTITorchError aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim) { } 
int32_t aoti_torch_layout_strided() { - // Slimtensor only support strided layout, the return value will always be 0, a.k.a - // at::Layout::Strided; + // Slimtensor only support strided layout, the return value will always be 0, + // a.k.a at::Layout::Strided; return 0; } diff --git a/backends/aoti/common_shims_slim.h b/backends/aoti/common_shims_slim.h index b4d70ee47b0..5ac4e4c45a8 100644 --- a/backends/aoti/common_shims_slim.h +++ b/backends/aoti/common_shims_slim.h @@ -66,7 +66,6 @@ aoti_torch_get_device_type(Tensor* tensor, int32_t* ret_device_type); AOTI_SHIM_EXPORT AOTITorchError aoti_torch_get_device_index(Tensor* tensor, int32_t* ret_device_index); - // ============================================================ // DType Constants - Declarations // ============================================================ diff --git a/backends/cuda/CMakeLists.txt b/backends/cuda/CMakeLists.txt index 6b683127508..dfb581a7062 100644 --- a/backends/cuda/CMakeLists.txt +++ b/backends/cuda/CMakeLists.txt @@ -99,12 +99,13 @@ install( # CUDA-specific AOTI shim symbols (dynamically linked) Uses # common_shims_slim.cpp for SlimTensor-based shim implementations - set(_aoti_cuda_shim_sources - runtime/shims/memory.cpp runtime/shims/cuda_guard.cpp - runtime/shims/int4mm.cu - ${EXECUTORCH_ROOT}/backends/aoti/common_shims_slim.cpp - ${EXECUTORCH_ROOT}/backends/aoti/slim/cuda/guard.cpp - ) +set(_aoti_cuda_shim_sources + runtime/shims/memory.cpp + runtime/shims/cuda_guard.cpp + runtime/shims/int4mm.cu + ${EXECUTORCH_ROOT}/backends/aoti/common_shims_slim.cpp + ${EXECUTORCH_ROOT}/backends/aoti/slim/cuda/guard.cpp +) add_library(aoti_cuda_shims SHARED ${_aoti_cuda_shim_sources}) diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index c0f39fea16e..ab4d8c94b75 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -11,8 +11,8 @@ #include #include #include -#include #include +#include #include #include 
@@ -32,7 +32,6 @@ #include #include #include -#include // Include our shim layer headers #include @@ -408,7 +407,7 @@ class ET_EXPERIMENTAL CudaBackend final DelegateHandle* handle_, Span args) const override { AOTIDelegateHandle* handle = (AOTIDelegateHandle*)handle_; - ET_LOG(Info, "line 292"); + // ET_LOG(Info, "line 292"); // executorch::backends::cuda::setCurrentCUDAStream( // static_cast(handle->cuda_stream), @@ -428,7 +427,7 @@ class ET_EXPERIMENTAL CudaBackend final n_inputs, n_outputs, args.size()) - ET_LOG(Info, "line 307"); + // ET_LOG(Info, "line 307"); // NOTE: ExecuTorch tensors maybe on CPU or GPU due to the skip-copy // optimization We need to create GPU copies for CUDA kernel execution using @@ -439,7 +438,7 @@ class ET_EXPERIMENTAL CudaBackend final // Process input tensors: convert ETensor (CPU) to SlimTensor (GPU) for (size_t i = 0; i < n_inputs; i++) { auto* cpu_tensor = &(args[i]->toTensor()); - print_tensor(cpu_tensor, "cpu_tensor[0]"); + // print_tensor(cpu_tensor, "cpu_tensor[0]"); // Check if input data is already on GPU (skip-copy optimization for // inputs) This can happen when the caller has pre-staged data on GPU @@ -461,18 +460,18 @@ class ET_EXPERIMENTAL CudaBackend final static_cast(cpu_tensor->scalar_type()), DEFAULT_CUDA_DEVICE, 0 // storage_offset - )); + )); - print_tensor(gpu_inputs[i], "gpu_input[0]"); + // print_tensor(gpu_inputs[i], "gpu_input[0]"); continue; } } // Data is on CPU - use from_etensor to copy to GPU - gpu_inputs[i] = - new SlimTensor(from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE)); - print_tensor(gpu_inputs[i], "gpu_input[0]"); + gpu_inputs[i] = new SlimTensor( + from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE)); + // print_tensor(gpu_inputs[i], "gpu_input[0]"); } // Process output tensors: create GPU SlimTensors for kernel output @@ -492,7 +491,7 @@ class ET_EXPERIMENTAL CudaBackend final DEFAULT_CUDA_DEVICE)); } - ET_LOG(Info, "line 374"); + // ET_LOG(Info, "line 374"); // Run AOTI 
container with GPU SlimTensors // NOTE: The AOTI model may REPLACE the output tensor pointers during run(). @@ -507,7 +506,7 @@ class ET_EXPERIMENTAL CudaBackend final handle->cuda_stream, nullptr); - ET_LOG(Info, "line 387"); + // ET_LOG(Info, "line 387"); ET_CHECK_OR_RETURN_ERROR( error == Error::Ok, @@ -515,26 +514,13 @@ class ET_EXPERIMENTAL CudaBackend final "AOTInductorModelContainerRun failed with error code %d", error); - print_tensor(gpu_outputs[0], "gpu_output[0]"); - - // Synchronize CUDA stream to ensure all GPU operations are complete - // before reading output tensor metadata and copying data back to CPU. - // Without this, the GPU operations are asynchronous and the output - // tensor data/metadata may not be ready yet. - cudaStream_t cuda_stream = static_cast(handle->cuda_stream); - cudaError_t sync_err = cudaStreamSynchronize(cuda_stream); - ET_CHECK_OR_RETURN_ERROR( - sync_err == cudaSuccess, - Internal, - "Failed to synchronize CUDA stream: %s", - cudaGetErrorString(sync_err)); const bool copy_outputs = !should_skip_copy_for_method(handle->method_name); - ET_LOG(Info, "line 398"); + // ET_LOG(Info, "line 398"); if (copy_outputs) { - ET_LOG(Info, "copy_outputs = true -- copying outputs back to CPU"); + // ET_LOG(Info, "copy_outputs = true -- copying outputs back to CPU"); // Copy GPU SlimTensor results back to CPU ETensors for (size_t i = 0; i < n_outputs; i++) { auto* cpu_output_tensor = &(args[i + n_inputs]->toTensor()); @@ -544,18 +530,17 @@ class ET_EXPERIMENTAL CudaBackend final i); } } else { - ET_LOG(Info, "copy_outputs = false -- keep gpu tensor on gpu"); - // Skip-copy optimization: wrap GPU data as ETensor using from_blob + // Skip-copy optimization: point ETensor directly to GPU data // The caller is responsible for handling GPU data directly // // IMPORTANT: The AOTI model may replace the output tensor pointers during // handle->run(). 
The tensors we pre-allocated might have been deleted by // the model, and gpu_outputs now contains pointers to NEW tensors that - // the model allocated. We store these NEW tensors for lifetime management. + // the model allocated. We store these NEW tensors for lifetime + // management. { std::lock_guard guard(cached_outputs_mutex_); auto& cached_outputs = cached_outputs_[handle]; - auto& cached_tensor_ptrs = cached_tensor_ptrs_[handle]; // Delete the PREVIOUS round's tensors (allocated by AOTI model in the // previous run). We must delete them because the AOTI model expects us @@ -566,7 +551,6 @@ class ET_EXPERIMENTAL CudaBackend final } } cached_outputs.clear(); - cached_tensor_ptrs.clear(); for (size_t i = 0; i < n_outputs; i++) { // gpu_outputs[i] now points to a tensor allocated by the AOTI model @@ -574,44 +558,18 @@ class ET_EXPERIMENTAL CudaBackend final // Store this pointer for lifetime management. cached_outputs.push_back(gpu_outputs[i]); - print_tensor(cached_outputs[i], "cached_outputs[0]"); - - // Create an ETensor wrapper pointing to the GPU data - SlimTensor* cached = cached_outputs.back(); - auto slim_sizes = cached->sizes(); - auto slim_strides = cached->strides(); - - std::vector et_sizes(cached->dim()); - std::vector et_strides(cached->dim()); - for (size_t d = 0; d < cached->dim(); d++) { - et_sizes[d] = - static_cast(slim_sizes[d]); - et_strides[d] = - static_cast(slim_strides[d]); - } - - // Create TensorPtr wrapper - MUST be stored to keep TensorImpl alive! - // The TensorImpl owns the sizes/strides arrays. If TensorPtr is - // destroyed, the ETensor in args will have dangling pointers. 
- auto tensor_ptr = executorch::extension::from_blob( - cached->data_ptr(), - std::move(et_sizes), - std::move(et_strides), - static_cast(cached->dtype())); - - // Assign the wrapped tensor to the output EValue - args[i + n_inputs]->toTensor() = *tensor_ptr; - - print_tensor(&args[i + n_inputs]->toTensor(), "args[i + n_inputs]->toTensor()"); - - // Store TensorPtr to keep TensorImpl alive until next execution - cached_tensor_ptrs.push_back(std::move(tensor_ptr)); + // Wrap the GPU SlimTensor data into the ETensor (zero-copy). + // This resizes the ETensor to match the SlimTensor shape and sets + // its data pointer to point directly to the GPU data. + auto* output_etensor = &(args[i + n_inputs]->toTensor()); + ET_CHECK_OK_OR_RETURN_ERROR( + wrap_slimtensor_to_etensor(gpu_outputs[i], output_etensor), + "Failed to wrap GPU output %zu into ETensor", + i); } } } - ET_LOG(Info, "line 451"); - return Error::Ok; } @@ -621,7 +579,7 @@ class ET_EXPERIMENTAL CudaBackend final } AOTIDelegateHandle* handle = (AOTIDelegateHandle*)handle_; - // Clean up cached output tensors and TensorPtrs for this handle + // Clean up cached output tensors for this handle { std::lock_guard guard(cached_outputs_mutex_); auto it = cached_outputs_.find(handle); @@ -633,9 +591,6 @@ class ET_EXPERIMENTAL CudaBackend final } cached_outputs_.erase(it); } - // Also clean up cached TensorPtrs (they will be destroyed automatically - // when erased, releasing the TensorImpl ownership) - cached_tensor_ptrs_.erase(handle); } // Destroy the CUDA stream if it exists @@ -691,13 +646,6 @@ class ET_EXPERIMENTAL CudaBackend final mutable std::mutex cached_outputs_mutex_; mutable std::unordered_map> cached_outputs_; - // TensorPtr wrappers must be kept alive so the ETensor's TensorImpl - // (which owns sizes/strides arrays) isn't destroyed when TensorPtr goes - // out of scope. Store them alongside cached SlimTensors. 
- mutable std::unordered_map< - AOTIDelegateHandle*, - std::vector> - cached_tensor_ptrs_; }; } // namespace executorch::backends::cuda diff --git a/backends/cuda/runtime/utils.h b/backends/cuda/runtime/utils.h index 9f0658cb8bc..399de23a172 100644 --- a/backends/cuda/runtime/utils.h +++ b/backends/cuda/runtime/utils.h @@ -19,42 +19,30 @@ namespace executorch::backends::cuda { -/** - * Copies data from a SlimTensor to an ETensor. - * - * This function converts a SlimTensor back to an ETensor. The ETensor is - * assumed to always reside on CPU, so this handles both CPU→CPU and GPU→CPU - * copies. The function will resize the ETensor if needed and copy the data. - * - * @param slim_tensor Pointer to the source SlimTensor (must not be null). - * @param etensor Pointer to the destination ETensor (must not be null). - * @return Error::Ok on success, or an appropriate error code on failure. - */ -inline executorch::runtime::Error copy_slimtensor_to_etensor( +namespace { +inline executorch::runtime::Error _check_tensor_metadata( const executorch::backends::aoti::slim::SlimTensor* slim_tensor, executorch::runtime::etensor::Tensor* etensor) { ET_CHECK_OR_RETURN_ERROR( slim_tensor != nullptr, InvalidArgument, - "copy_slimtensor_to_etensor: slim_tensor pointer cannot be nullptr"); + "slim_tensor pointer cannot be nullptr"); ET_CHECK_OR_RETURN_ERROR( - etensor != nullptr, - InvalidArgument, - "copy_slimtensor_to_etensor: etensor pointer cannot be nullptr"); + etensor != nullptr, InvalidArgument, "etensor pointer cannot be nullptr"); // Check storage_offset is 0 (ETensor does not support storage offset) ET_CHECK_OR_RETURN_ERROR( slim_tensor->storage_offset() == 0, InvalidArgument, - "copy_slimtensor_to_etensor: SlimTensor storage_offset must be 0, got %ld", + "SlimTensor storage_offset must be 0, got %ld", static_cast(slim_tensor->storage_offset())); // Check that SlimTensor is contiguous ET_CHECK_OR_RETURN_ERROR( slim_tensor->is_contiguous(), InvalidArgument, - 
"copy_slimtensor_to_etensor: SlimTensor must be contiguous"); + "SlimTensor must be contiguous"); // Check dtype matches executorch::backends::aoti::slim::c10::ScalarType slim_dtype = @@ -64,7 +52,7 @@ inline executorch::runtime::Error copy_slimtensor_to_etensor( ET_CHECK_OR_RETURN_ERROR( static_cast(slim_dtype) == static_cast(etensor_dtype), InvalidArgument, - "copy_slimtensor_to_etensor: dtype mismatch, SlimTensor dtype %d != ETensor dtype %d", + "dtype mismatch, SlimTensor dtype %d != ETensor dtype %d", static_cast(slim_dtype), static_cast(etensor_dtype)); @@ -72,7 +60,7 @@ inline executorch::runtime::Error copy_slimtensor_to_etensor( ET_CHECK_OR_RETURN_ERROR( static_cast(slim_tensor->dim()) == etensor->dim(), InvalidArgument, - "copy_slimtensor_to_etensor: dimension mismatch, SlimTensor dim %zu != ETensor dim %zd", + "dimension mismatch, SlimTensor dim %zu != ETensor dim %zd", slim_tensor->dim(), etensor->dim()); @@ -94,8 +82,27 @@ inline executorch::runtime::Error copy_slimtensor_to_etensor( executorch::runtime::ArrayRef< executorch::runtime::etensor::TensorImpl::SizesType>( new_sizes.data(), new_sizes.size())); - ET_CHECK_OK_OR_RETURN_ERROR( - resize_err, "copy_slimtensor_to_etensor: failed to resize ETensor"); + ET_CHECK_OK_OR_RETURN_ERROR(resize_err, "failed to resize ETensor"); + + return executorch::runtime::Error::Ok; +} +} // namespace + +/** + * Copies data from a SlimTensor to an ETensor. + * + * This function converts a SlimTensor back to an ETensor. The ETensor is + * assumed to always reside on CPU, so this handles both CPU→CPU and GPU→CPU + * copies. The function will resize the ETensor if needed and copy the data. + * + * @param slim_tensor Pointer to the source SlimTensor (must not be null). + * @param etensor Pointer to the destination ETensor (must not be null). + * @return Error::Ok on success, or an appropriate error code on failure. 
+ */ +inline executorch::runtime::Error copy_slimtensor_to_etensor( + const executorch::backends::aoti::slim::SlimTensor* slim_tensor, + executorch::runtime::etensor::Tensor* etensor) { + _check_tensor_metadata(slim_tensor, etensor); // Copy data from SlimTensor to ETensor // SlimTensor may be on GPU or CPU, ETensor is always on CPU @@ -123,4 +130,31 @@ inline executorch::runtime::Error copy_slimtensor_to_etensor( return executorch::runtime::Error::Ok; } +/** + * Wraps a SlimTensor's data into an existing ETensor (zero-copy). + * + * This function resizes the ETensor to match the SlimTensor's shape and + * sets its data pointer to point directly to the SlimTensor's data buffer. + * No data is copied - the ETensor becomes a view of the SlimTensor's data. + * + * IMPORTANT: The caller must ensure the SlimTensor remains alive as long + * as the ETensor is in use, since the ETensor will reference the SlimTensor's + * data directly. + * + * @param slim_tensor Pointer to the source SlimTensor (must not be null). + * @param etensor Pointer to the destination ETensor (must not be null). + * @return Error::Ok on success, or an appropriate error code on failure. 
+ */ +inline executorch::runtime::Error wrap_slimtensor_to_etensor( + const executorch::backends::aoti::slim::SlimTensor* slim_tensor, + executorch::runtime::etensor::Tensor* etensor) { + _check_tensor_metadata(slim_tensor, etensor); + + // Set data pointer to point directly to SlimTensor's data (zero-copy) + etensor->unsafeGetTensorImpl()->set_data( + const_cast(slim_tensor->data_ptr())); + + return executorch::runtime::Error::Ok; +} + } // namespace executorch::backends::cuda From 512a3e4e868053727c9a1a077aea30a781141536 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Tue, 27 Jan 2026 10:40:45 -0800 Subject: [PATCH 8/9] remove nonnecessary debug info --- backends/cuda/runtime/cuda_backend.cpp | 147 +------------------------ 1 file changed, 3 insertions(+), 144 deletions(-) diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index ab4d8c94b75..f404325371e 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -75,124 +75,6 @@ using slim::c10::DeviceType; namespace { constexpr char kSkipCopyOutputToCpuForMethod[] = "skip_copy_output_to_cpu_for_method"; - -/** - * Print SlimTensor debug information in a formatted style. - * - * Output format: - * SlimTensor { - * data_ptr: 0x... - * sizes: [d0, d1, ...] - * strides: [s0, s1, ...] - * n_dim: X - * numel: Y - * dtype: TypeName - * } - */ -void print_tensor(const SlimTensor* tensor, const char* name = nullptr) { - if (tensor == nullptr) { - ET_LOG(Info, "SlimTensor%s%s: nullptr", name ? " " : "", name ? 
name : ""); - return; - } - - auto sizes = tensor->sizes(); - auto strides = tensor->strides(); - - std::string sizes_str = "["; - for (size_t i = 0; i < sizes.size(); ++i) { - if (i > 0) - sizes_str += ", "; - sizes_str += std::to_string(sizes[i]); - } - sizes_str += "]"; - - std::string strides_str = "["; - for (size_t i = 0; i < strides.size(); ++i) { - if (i > 0) - strides_str += ", "; - strides_str += std::to_string(strides[i]); - } - strides_str += "]"; - - ET_LOG( - Info, - "SlimTensor%s%s {\n" - " data_ptr: %p\n" - " sizes: %s\n" - " strides: %s\n" - " n_dim: %zu\n" - " numel: %zu\n" - " dtype: %s\n" - "}", - name ? " " : "", - name ? name : "", - tensor->data_ptr(), - sizes_str.c_str(), - strides_str.c_str(), - tensor->dim(), - tensor->numel(), - slim::c10::toString(tensor->dtype())); -} - -/** - * Print ETensor (executorch::runtime::etensor::Tensor) debug information - * in a formatted style. - * - * Output format: - * ETensor { - * data_ptr: 0x... - * sizes: [d0, d1, ...] - * strides: [s0, s1, ...] - * n_dim: X - * numel: Y - * dtype: TypeName - * } - */ -void print_tensor(const Tensor* tensor, const char* name = nullptr) { - if (tensor == nullptr) { - ET_LOG(Info, "ETensor%s%s: nullptr", name ? " " : "", name ? name : ""); - return; - } - - auto sizes = tensor->sizes(); - auto strides = tensor->strides(); - - std::string sizes_str = "["; - for (size_t i = 0; i < sizes.size(); ++i) { - if (i > 0) - sizes_str += ", "; - sizes_str += std::to_string(sizes[i]); - } - sizes_str += "]"; - - std::string strides_str = "["; - for (size_t i = 0; i < strides.size(); ++i) { - if (i > 0) - strides_str += ", "; - strides_str += std::to_string(strides[i]); - } - strides_str += "]"; - - ET_LOG( - Info, - "ETensor%s%s {\n" - " data_ptr: %p\n" - " sizes: %s\n" - " strides: %s\n" - " n_dim: %zu\n" - " numel: %zu\n" - " dtype: %s\n" - "}", - name ? " " : "", - name ? 
name : "", - tensor->const_data_ptr(), - sizes_str.c_str(), - strides_str.c_str(), - static_cast(tensor->dim()), - static_cast(tensor->numel()), - executorch::runtime::toString(tensor->scalar_type())); -} - } // anonymous namespace class ET_EXPERIMENTAL CudaBackend final @@ -407,12 +289,6 @@ class ET_EXPERIMENTAL CudaBackend final DelegateHandle* handle_, Span args) const override { AOTIDelegateHandle* handle = (AOTIDelegateHandle*)handle_; - // ET_LOG(Info, "line 292"); - - // executorch::backends::cuda::setCurrentCUDAStream( - // static_cast(handle->cuda_stream), - // 0 // device index - // ); size_t n_inputs; handle->get_num_inputs(handle->container_handle, &n_inputs); @@ -427,7 +303,6 @@ class ET_EXPERIMENTAL CudaBackend final n_inputs, n_outputs, args.size()) - // ET_LOG(Info, "line 307"); // NOTE: ExecuTorch tensors maybe on CPU or GPU due to the skip-copy // optimization We need to create GPU copies for CUDA kernel execution using @@ -438,7 +313,6 @@ class ET_EXPERIMENTAL CudaBackend final // Process input tensors: convert ETensor (CPU) to SlimTensor (GPU) for (size_t i = 0; i < n_inputs; i++) { auto* cpu_tensor = &(args[i]->toTensor()); - // print_tensor(cpu_tensor, "cpu_tensor[0]"); // Check if input data is already on GPU (skip-copy optimization for // inputs) This can happen when the caller has pre-staged data on GPU @@ -462,8 +336,6 @@ class ET_EXPERIMENTAL CudaBackend final 0 // storage_offset )); - // print_tensor(gpu_inputs[i], "gpu_input[0]"); - continue; } } @@ -471,7 +343,6 @@ class ET_EXPERIMENTAL CudaBackend final // Data is on CPU - use from_etensor to copy to GPU gpu_inputs[i] = new SlimTensor( from_etensor(*cpu_tensor, CPU_DEVICE, DEFAULT_CUDA_DEVICE)); - // print_tensor(gpu_inputs[i], "gpu_input[0]"); } // Process output tensors: create GPU SlimTensors for kernel output @@ -491,8 +362,6 @@ class ET_EXPERIMENTAL CudaBackend final DEFAULT_CUDA_DEVICE)); } - // ET_LOG(Info, "line 374"); - // Run AOTI container with GPU SlimTensors // NOTE: The 
AOTI model may REPLACE the output tensor pointers during run(). // Our pre-allocated tensors might be deleted by the model, and gpu_outputs @@ -506,8 +375,6 @@ class ET_EXPERIMENTAL CudaBackend final handle->cuda_stream, nullptr); - // ET_LOG(Info, "line 387"); - ET_CHECK_OR_RETURN_ERROR( error == Error::Ok, Internal, @@ -517,8 +384,6 @@ class ET_EXPERIMENTAL CudaBackend final const bool copy_outputs = !should_skip_copy_for_method(handle->method_name); - // ET_LOG(Info, "line 398"); - if (copy_outputs) { // ET_LOG(Info, "copy_outputs = true -- copying outputs back to CPU"); // Copy GPU SlimTensor results back to CPU ETensors @@ -532,19 +397,13 @@ class ET_EXPERIMENTAL CudaBackend final } else { // Skip-copy optimization: point ETensor directly to GPU data // The caller is responsible for handling GPU data directly - // - // IMPORTANT: The AOTI model may replace the output tensor pointers during - // handle->run(). The tensors we pre-allocated might have been deleted by - // the model, and gpu_outputs now contains pointers to NEW tensors that - // the model allocated. We store these NEW tensors for lifetime - // management. + // We store these NEW tensors for next cycle usage and delete + // out-of-date tensors for lifetime management. { std::lock_guard guard(cached_outputs_mutex_); auto& cached_outputs = cached_outputs_[handle]; - // Delete the PREVIOUS round's tensors (allocated by AOTI model in the - // previous run). We must delete them because the AOTI model expects us - // to manage lifetimes of outputs it returns. + // Delete the PREVIOUS round's tensors for life management since they will never be used. 
for (auto* tensor : cached_outputs) { if (tensor != nullptr) { delete tensor; From 18afded78f2a74f6b29bdf09f2c51dd0bcda1670 Mon Sep 17 00:00:00 2001 From: gasoonjia Date: Tue, 27 Jan 2026 11:36:50 -0800 Subject: [PATCH 9/9] polish cuda backend.cpp comment --- backends/cuda/runtime/cuda_backend.cpp | 39 +++++++++++++------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/backends/cuda/runtime/cuda_backend.cpp b/backends/cuda/runtime/cuda_backend.cpp index f404325371e..43662c9cd12 100644 --- a/backends/cuda/runtime/cuda_backend.cpp +++ b/backends/cuda/runtime/cuda_backend.cpp @@ -304,9 +304,9 @@ class ET_EXPERIMENTAL CudaBackend final n_outputs, args.size()) - // NOTE: ExecuTorch tensors maybe on CPU or GPU due to the skip-copy - // optimization We need to create GPU copies for CUDA kernel execution using - // SlimTensor + // NOTE: ExecuTorch tensors may be on CPU or GPU due to the skip-copy + // optimization. We need to create GPU copies for CUDA kernel execution + // using SlimTensor. std::vector gpu_inputs(n_inputs); std::vector gpu_outputs(n_outputs); @@ -362,10 +362,11 @@ class ET_EXPERIMENTAL CudaBackend final DEFAULT_CUDA_DEVICE)); } - // Run AOTI container with GPU SlimTensors - // NOTE: The AOTI model may REPLACE the output tensor pointers during run(). - // Our pre-allocated tensors might be deleted by the model, and gpu_outputs - // will contain pointers to NEW tensors that the model allocated. + // Run the AOTI container with SlimTensors. + // + // NOTE: The handle->run function (defined in aoti_delegate_handle.h) expects + // ETensor* as input/output. We avoid changing its signature since it's shared + // with the Metal backend. 
Instead, we reinterpret_cast SlimTensor* to Tensor* AOTIRuntimeError error = handle->run( handle->container_handle, reinterpret_cast(gpu_inputs.data()), @@ -385,8 +386,7 @@ class ET_EXPERIMENTAL CudaBackend final const bool copy_outputs = !should_skip_copy_for_method(handle->method_name); if (copy_outputs) { - // ET_LOG(Info, "copy_outputs = true -- copying outputs back to CPU"); - // Copy GPU SlimTensor results back to CPU ETensors + // Deep copy GPU SlimTensor results back to CPU ETensors for (size_t i = 0; i < n_outputs; i++) { auto* cpu_output_tensor = &(args[i + n_inputs]->toTensor()); ET_CHECK_OK_OR_RETURN_ERROR( @@ -395,15 +395,16 @@ class ET_EXPERIMENTAL CudaBackend final i); } } else { - // Skip-copy optimization: point ETensor directly to GPU data - // The caller is responsible for handling GPU data directly - // We store these NEW tensors for next cycle usage and delete - // out-of-date tensors for lifetime management. + // Skip-copy optimization: point ETensor directly to GPU data. + // The caller is responsible for handling GPU data directly. + // + // Lifetime management: We cache the newly created GPU tensors and delete + // the previous round's tensors, since they are no longer needed. { std::lock_guard guard(cached_outputs_mutex_); auto& cached_outputs = cached_outputs_[handle]; - // Delete the PREVIOUS round's tensors for life management since they will never be used. + // Delete the previous round's tensors since they are no longer in use. for (auto* tensor : cached_outputs) { if (tensor != nullptr) { delete tensor; @@ -412,9 +413,7 @@ class ET_EXPERIMENTAL CudaBackend final cached_outputs.clear(); for (size_t i = 0; i < n_outputs; i++) { - // gpu_outputs[i] now points to a tensor allocated by the AOTI model - // (it may have replaced our pre-allocated tensor during handle->run). - // Store this pointer for lifetime management. + // Cache this output tensor to keep the underlying GPU data alive. 
cached_outputs.push_back(gpu_outputs[i]); // Wrap the GPU SlimTensor data into the ETensor (zero-copy). @@ -499,9 +498,9 @@ class ET_EXPERIMENTAL CudaBackend final std::string skip_copy_method_; // Cached output tensors for skip-copy optimization. - // When copy-skip is enabled, output SlimTensors are cached here to keep - // GPU memory alive while the caller processes the results. - // Maps from AOTIDelegateHandle* to its cached outputs. + // When skip-copy is enabled, output SlimTensors are cached here to keep + // the underlying GPU memory alive while the caller processes the results. + // Maps each AOTIDelegateHandle* to its vector of cached output tensors. mutable std::mutex cached_outputs_mutex_; mutable std::unordered_map> cached_outputs_;