@@ -32,13 +32,13 @@ Stack a list of 2D arrays into a 3D volume.
3232 slices = [np.random.rand(512 , 512 ) for _ in range (100 )]
3333
3434 # Stack into 3D volume (100, 512, 512)
35- volume = stack_slices(slices, target_type = ' numpy' )
35+ volume = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
3636
3737 **Parameters:**
3838
3939- ``slices ``: List of 2D arrays
40- - ``target_type ``: Target memory type ('numpy', 'cupy', 'torch', etc. )
41- - ``gpu_id ``: GPU device ID (default: 0 for GPU types, None for CPU )
40+ - ``memory_type ``: Target memory type ('numpy', 'cupy', 'torch', 'tensorflow', 'jax', 'pyclesperanto' )
41+ - ``gpu_id ``: GPU device ID (required, validated for GPU memory types )
4242
4343**Returns:**
4444
@@ -61,16 +61,17 @@ Unstack a 3D volume into a list of 2D slices.
6161 volume = np.random.rand(100 , 512 , 512 )
6262
6363 # Unstack into list of 2D slices
64- slices = unstack_slices(volume, target_type = ' numpy' )
64+ slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
6565
6666 print (len (slices)) # 100
6767 print (slices[0 ].shape) # (512, 512)
6868
6969 **Parameters:**
7070
71- - ``volume ``: 3D array/tensor with shape (depth, height, width)
72- - ``target_type ``: Target memory type for output slices
73- - ``gpu_id ``: GPU device ID (default: 0 for GPU types, None for CPU)
71+ - ``array ``: 3D array/tensor with shape (depth, height, width)
72+ - ``memory_type ``: Target memory type for output slices
73+ - ``gpu_id ``: GPU device ID (required, validated for GPU memory types)
74+ - ``validate_slices ``: If True, validates that each extracted slice is 2D (default: True)
7475
7576**Returns:**
7677
@@ -95,11 +96,11 @@ NumPy Stacking
9596 slices = [np.random.rand(256 , 256 ) for _ in range (50 )]
9697
9798 # Stack
98- volume = stack_slices(slices, target_type = ' numpy' )
99+ volume = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
99100 print (volume.shape) # (50, 256, 256)
100101
101102 # Unstack
102- recovered_slices = unstack_slices(volume, target_type = ' numpy' )
103+ recovered_slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
103104 print (len (recovered_slices)) # 50
104105
105106 GPU Stacking
@@ -116,7 +117,7 @@ GPU Stacking
116117 # Stack directly to GPU
117118 gpu_volume = stack_slices(
118119 cpu_slices,
119- target_type = ' cupy' ,
120+ memory_type = ' cupy' ,
120121 gpu_id = 0
121122 )
122123
@@ -135,10 +136,10 @@ Cross-Framework Stacking
135136 torch_slices = [torch.rand(128 , 128 ) for _ in range (20 )]
136137
137138 # Stack to NumPy
138- np_volume = stack_slices(torch_slices, target_type = ' numpy' )
139+ np_volume = stack_slices(torch_slices, memory_type = ' numpy' , gpu_id = 0 )
139140
140141 # Or stack to CuPy
141- cupy_volume = stack_slices(torch_slices, target_type = ' cupy' , gpu_id = 0 )
142+ cupy_volume = stack_slices(torch_slices, memory_type = ' cupy' , gpu_id = 0 )
142143
143144 Image Processing
144145----------------
@@ -161,9 +162,9 @@ Process each slice individually:
161162 volume = np.random.rand(100 , 512 , 512 )
162163
163164 # Unstack, process, restack
164- slices = unstack_slices(volume, target_type = ' numpy' )
165+ slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
165166 processed_slices = [process_slice(s) for s in slices]
166- processed_volume = stack_slices(processed_slices, target_type = ' numpy' )
167+ processed_volume = stack_slices(processed_slices, memory_type = ' numpy' , gpu_id = 0 )
167168
168169 GPU-Accelerated Processing
169170~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -176,7 +177,7 @@ GPU-Accelerated Processing
176177 def gpu_filter_volume (volume ):
177178 """ Apply GPU filtering to each slice."""
178179 # Unstack to list
179- slices = unstack_slices(volume, target_type = ' numpy' )
180+ slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
180181
181182 # Process each slice on GPU
182183 filtered_slices = []
@@ -194,12 +195,13 @@ GPU-Accelerated Processing
194195 cpu_filtered = convert_memory(
195196 filtered,
196197 source_type = ' cupy' ,
197- target_type = ' numpy'
198+ target_type = ' numpy' ,
199+ gpu_id = 0
198200 )
199201 filtered_slices.append(cpu_filtered)
200202
201203 # Restack
202- return stack_slices(filtered_slices, target_type = ' numpy' )
204+ return stack_slices(filtered_slices, memory_type = ' numpy' , gpu_id = 0 )
203205
204206 Batch Processing
205207~~~~~~~~~~~~~~~~
@@ -212,23 +214,23 @@ Process slices in batches for efficiency:
212214
213215 def batch_process_volume (volume , batch_size = 10 ):
214216 """ Process volume in batches."""
215- slices = unstack_slices(volume, target_type = ' numpy' )
217+ slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
216218 processed_slices = []
217219
218220 for i in range (0 , len (slices), batch_size):
219221 batch = slices[i:i+ batch_size]
220222
221223 # Stack batch
222- batch_volume = stack_slices(batch, target_type = ' torch' , gpu_id = 0 )
224+ batch_volume = stack_slices(batch, memory_type = ' torch' , gpu_id = 0 )
223225
224226 # Process batch on GPU
225227 processed_batch = process_on_gpu(batch_volume)
226228
227229 # Unstack batch
228- batch_slices = unstack_slices(processed_batch, target_type = ' numpy' )
230+ batch_slices = unstack_slices(processed_batch, memory_type = ' numpy' , gpu_id = 0 )
229231 processed_slices.extend(batch_slices)
230232
231- return stack_slices(processed_slices, target_type = ' numpy' )
233+ return stack_slices(processed_slices, memory_type = ' numpy' , gpu_id = 0 )
232234
233235 Medical Imaging Applications
234236-----------------------------
@@ -256,13 +258,13 @@ CT/MRI Volume Processing
256258 slices = load_dicom_slices(dicom_dir)
257259
258260 # Stack into volume
259- volume = stack_slices(slices, target_type = ' numpy' )
261+ volume = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
260262
261263 # Apply processing (e.g., segmentation, registration)
262264 processed = medical_processing(volume)
263265
264266 # Unstack for saving
265- output_slices = unstack_slices(processed, target_type = ' numpy' )
267+ output_slices = unstack_slices(processed, memory_type = ' numpy' , gpu_id = 0 )
266268
267269 return output_slices
268270
@@ -280,7 +282,7 @@ Microscopy Image Stacks
280282 slices = [load_image(f) for f in image_files]
281283
282284 # Stack
283- stack = stack_slices(slices, target_type = ' numpy' )
285+ stack = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
284286
285287 # Maximum intensity projection
286288 mip = stack.max(axis = 0 )
@@ -306,7 +308,7 @@ For large volumes, consider processing in chunks:
306308
307309 for i in range (0 , len (slices), chunk_size):
308310 chunk_slices = slices[i:i+ chunk_size]
309- chunk = stack_slices(chunk_slices, target_type = ' numpy' )
311+ chunk = stack_slices(chunk_slices, memory_type = ' numpy' , gpu_id = 0 )
310312 chunks.append(chunk)
311313
312314 # Concatenate chunks
@@ -336,7 +338,7 @@ Load and process slices on-demand:
336338 def to_volume (self ):
337339 """ Stack all slices."""
338340 slices = [self [i] for i in range (len (self ))]
339- return stack_slices(slices, target_type = ' numpy' )
341+ return stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
340342
341343 Parallel Processing
342344~~~~~~~~~~~~~~~~~~~
@@ -356,7 +358,7 @@ Process slices in parallel:
356358 with ThreadPoolExecutor(max_workers = num_workers) as executor:
357359 processed_slices = list (executor.map(process_one, slices))
358360
359- return stack_slices(processed_slices, target_type = ' numpy' )
361+ return stack_slices(processed_slices, memory_type = ' numpy' , gpu_id = 0 )
360362
361363 Multi-GPU Stacking
362364~~~~~~~~~~~~~~~~~~
@@ -389,11 +391,12 @@ Distribute slices across GPUs:
389391 cpu_slice = convert_memory(
390392 processed,
391393 source_type = ' torch' ,
392- target_type = ' numpy'
394+ target_type = ' numpy' ,
395+ gpu_id = gpu_id
393396 )
394397 results.append(cpu_slice)
395398
396- return stack_slices(results, target_type = ' numpy' )
399+ return stack_slices(results, memory_type = ' numpy' , gpu_id = 0 )
397400
398401 Advanced Usage
399402--------------
@@ -411,7 +414,7 @@ Stack along different axes:
411414 slices = [np.random.rand(10 , 20 ) for _ in range (5 )]
412415
413416 # Default: stack along axis 0 → (5, 10, 20)
414- volume1 = stack_slices(slices, target_type = ' numpy' )
417+ volume1 = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
415418
416419 # For other axes, use NumPy directly after stacking
417420 volume2 = np.moveaxis(volume1, 0 , 2 ) # (10, 20, 5)
@@ -429,7 +432,7 @@ Mixed Precision Stacking
429432 for _ in range (50 )]
430433
431434 # Stack maintains dtype
432- volume = stack_slices(slices_f32, target_type = ' numpy' )
435+ volume = stack_slices(slices_f32, memory_type = ' numpy' , gpu_id = 0 )
433436 print (volume.dtype) # float32
434437
435438 Weighted Stacking
@@ -445,7 +448,7 @@ Apply weights during stacking:
445448 def weighted_stack (slices , weights ):
446449 """ Stack with per-slice weights."""
447450 # Stack normally
448- volume = stack_slices(slices, target_type = ' numpy' )
451+ volume = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
449452
450453 # Apply weights
451454 weights = np.array(weights).reshape(- 1 , 1 , 1 )
@@ -470,7 +473,7 @@ Pattern: Volume Iterator
470473
471474 def __init__ (self , volume ):
472475 self .volume = volume
473- self .slices = unstack_slices(volume, target_type = ' numpy' )
476+ self .slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
474477
475478 def __iter__ (self ):
476479 return iter (self .slices)
@@ -498,7 +501,7 @@ Pattern: Slice Cache
498501
499502 def __init__ (self , volume ):
500503 self .volume = volume
501- self .slices = unstack_slices(volume, target_type = ' numpy' )
504+ self .slices = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
502505
503506 @lru_cache (maxsize = 32 )
504507 def get_processed_slice (self , idx ):
@@ -523,7 +526,7 @@ Pattern: Progressive Loading
523526 if len (processed_slices) % 10 == 0 :
524527 save_checkpoint(processed_slices)
525528
526- return stack_slices(processed_slices, target_type = ' numpy' )
529+ return stack_slices(processed_slices, memory_type = ' numpy' , gpu_id = 0 )
527530
528531 Error Handling
529532--------------
@@ -548,17 +551,17 @@ Shape Validation
548551 f " Slice { i} shape { s.shape} != first shape { first_shape} "
549552 )
550553
551- return stack_slices(slices, target_type = ' numpy' )
554+ return stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
552555
553556 Memory Error Handling
554557~~~~~~~~~~~~~~~~~~~~~
555558
556559.. code-block :: python
557560
558- def robust_stack (slices , target_type = ' numpy' ):
561+ def robust_stack (slices , memory_type = ' numpy' , gpu_id = 0 ):
559562 """ Stack with memory error handling."""
560563 try :
561- return stack_slices(slices, target_type = target_type )
564+ return stack_slices(slices, memory_type = memory_type, gpu_id = gpu_id )
562565 except MemoryError :
563566 # Try processing in smaller chunks
564567 print (" Memory error, processing in chunks..." )
@@ -579,8 +582,8 @@ Unit Tests
579582 def test_stack_unstack_roundtrip ():
580583 """ Test stack/unstack preserves data."""
581584 slices = [np.random.rand(10 , 10 ) for _ in range (5 )]
582- volume = stack_slices(slices, target_type = ' numpy' )
583- recovered = unstack_slices(volume, target_type = ' numpy' )
585+ volume = stack_slices(slices, memory_type = ' numpy' , gpu_id = 0 )
586+ recovered = unstack_slices(volume, memory_type = ' numpy' , gpu_id = 0 )
584587
585588 assert len (recovered) == len (slices)
586589 for orig, rec in zip (slices, recovered):