// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright the Vortex contributors
use std::cmp::Ordering;
use std::fmt::Debug;
use std::hash::Hash;
use std::ops::Range;
use num_traits::NumCast;
use vortex_buffer::BitBuffer;
use vortex_buffer::BufferMut;
use vortex_error::VortexError;
use vortex_error::VortexResult;
use vortex_error::vortex_bail;
use vortex_error::vortex_ensure;
use vortex_error::vortex_err;
use vortex_mask::AllOr;
use vortex_mask::Mask;
use vortex_utils::aliases::hash_map::HashMap;
use crate::ArrayRef;
use crate::ExecutionCtx;
use crate::IntoArray;
use crate::LEGACY_SESSION;
#[expect(deprecated)]
use crate::ToCanonical as _;
use crate::VortexSessionExecute;
use crate::arrays::PrimitiveArray;
use crate::builtins::ArrayBuiltins;
use crate::dtype::DType;
use crate::dtype::IntegerPType;
use crate::dtype::NativePType;
use crate::dtype::Nullability;
use crate::dtype::Nullability::NonNullable;
use crate::dtype::PType;
use crate::dtype::UnsignedPType;
use crate::match_each_integer_ptype;
use crate::match_each_unsigned_integer_ptype;
use crate::scalar::PValue;
use crate::scalar::Scalar;
use crate::search_sorted::SearchResult;
use crate::search_sorted::SearchSorted;
use crate::search_sorted::SearchSortedSide;
use crate::validity::Validity;
/// One patch index offset is stored for each chunk.
/// This allows for constant-time patch index lookups.
pub const PATCH_CHUNK_SIZE: usize = 1024;
#[derive(Copy, Clone, prost::Message)]
pub struct PatchesMetadata {
#[prost(uint64, tag = "1")]
len: u64,
#[prost(uint64, tag = "2")]
offset: u64,
#[prost(enumeration = "PType", tag = "3")]
indices_ptype: i32,
#[prost(uint64, optional, tag = "4")]
chunk_offsets_len: Option<u64>,
#[prost(enumeration = "PType", optional, tag = "5")]
chunk_offsets_ptype: Option<i32>,
#[prost(uint64, optional, tag = "6")]
offset_within_chunk: Option<u64>,
}
impl PatchesMetadata {
#[inline]
pub fn new(
len: usize,
offset: usize,
indices_ptype: PType,
chunk_offsets_len: Option<usize>,
chunk_offsets_ptype: Option<PType>,
offset_within_chunk: Option<usize>,
) -> Self {
Self {
len: len as u64,
offset: offset as u64,
indices_ptype: indices_ptype as i32,
chunk_offsets_len: chunk_offsets_len.map(|len| len as u64),
chunk_offsets_ptype: chunk_offsets_ptype.map(|pt| pt as i32),
offset_within_chunk: offset_within_chunk.map(|len| len as u64),
}
}
#[inline]
pub fn len(&self) -> VortexResult<usize> {
usize::try_from(self.len).map_err(|_| vortex_err!("len does not fit in usize"))
}
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
#[inline]
pub fn offset(&self) -> VortexResult<usize> {
usize::try_from(self.offset).map_err(|_| vortex_err!("offset does not fit in usize"))
}
#[inline]
pub fn chunk_offsets_dtype(&self) -> VortexResult<Option<DType>> {
self.chunk_offsets_ptype
.map(|t| {
PType::try_from(t)
.map_err(|e| vortex_err!("invalid i32 value {t} for PType: {}", e))
.map(|ptype| DType::Primitive(ptype, NonNullable))
})
.transpose()
}
#[inline]
pub fn indices_dtype(&self) -> VortexResult<DType> {
let ptype = PType::try_from(self.indices_ptype).map_err(|e| {
vortex_err!("invalid i32 value {} for PType: {}", self.indices_ptype, e)
})?;
vortex_ensure!(
ptype.is_unsigned_int(),
"Patch indices must be unsigned integers"
);
Ok(DType::Primitive(ptype, NonNullable))
}
}
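// Illustrative sketch (added for exposition; not part of the upstream file): a round-trip
// through `PatchesMetadata::new` and its accessors, assuming `PType::U32` is the unsigned
// 32-bit variant. Lengths and offsets are widened to `u64` on construction and checked back
// into `usize` by the accessors.
#[cfg(test)]
#[test]
fn patches_metadata_round_trip_sketch() {
    let meta = PatchesMetadata::new(16, 4, PType::U32, None, None, None);
    assert_eq!(meta.len().unwrap(), 16);
    assert_eq!(meta.offset().unwrap(), 4);
    assert!(!meta.is_empty());
    assert_eq!(
        meta.indices_dtype().unwrap(),
        DType::Primitive(PType::U32, NonNullable)
    );
    // Chunk offsets were not provided, so no dtype is reported for them.
    assert!(meta.chunk_offsets_dtype().unwrap().is_none());
}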
/// A helper for working with patched arrays.
#[derive(Debug, Clone)]
pub struct Patches {
array_len: usize,
offset: usize,
indices: ArrayRef,
values: ArrayRef,
/// Stores the patch index offset for each chunk.
///
/// This allows us to lookup the patches for a given chunk in constant time via
/// `patch_indices[chunk_offsets[i]..chunk_offsets[i+1]]`.
///
/// This is optional for compatibility reasons.
chunk_offsets: Option<ArrayRef>,
/// Chunk offsets are only sliced off when the slice falls entirely
/// outside a chunk's range, whereas the range for indices and values is
/// sliced in terms of individual elements, not chunks. To account for
/// that, we apply a saturating subtraction when adjusting the indices
/// based on the chunk offset.
///
/// `offset_within_chunk` keeps track of how many elements were sliced
/// off within the first chunk.
offset_within_chunk: Option<usize>,
}
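// Illustrative layout (added for exposition): for an unsliced array with patches at
// elements [3, 700, 1500, 2100] and PATCH_CHUNK_SIZE == 1024:
//   indices       = [3, 700, 1500, 2100]
//   values        = [v0, v1, v2, v3]
//   chunk_offsets = [0, 2, 3]   // chunk 0 owns patches 0..2, chunk 1 owns 2..3, chunk 2 owns 3..4
// so the patches of chunk `i` are `indices[chunk_offsets[i]..chunk_offsets[i + 1]]`,
// with the last chunk running to `indices.len()`.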
impl Patches {
pub fn new(
array_len: usize,
offset: usize,
indices: ArrayRef,
values: ArrayRef,
chunk_offsets: Option<ArrayRef>,
) -> VortexResult<Self> {
vortex_ensure!(
indices.len() == values.len(),
"Patch indices and values must have the same length"
);
vortex_ensure!(
indices.dtype().is_unsigned_int() && !indices.dtype().is_nullable(),
"Patch indices must be non-nullable unsigned integers, got {:?}",
indices.dtype()
);
vortex_ensure!(
indices.len() <= array_len,
"Patch indices must be shorter than the array length"
);
vortex_ensure!(!indices.is_empty(), "Patch indices must not be empty");
// Perform validation of components when they are host-resident.
// This is not possible to do eagerly when the data is on GPU memory.
if indices.is_host() && values.is_host() {
let max = usize::try_from(&indices.execute_scalar(
indices.len() - 1,
&mut LEGACY_SESSION.create_execution_ctx(),
)?)
.map_err(|_| vortex_err!("indices must be a number"))?;
vortex_ensure!(
max - offset < array_len,
"Patch indices {max:?}, offset {offset} are longer than the array length {array_len}"
);
#[cfg(debug_assertions)]
{
use crate::VortexSessionExecute;
let mut ctx = LEGACY_SESSION.create_execution_ctx();
assert!(
crate::aggregate_fn::fns::is_sorted::is_sorted(&indices, &mut ctx)
.unwrap_or(false),
"Patch indices must be sorted"
);
}
}
Ok(Self {
array_len,
offset,
indices,
values,
chunk_offsets: chunk_offsets.clone(),
// Initialize with `Some(0)` only if `chunk_offsets` are set.
offset_within_chunk: chunk_offsets.map(|_| 0),
})
}
/// Construct new patches without validating any of the arguments.
///
/// # Safety
///
/// Callers must ensure that:
/// * Indices and values have the same length
/// * Indices are a non-nullable unsigned integer type
/// * Indices are sorted
/// * The last value in indices, less `offset`, is smaller than `array_len`
pub unsafe fn new_unchecked(
array_len: usize,
offset: usize,
indices: ArrayRef,
values: ArrayRef,
chunk_offsets: Option<ArrayRef>,
offset_within_chunk: Option<usize>,
) -> Self {
Self {
array_len,
offset,
indices,
values,
chunk_offsets,
offset_within_chunk,
}
}
#[inline]
pub fn array_len(&self) -> usize {
self.array_len
}
#[inline]
pub fn num_patches(&self) -> usize {
self.indices.len()
}
#[inline]
pub fn dtype(&self) -> &DType {
self.values.dtype()
}
#[inline]
pub fn indices(&self) -> &ArrayRef {
&self.indices
}
#[inline]
pub fn into_indices(self) -> ArrayRef {
self.indices
}
#[inline]
pub fn indices_mut(&mut self) -> &mut ArrayRef {
&mut self.indices
}
#[inline]
pub fn values(&self) -> &ArrayRef {
&self.values
}
#[inline]
pub fn into_values(self) -> ArrayRef {
self.values
}
#[inline]
pub fn values_mut(&mut self) -> &mut ArrayRef {
&mut self.values
}
/// Absolute offset: 0 if the array is unsliced.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
#[inline]
pub fn chunk_offsets(&self) -> &Option<ArrayRef> {
&self.chunk_offsets
}
#[inline]
pub fn chunk_offset_at(&self, idx: usize) -> VortexResult<usize> {
let Some(chunk_offsets) = &self.chunk_offsets else {
vortex_bail!("chunk_offsets must be set to retrieve offset at index")
};
chunk_offsets
.execute_scalar(idx, &mut LEGACY_SESSION.create_execution_ctx())?
.as_primitive()
.as_::<usize>()
.ok_or_else(|| vortex_err!("chunk offset does not fit in usize"))
}
/// Returns the number of patches sliced off from the current first chunk.
///
/// When patches are sliced, the chunk offsets array is also sliced to only include
/// chunks that overlap with the slice range. However, the slice boundary may fall
/// in the middle of a chunk's patch range. This offset indicates how many patches
/// at the start of the first chunk should be skipped.
///
/// Returns `None` if chunk offsets are not set.
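///
/// For example, if the retained first chunk begins at absolute patch position 10 but the
/// slice begins at absolute patch position 13, `offset_within_chunk` is 3: the first three
/// patches of that chunk belong to elements before the slice.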
#[inline]
pub fn offset_within_chunk(&self) -> Option<usize> {
self.offset_within_chunk
}
#[inline]
pub fn indices_ptype(&self) -> VortexResult<PType> {
PType::try_from(self.indices.dtype())
.map_err(|_| vortex_err!("indices dtype is not primitive"))
}
pub fn to_metadata(&self, len: usize, dtype: &DType) -> VortexResult<PatchesMetadata> {
if self.indices.len() > len {
vortex_bail!(
"Patch indices {} are longer than the array length {}",
self.indices.len(),
len
);
}
if self.values.dtype() != dtype {
vortex_bail!(
"Patch values dtype {} does not match array dtype {}",
self.values.dtype(),
dtype
);
}
let chunk_offsets_len = self.chunk_offsets.as_ref().map(|co| co.len());
let chunk_offsets_ptype = self.chunk_offsets.as_ref().map(|co| co.dtype().as_ptype());
Ok(PatchesMetadata::new(
self.indices.len(),
self.offset,
self.indices.dtype().as_ptype(),
chunk_offsets_len,
chunk_offsets_ptype,
self.offset_within_chunk,
))
}
pub fn cast_values(self, values_dtype: &DType) -> VortexResult<Self> {
// SAFETY: casting does not affect the relationship between the indices and values
unsafe {
Ok(Self::new_unchecked(
self.array_len,
self.offset,
self.indices,
self.values.cast(values_dtype.clone())?,
self.chunk_offsets,
self.offset_within_chunk,
))
}
}
/// Get the patched value at a given index if it exists.
pub fn get_patched(&self, index: usize) -> VortexResult<Option<Scalar>> {
self.search_index(index)?
.to_found()
.map(|patch_idx| {
self.values()
.execute_scalar(patch_idx, &mut LEGACY_SESSION.create_execution_ctx())
})
.transpose()
}
/// Searches for `index` in the indices array.
///
/// Chooses between chunked search when [`Self::chunk_offsets`] is
/// available, and binary search otherwise. The `index` parameter is
/// adjusted by [`Self::offset`] for both.
///
/// # Arguments
/// * `index` - The index to search for
///
/// # Returns
/// * [`SearchResult::Found(patch_idx)`] - If a patch exists at this index, returns the
/// position in the patches array
/// * [`SearchResult::NotFound(insertion_point)`] - If no patch exists, returns where
/// a patch at this index would be inserted to maintain sorted order
///
/// [`SearchResult::Found(patch_idx)`]: SearchResult::Found
/// [`SearchResult::NotFound(insertion_point)`]: SearchResult::NotFound
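///
/// For example, with patch indices `[3, 7, 42]` and an offset of 0, `search_index(7)`
/// returns `Found(1)`, while `search_index(10)` returns `NotFound(2)` (the position at
/// which a patch for element 10 would be inserted).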
pub fn search_index(&self, index: usize) -> VortexResult<SearchResult> {
if self.chunk_offsets.is_some() {
return self.search_index_chunked(index);
}
Self::search_index_binary_search(&self.indices, index + self.offset)
}
/// Binary searches for `needle` in the indices array.
///
/// # Returns
/// [`SearchResult::Found`] with the position if needle exists, or [`SearchResult::NotFound`]
/// with the insertion point if not found.
fn search_index_binary_search(indices: &ArrayRef, needle: usize) -> VortexResult<SearchResult> {
if indices.is_canonical() {
#[expect(deprecated)]
let primitive = indices.to_primitive();
match_each_integer_ptype!(primitive.ptype(), |T| {
let Ok(needle) = T::try_from(needle) else {
// If the needle is not of type T, then it cannot possibly be in this array.
//
// The needle is a non-negative integer (a usize); therefore, it must be larger
// than all values in this array.
return Ok(SearchResult::NotFound(primitive.len()));
};
return primitive
.as_slice::<T>()
.search_sorted(&needle, SearchSortedSide::Left);
});
}
indices
.as_primitive_typed()
.search_sorted(&PValue::U64(needle as u64), SearchSortedSide::Left)
}
/// Constant-time search for `index` in the indices array.
///
/// First determines which chunk the target index falls into, then performs
/// a binary search within that chunk's range.
///
/// Returns a [`SearchResult`] indicating either the exact patch index if found,
/// or the insertion point if not found.
///
/// Returns an error if `chunk_offsets` or `offset_within_chunk` are not set.
fn search_index_chunked(&self, index: usize) -> VortexResult<SearchResult> {
let Some(chunk_offsets) = &self.chunk_offsets else {
vortex_bail!("chunk_offsets is required to be set")
};
let Some(offset_within_chunk) = self.offset_within_chunk else {
vortex_bail!("offset_within_chunk is required to be set")
};
if index >= self.array_len() {
return Ok(SearchResult::NotFound(self.indices().len()));
}
let chunk_idx = (index + self.offset % PATCH_CHUNK_SIZE) / PATCH_CHUNK_SIZE;
// Patch index offsets are absolute and need to be offset by the first chunk of the current slice.
let base_offset = self.chunk_offset_at(0)?;
let patches_start_idx = (self.chunk_offset_at(chunk_idx)? - base_offset)
// Chunk offsets are only sliced off when the slice falls entirely outside a
// chunk's range, whereas indices and values are sliced per element; hence the
// saturating subtraction by `offset_within_chunk`.
.saturating_sub(offset_within_chunk);
let patches_end_idx = if chunk_idx < chunk_offsets.len() - 1 {
(self.chunk_offset_at(chunk_idx + 1)? - base_offset)
.saturating_sub(offset_within_chunk)
.min(self.indices.len())
} else {
self.indices.len()
};
let chunk_indices = self.indices.slice(patches_start_idx..patches_end_idx)?;
let result = Self::search_index_binary_search(&chunk_indices, index + self.offset)?;
Ok(match result {
SearchResult::Found(idx) => SearchResult::Found(patches_start_idx + idx),
SearchResult::NotFound(idx) => SearchResult::NotFound(patches_start_idx + idx),
})
}
/// Batch version of `search_index`.
///
/// In contrast to `search_index`, this function requires `indices` as
/// well as `chunk_offsets` to be passed as slices. This is to avoid
/// redundant canonicalization and `scalar_at` lookups across calls.
fn search_index_chunked_batch<T, O>(
&self,
indices: &[T],
chunk_offsets: &[O],
index: T,
) -> VortexResult<SearchResult>
where
T: UnsignedPType,
O: UnsignedPType,
usize: TryFrom<T>,
usize: TryFrom<O>,
{
let Some(offset_within_chunk) = self.offset_within_chunk else {
vortex_bail!("offset_within_chunk is required to be set")
};
let chunk_idx = {
let Ok(index) = usize::try_from(index) else {
// If the needle cannot be converted to usize, it's larger than all values in this array.
return Ok(SearchResult::NotFound(indices.len()));
};
if index >= self.array_len() {
return Ok(SearchResult::NotFound(self.indices().len()));
}
(index + self.offset % PATCH_CHUNK_SIZE) / PATCH_CHUNK_SIZE
};
// Patch index offsets are absolute and need to be offset by the first chunk of the current slice.
let chunk_offset = usize::try_from(chunk_offsets[chunk_idx] - chunk_offsets[0])
.map_err(|_| vortex_err!("chunk_offset failed to convert to usize"))?;
let patches_start_idx = chunk_offset
// Chunk offsets are only sliced off when the slice falls entirely outside a
// chunk's range, whereas indices and values are sliced per element; hence the
// saturating subtraction by `offset_within_chunk`.
.saturating_sub(offset_within_chunk);
let patches_end_idx = if chunk_idx < chunk_offsets.len() - 1 {
usize::try_from(chunk_offsets[chunk_idx + 1] - chunk_offsets[0])
.map_err(|_| vortex_err!("patches_end_idx failed to convert to usize"))?
.saturating_sub(offset_within_chunk)
.min(indices.len())
} else {
self.indices.len()
};
let Some(offset) = T::from(self.offset) else {
// If the offset cannot be converted to T, it's larger than all values in this array.
return Ok(SearchResult::NotFound(indices.len()));
};
let chunk_indices = &indices[patches_start_idx..patches_end_idx];
let result = chunk_indices.search_sorted(&(index + offset), SearchSortedSide::Left)?;
Ok(match result {
SearchResult::Found(idx) => SearchResult::Found(patches_start_idx + idx),
SearchResult::NotFound(idx) => SearchResult::NotFound(patches_start_idx + idx),
})
}
/// Returns the minimum patch index
pub fn min_index(&self) -> VortexResult<usize> {
let first = self
.indices
.execute_scalar(0, &mut LEGACY_SESSION.create_execution_ctx())?
.as_primitive()
.as_::<usize>()
.ok_or_else(|| vortex_err!("index does not fit in usize"))?;
Ok(first - self.offset)
}
/// Returns the maximum patch index
pub fn max_index(&self) -> VortexResult<usize> {
let last = self
.indices
.execute_scalar(
self.indices.len() - 1,
&mut LEGACY_SESSION.create_execution_ctx(),
)?
.as_primitive()
.as_::<usize>()
.ok_or_else(|| vortex_err!("index does not fit in usize"))?;
Ok(last - self.offset)
}
/// Filter the patches by a mask, resulting in new patches for the filtered array.
pub fn filter(&self, mask: &Mask, ctx: &mut ExecutionCtx) -> VortexResult<Option<Self>> {
if mask.len() != self.array_len {
vortex_bail!(
"Filter mask length {} does not match array length {}",
mask.len(),
self.array_len
);
}
match mask.indices() {
AllOr::All => Ok(Some(self.clone())),
AllOr::None => Ok(None),
AllOr::Some(mask_indices) => {
let flat_indices = self.indices().clone().execute::<PrimitiveArray>(ctx)?;
match_each_unsigned_integer_ptype!(flat_indices.ptype(), |I| {
filter_patches_with_mask(
flat_indices.as_slice::<I>(),
self.offset(),
self.values(),
mask_indices,
)
})
}
}
}
/// Mask the patches, REMOVING the patches where the mask is true.
/// Unlike filter, this preserves the patch indices.
/// Unlike mask on a single array, this does not set masked values to null.
// TODO(joe): make this lazy and remove the ctx.
pub fn mask(&self, mask: &Mask, ctx: &mut ExecutionCtx) -> VortexResult<Option<Self>> {
if mask.len() != self.array_len {
vortex_bail!(
"Filter mask length {} does not match array length {}",
mask.len(),
self.array_len
);
}
let filter_mask = match mask.bit_buffer() {
AllOr::All => return Ok(None),
AllOr::None => return Ok(Some(self.clone())),
AllOr::Some(masked) => {
let patch_indices = self.indices().clone().execute::<PrimitiveArray>(ctx)?;
match_each_unsigned_integer_ptype!(patch_indices.ptype(), |P| {
let patch_indices = patch_indices.as_slice::<P>();
Mask::from_buffer(BitBuffer::collect_bool(patch_indices.len(), |i| {
#[allow(clippy::cast_possible_truncation)]
let idx = (patch_indices[i] as usize) - self.offset;
!masked.value(idx)
}))
})
}
};
if filter_mask.all_false() {
return Ok(None);
}
// SAFETY: filtering indices/values with same mask maintains their 1:1 relationship
let filtered_indices = self.indices.filter(filter_mask.clone())?;
let filtered_values = self.values.filter(filter_mask)?;
Ok(Some(Self {
array_len: self.array_len,
offset: self.offset,
indices: filtered_indices,
values: filtered_values,
// TODO(0ax1): Chunk offsets are invalid after a filter is applied.
chunk_offsets: None,
offset_within_chunk: self.offset_within_chunk,
}))
}
/// Slice the patches by a range of the patched array.
pub fn slice(&self, range: Range<usize>) -> VortexResult<Option<Self>> {
let slice_start_idx = self.search_index(range.start)?.to_index();
let slice_end_idx = self.search_index(range.end)?.to_index();
if slice_start_idx == slice_end_idx {
return Ok(None);
}
let values = self.values().slice(slice_start_idx..slice_end_idx)?;
let indices = self.indices().slice(slice_start_idx..slice_end_idx)?;
let new_chunk_offsets = self
.chunk_offsets
.as_ref()
.map(|chunk_offsets| -> VortexResult<ArrayRef> {
let chunk_relative_offset = self.offset % PATCH_CHUNK_SIZE;
let chunk_start_idx = (chunk_relative_offset + range.start) / PATCH_CHUNK_SIZE;
let chunk_end_idx = (chunk_relative_offset + range.end).div_ceil(PATCH_CHUNK_SIZE);
chunk_offsets.slice(chunk_start_idx..chunk_end_idx)
})
.transpose()?;
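// Recompute how many patches at the start of the new first chunk fall before the slice:
// the absolute patch position at which the slice starts (parent chunk base + patches
// already sliced off within it + slice_start_idx) minus the new first chunk's base offset.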
let offset_within_chunk = new_chunk_offsets
.as_ref()
.map(|new_chunk_offsets| -> VortexResult<usize> {
let new_chunk_base = new_chunk_offsets
.execute_scalar(0, &mut LEGACY_SESSION.create_execution_ctx())?
.as_primitive()
.as_::<usize>()
.ok_or_else(|| vortex_err!("chunk offset does not fit in usize"))?;
let parent_chunk_base = self.chunk_offset_at(0)?;
let parent_within = self.offset_within_chunk.unwrap_or(0);
Ok(parent_chunk_base + parent_within + slice_start_idx - new_chunk_base)
})
.transpose()?;
Ok(Some(Self {
array_len: range.len(),
offset: range.start + self.offset(),
indices,
values,
chunk_offsets: new_chunk_offsets,
offset_within_chunk,
}))
}
// https://docs.google.com/spreadsheets/d/1D9vBZ1QJ6mwcIvV5wIL0hjGgVchcEnAyhvitqWu2ugU
const PREFER_MAP_WHEN_PATCHES_OVER_INDICES_LESS_THAN: f64 = 5.0;
fn is_map_faster_than_search(&self, take_indices: &PrimitiveArray) -> bool {
(self.num_patches() as f64 / take_indices.len() as f64)
< Self::PREFER_MAP_WHEN_PATCHES_OVER_INDICES_LESS_THAN
}
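// Rationale (added for exposition): `take_map` builds a hash map over every patch index and
// probes it once per take index, while `take_search` binary-searches the patch indices for
// each take index. The map approach wins when there are few patches relative to the number
// of take indices; the 5.0 cutoff appears to come from the benchmark linked above.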
/// Take the indices from the patches.
///
/// Any nulls in take_indices are added to the resulting patches.
pub fn take_with_nulls(
&self,
take_indices: &ArrayRef,
ctx: &mut ExecutionCtx,
) -> VortexResult<Option<Self>> {
if take_indices.is_empty() {
return Ok(None);
}
let take_indices = take_indices.clone().execute::<PrimitiveArray>(ctx)?;
if self.is_map_faster_than_search(&take_indices) {
self.take_map(take_indices, true, ctx)
} else {
self.take_search(take_indices, true, ctx)
}
}
/// Take the indices from the patches.
///
/// Any nulls in take_indices are ignored.
pub fn take(
&self,
take_indices: &ArrayRef,
ctx: &mut ExecutionCtx,
) -> VortexResult<Option<Self>> {
if take_indices.is_empty() {
return Ok(None);
}
let take_indices = take_indices.clone().execute::<PrimitiveArray>(ctx)?;
if self.is_map_faster_than_search(&take_indices) {
self.take_map(take_indices, false, ctx)
} else {
self.take_search(take_indices, false, ctx)
}
}
#[expect(
clippy::cognitive_complexity,
reason = "complexity is from nested match_each_* macros"
)]
pub fn take_search(
&self,
take_indices: PrimitiveArray,
include_nulls: bool,
ctx: &mut ExecutionCtx,
) -> VortexResult<Option<Self>> {
let take_indices_validity = take_indices.validity()?;
let patch_indices = self.indices.clone().execute::<PrimitiveArray>(ctx)?;
let chunk_offsets = self
.chunk_offsets()
.as_ref()
.map(|co| co.clone().execute::<PrimitiveArray>(ctx))
.transpose()?;
let (values_indices, new_indices): (BufferMut<u64>, BufferMut<u64>) =
match_each_unsigned_integer_ptype!(patch_indices.ptype(), |PatchT| {
let patch_indices_slice = patch_indices.as_slice::<PatchT>();
match_each_integer_ptype!(take_indices.ptype(), |TakeT| {
let take_slice = take_indices.as_slice::<TakeT>();
if let Some(chunk_offsets) = chunk_offsets {
match_each_unsigned_integer_ptype!(chunk_offsets.ptype(), |OffsetT| {
let chunk_offsets = chunk_offsets.as_slice::<OffsetT>();
take_indices_with_search_fn(
patch_indices_slice,
take_slice,
take_indices
.as_ref()
.validity()?
.to_mask(take_indices.as_ref().len(), ctx)?,
include_nulls,
|take_idx| {
self.search_index_chunked_batch(
patch_indices_slice,
chunk_offsets,
take_idx,
)
},
)?
})
} else {
take_indices_with_search_fn(
patch_indices_slice,
take_slice,
take_indices
.as_ref()
.validity()?
.to_mask(take_indices.as_ref().len(), ctx)?,
include_nulls,
|take_idx| {
let Some(offset) = <PatchT as NumCast>::from(self.offset) else {
// If the offset cannot be converted to T, it's larger than all values in this array.
return Ok(SearchResult::NotFound(patch_indices_slice.len()));
};
patch_indices_slice
.search_sorted(&(take_idx + offset), SearchSortedSide::Left)
},
)?
}
})
});
if new_indices.is_empty() {
return Ok(None);
}
let new_indices = new_indices.into_array();
let new_array_len = take_indices.len();
let values_validity = take_indices_validity.take(&new_indices)?;
Ok(Some(Self {
array_len: new_array_len,
offset: 0,
indices: new_indices,
values: self
.values()
.take(PrimitiveArray::new(values_indices, values_validity).into_array())?,
chunk_offsets: None,
offset_within_chunk: Some(0), // Reset when creating new Patches.
}))
}
pub fn take_map(
&self,
take_indices: PrimitiveArray,
include_nulls: bool,
ctx: &mut ExecutionCtx,
) -> VortexResult<Option<Self>> {
let indices = self.indices.clone().execute::<PrimitiveArray>(ctx)?;
let new_length = take_indices.len();
let min_index = self.min_index()?;
let max_index = self.max_index()?;
let Some((new_sparse_indices, value_indices)) =
match_each_unsigned_integer_ptype!(indices.ptype(), |Indices| {
match_each_integer_ptype!(take_indices.ptype(), |TakeIndices| {
let take_validity = take_indices
.validity()?
.execute_mask(take_indices.len(), ctx)?;
let take_nullability = take_indices.validity()?.nullability();
let take_slice = take_indices.as_slice::<TakeIndices>();
take_map::<_, TakeIndices>(
indices.as_slice::<Indices>(),
take_slice,
take_validity,
take_nullability,
self.offset(),
min_index,
max_index,
include_nulls,
)?
})
})
else {
return Ok(None);
};
let taken_values = self.values().take(value_indices)?;
Ok(Some(Patches {
array_len: new_length,
offset: 0,
indices: new_sparse_indices,
values: taken_values,
// TODO(0ax1): Chunk offsets are invalid after take is applied.
chunk_offsets: None,
offset_within_chunk: self.offset_within_chunk,
}))
}
pub fn map_values<F>(self, f: F) -> VortexResult<Self>
where
F: FnOnce(ArrayRef) -> VortexResult<ArrayRef>,
{
let values = f(self.values)?;
if self.indices.len() != values.len() {
vortex_bail!(
"map_values must preserve length: expected {} received {}",
self.indices.len(),
values.len()
)
}
Ok(Self {
array_len: self.array_len,
offset: self.offset,
indices: self.indices,
values,
chunk_offsets: self.chunk_offsets,
offset_within_chunk: self.offset_within_chunk,
})
}
}
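// Illustrative sketch (added for exposition; not part of the upstream file): the chunked
// search above maps a global element index to a chunk via
// `(index + offset % PATCH_CHUNK_SIZE) / PATCH_CHUNK_SIZE` and then binary-searches only
// `indices[chunk_offsets[chunk]..chunk_offsets[chunk + 1]]`. This test replays the chunk
// arithmetic with plain integers.
#[cfg(test)]
#[test]
fn chunk_index_arithmetic_sketch() {
    let chunk_of =
        |index: usize, offset: usize| (index + offset % PATCH_CHUNK_SIZE) / PATCH_CHUNK_SIZE;
    // Unsliced array: element indices map directly onto chunks of PATCH_CHUNK_SIZE elements.
    assert_eq!(chunk_of(0, 0), 0);
    assert_eq!(chunk_of(PATCH_CHUNK_SIZE - 1, 0), 0);
    assert_eq!(chunk_of(PATCH_CHUNK_SIZE, 0), 1);
    // A slice that starts 100 elements into a chunk pushes later indices into the next
    // chunk 100 elements earlier.
    assert_eq!(chunk_of(PATCH_CHUNK_SIZE - 100, 100), 1);
}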
#[expect(clippy::too_many_arguments)] // private function, can clean up one day
fn take_map<I: NativePType + Hash + Eq + TryFrom<usize>, T: NativePType>(
indices: &[I],
take_indices: &[T],
take_validity: Mask,
take_nullability: Nullability,
indices_offset: usize,
min_index: usize,
max_index: usize,
include_nulls: bool,
) -> VortexResult<Option<(ArrayRef, ArrayRef)>>
where
usize: TryFrom<T>,
VortexError: From<<I as TryFrom<usize>>::Error>,
{
let offset_i = I::try_from(indices_offset)?;
let sparse_index_to_value_index: HashMap<I, usize> = indices
.iter()
.copied()
.map(|idx| idx - offset_i)
.enumerate()
.map(|(value_index, sparse_index)| (sparse_index, value_index))
.collect();
let mut new_sparse_indices = BufferMut::<u64>::with_capacity(take_indices.len());
let mut value_indices = BufferMut::<u64>::with_capacity(take_indices.len());
for (idx_in_take, &take_idx) in take_indices.iter().enumerate() {
let ti = usize::try_from(take_idx)
.map_err(|_| vortex_err!("Failed to convert index to usize"))?;
// If we have to take nulls, the take index doesn't matter; make it 0 for consistency.
let is_null = match take_validity.bit_buffer() {
AllOr::All => false,
AllOr::None => true,
AllOr::Some(buf) => !buf.value(idx_in_take),
};
if is_null {
if include_nulls {
new_sparse_indices.push(idx_in_take as u64);
value_indices.push(0);
}
} else if ti >= min_index && ti <= max_index {
let ti_as_i = I::try_from(ti)
.map_err(|_| vortex_err!("take index does not fit in index type"))?;
if let Some(&value_index) = sparse_index_to_value_index.get(&ti_as_i) {
new_sparse_indices.push(idx_in_take as u64);
value_indices.push(value_index as u64);
}
}
}
if new_sparse_indices.is_empty() {
return Ok(None);
}
let new_sparse_indices = new_sparse_indices.into_array();
let values_validity =
Validity::from_mask(take_validity, take_nullability).take(&new_sparse_indices)?;
Ok(Some((
new_sparse_indices,
PrimitiveArray::new(value_indices, values_validity).into_array(),
)))
}
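// Illustrative sketch (added for exposition; not part of the upstream file): the core of
// `take_map` above is a reverse lookup from patch index to value position. This test
// reproduces that idea with plain `u64` slices and no validity handling.
#[cfg(test)]
#[test]
fn take_map_sketch() {
    // Map each patch index to the position of its value, mirroring
    // `sparse_index_to_value_index` in `take_map`.
    let patch_indices: &[u64] = &[3, 7, 42];
    let lookup: HashMap<u64, usize> = patch_indices
        .iter()
        .enumerate()
        .map(|(value_idx, &patch_idx)| (patch_idx, value_idx))
        .collect();

    // For each take index, emit (position in the taken array, position of the patch value).
    let take_indices: &[u64] = &[7, 8, 42, 100];
    let mut new_sparse_indices = Vec::new();
    let mut value_indices = Vec::new();
    for (pos, take_idx) in take_indices.iter().enumerate() {
        if let Some(&value_idx) = lookup.get(take_idx) {
            new_sparse_indices.push(pos as u64);
            value_indices.push(value_idx as u64);
        }
    }
    assert_eq!(new_sparse_indices, vec![0, 2]);
    assert_eq!(value_indices, vec![1, 2]);
}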
/// Filter patches with the provided mask (in flattened space).
///
/// The filter mask may select indices that are not patched. The return value of this function
/// is a new set of `Patches` whose indices are expressed relative to the rank of the set
/// positions in `mask` (i.e. positions in the filtered array), together with the patch values.
fn filter_patches_with_mask<T: IntegerPType>(
patch_indices: &[T],
offset: usize,
patch_values: &ArrayRef,
mask_indices: &[usize],
) -> VortexResult<Option<Patches>> {
let true_count = mask_indices.len();
let mut new_patch_indices = BufferMut::<u64>::with_capacity(true_count);
let mut new_mask_indices = Vec::with_capacity(true_count);
// Attempt to move the window by `STRIDE` elements on each iteration. This assumes that
// the patches are relatively sparse compared to the overall mask, and so many indices in the
// mask will end up being skipped.
const STRIDE: usize = 4;
let mut mask_idx = 0usize;
let mut true_idx = 0usize;
while mask_idx < patch_indices.len() && true_idx < true_count {
// NOTE: we are searching for overlaps between sorted, unaligned indices in `patch_indices`
// and `mask_indices`. We assume that Patches are sparse relative to the global space of