// windbg_segment_heap.js
"use strict";
/*
//see initializeScript to add some function aliases
//switch to paged pool (by default uses nonpagedpoolnx)
dx @$scriptContents.set_default_pool(2)
//switch to session pool of the current process
dx @$scriptContents.set_default_pool(4)
or
dx @$scriptContents.set_session_pool()
//switch to a segment heap by address, e.g. usermode implementation
dx @$scriptContents.set_default_heap(0x230b0470000)
//finds the heap associated with the address based on the segment allocator metadata, unlike the other set_default functions which require the actual segment heap address. Limitation: requires the address to lie within a segment block (all except large allocs)
dx @$scriptContents.set_default_heap_seg(0x230b0675300)
//********LFH functions************
//return the buckets that are either enabled or disabled
dx @$scriptContents.lfh_buckets_status()
//returns more detailed information about a specific bucket index
dx @$scriptContents.lfh_bucket_stats(8)
//********VS functions************
dx @$scriptContents.vs_dynamic_lookaside_info()
dx @$scriptContents.vs_dynamic_lookaside_bucket_info(0x390)
dx @$scriptContents.vs_delay_free_list()
dx @$scriptContents.vs_freechunktree_stats(0x390,0x3b0)
dx @$scriptContents.vs_decode_header_abs(0xffff9804c6838fe0)
//same as vs_decode_header_abs, but it doesn't require the address provided as arg to be the beginning of the chunk. It can be any address within the underlying chunk boundaries
dx @$scriptContents.vs_decode_header(0xffff9804c6838ff0)
//********Segment functions************
dx @$scriptContents.seg_free_blocks()
//segment details based on any address that lies within a segment
dx @$scriptContents.seg_segment_state(0xffff9804c6838fe8)
//********Large functions************
dx @$scriptContents.large_print_allocs()
------------
dx @$cursession.Processes.Where(x => x.KernelObject.Session != 0)
//all scripts function container
host.namespace.Debugger.State.DebuggerVariables.scriptContents
//specific script by name
Debugger.State.Scripts.windbg_segment_heap.Contents.vs_freechunktree_stats(0x80)
*/
let kernel_globals = {};
let session_pool_globals = {};
let globals = {};
let cached_types = {};
let debugging = false;
let logging_enabled = true;
const zero = host.parseInt64(0);
let log = host.diagnostics.debugLog;
let logln = function (e) {
if (logging_enabled)
log(e + '\n');
};
let debug_print = function (e) {
if (debugging) {
log("[debug] " + e + '\n');
}
}
function to_hex(n){
if (n===undefined)
return "[to_hex: n===undefined]"
return "0x" + n.toString(16);
}
function x(cmd){
let output = "";
//for some commands it might be useful to let the caller handle the exception
try {
for (let line of host.namespace.Debugger.Utility.Control.ExecuteCommand(cmd)){
output += line + '\n';
}
} catch (err) {
output = "";
}
return output.trim();
}
function read_mem(addr, element_size, n = 1){
let elements = host.memory.readMemoryValues(addr, n, element_size);
if (n == 1)
return elements[0];
return elements;
}
function create_mask(bits) {
return 1..bitwiseShiftLeft(bits).subtract(1);
}
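//worked example: create_mask(4) yields 0xf and create_mask(16) yields 0xffff;
//the 1..bitwiseShiftLeft(...) spelling calls the 64-bit math methods available
//on numbers in the windbg js engine, so masks wider than 32 bits stay exact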
//maybe todo: add option to extract overlapping fields, e.g. _RTL_BALANCED_NODE.ParentValue&~3
function extract_field_64(value, type_name, field){
let tp = get_type_parts(type_name);
let current_type = get_type(tp.module, tp.type_name);
let data;
let field_base_offset = offsetof(type_name, field) % 8;
let field_base_size = type_field_size(type_name, field);
let field_mask = create_mask(field_base_size*8);
if (field_base_offset + field_base_size > 8)
throw Error("[extract_field_64] extraction field size bigger than the value size");
value = value.bitwiseShiftRight(field_base_offset*8);
let bit_info = type_field_bitinfo(type_name, field);
if (bit_info === undefined) {
value = value.bitwiseAnd(field_mask);
if (field_base_size*8<=53){
value = value.asNumber();
debug_print(field + " is bytes, base sz: " + field_base_size);
}
return value;
}
field_mask = create_mask(bit_info.bit_len);
value = value.bitwiseShiftRight(bit_info.bit_position);
value = value.bitwiseAnd(field_mask);
if (bit_info.bit_len<=53){
value = value.asNumber();
debug_print(field + " is bits, base sz: " + bit_info.bit_len);
}
return value;
}
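//usage sketch (hypothetical values): given a decoded header qword, pull a
//bitfield out without laying the bytes over a typed object, e.g.
//  let sz = extract_field_64(decoded, "_HEAP_VS_CHUNK_HEADER", "Sizes.UnsafeSize");
//the field offset is reduced modulo 8, so the value passed in is expected to be
//the aligned qword that contains the field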
function is_kernelspace_address(addr){
return addr.bitwiseShiftRight(47).bitwiseAnd(0x1ffff) == 0x1ffff;
}
function is_userspace_address(addr){
return addr.bitwiseShiftRight(47).compareTo(zero) == 0; //=== between two Int64 objects compares references, hence compareTo
}
function verify_four_pagetable_levels() {
let output = x("!pte 0");
let output_lines = output.split("\n").splice(1);
if (output_lines[0].indexOf("PXE") != 0){
throw new Error(`[verify_four_pagetable_levels] error: currently assuming 4-level page table hierarchy, fix is_kernelspace_address/is_userspace_address probably some other places too cmd output: ${output}`);
}
return true;
}
function check_machine_configuration(){
verify_four_pagetable_levels();
//maybe add some other assumptions, e.g. vs header size
}
function create_typed_object(addr, type_name, module = null){
if (module === null)
module = globals.default_module;
return host.createTypedObject(addr, module, type_name);
}
function create_pointer_object(addr, type_name, module = null){
if (module === null)
module = globals.default_module;
return host.createPointerObject(addr, module, type_name);
}
function get_type_parts(type_name){
let type_parts = type_name.split("!");
let real_type_name = type_parts.pop();
let module = type_parts.pop() || globals.default_module;
return {
module: module,
type_name: real_type_name,
}
}
function get_type(module, type_name){
let full_name = module+"!"+type_name;
if (full_name in cached_types)
return cached_types[full_name];
let type = host.getModuleType(module, type_name);
cached_types[full_name] = type;
return type;
}
function bittest(arr, arr_element_size, bit_test_position){
let test_element_index = bit_test_position.divide(arr_element_size);
let test_bit_index = bit_test_position.subtract(test_element_index.multiply(arr_element_size));
//logln("element: " + to_hex(arr[test_element_index]) + " test_bit_index: " + to_hex(test_bit_index) + " bit_test_position: " + to_hex(bit_test_position));
return arr[test_element_index].bitwiseAnd(1..bitwiseShiftLeft(test_bit_index)) != zero;
}
function bittest64(arr, bit_test_position){
return bittest(arr, 64, bit_test_position);
}
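//worked example: bittest64(arr, 0x47) tests bit 7 of arr[1], since
//0x47/64 == 1 (truncating divide) and 0x47 - 1*64 == 7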
function bitwiseNot(n){
return 0..subtract(n).subtract(1);
}
function align(n, bits, align_up = true){
let mask = 1..bitwiseShiftLeft(bits).subtract(1);
if (align_up)
n = n.add(mask);
return n.bitwiseAnd(bitwiseNot(mask));
}
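//worked example: align(0x1234, 12) rounds up to the next 4KiB boundary, 0x2000,
//while align(0x1234, 12, false) truncates down to 0x1000; bits is the log2 of
//the alignment granularity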
function offsetof(type_name, field){
let tp = get_type_parts(type_name);
let final_offset = 0;
let current_type = get_type(tp.module, tp.type_name);
for(let current_field_name of field.split(".")){
let current_field = current_type.fields[current_field_name];
final_offset += current_field.offset;
current_type = current_field.type;
}
return final_offset;
}
function type_size(type_name){
let type_parts = get_type_parts(type_name);
let type = get_type(type_parts.module, type_parts.type_name);
return type.size;
}
function get_type_field(type_name, field){
let type_parts = get_type_parts(type_name);
let current_type = get_type(type_parts.module, type_parts.type_name);
let current_field;
for(let current_field_name of field.split(".")){
current_field = current_type.fields[current_field_name];
current_type = current_field.type;
}
return current_field;
}
function type_field_bitinfo(type_name, field){
let field_type = get_type_field(type_name, field).type;
if (!field_type.isBitField)
return undefined;
return {
bit_position: field_type.bitFieldPositions.lsb,
bit_len: field_type.bitFieldPositions.length,
};
}
function type_field_size(type_name, field){
return get_type_field(type_name, field).type.size;
}
//eval_node will receive a node as argument and should return:
// -1: if we are looking for smaller value than the one held by the current node
// 0 : if the value is good
// 1 : if we are looking for bigger value
function* rbtree_iterator(root_node, eval_node){
if (!root_node || root_node.address == zero)
return;
let node_queue = [{node:root_node, position:"root"}];
while(node_queue.length>0){
let current_node_info = node_queue.pop();
let current_node = current_node_info.node;
let eval_ret = eval_node(current_node);
let eval_result = eval_ret.result;
if (eval_result==0){
if ("extra" in eval_ret)
current_node_info.extra = eval_ret.extra;
yield current_node_info;
}
if ((current_node.Left.address!=zero) && eval_result <= 0)
node_queue.push({node:current_node.Left, position:"Left"});
if ((current_node.Right.address!=zero) && eval_result >= 0)
node_queue.push({node:current_node.Right, position:"Right"});
}
}
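//minimal evaluator sketch: returning {result: 0} for every node makes the
//iterator visit the whole tree; vs_freechunktree_stats below builds a real
//evaluator from the decoded VS header sizes:
//  let match_all = function (node) { return {result: 0}; };
//  for (let info of rbtree_iterator(root, match_all))
//      logln(info.position + ": " + to_hex(info.node.address));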
function get_pool(pool_index){
if (pool_index == -1)
return globals.default_heap;
if (pool_index >= 0 && pool_index<globals.pools.length)
return globals.pools[pool_index];
return get_current_session_pool();
}
function* _slist_iterator(slist_header) {
if (slist_header == zero)
return;
let current_node_addr = slist_header.Region.bitwiseAnd(~15);
while(current_node_addr != zero){
yield current_node_addr;
current_node_addr = read_mem(current_node_addr, 8);
}
}
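//note (an assumption about the SLIST layout): the low four bits of Region hold
//header state rather than address bits, hence the &~15 above; each entry's
//first qword then links to the next entry, so the walk stops on a null link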
function _list_entry_iterator(list_entry, type, field){
return host.namespace.Debugger.Utility.Collections.FromListEntry(list_entry, type, field);
}
//we may have to call this function liberally; results could be cached as long as execution hasn't been resumed between the calls
function _list_entry_address(list_entry_object, offset = 0){
if (list_entry_object.Flink.address == zero){
return zero;
}
return list_entry_object.Flink.Blink.address.subtract(offset);
}
function lfh_index2size(bucket_index){
return globals.RtlpBucketBlockSizes[bucket_index]
}
function lfh_size2index(size){
let index = (size+0xf)>>>4;
//if (index>=globals.RtlpLfhBucketIndexMap.len) //add a similar check to avoid table overflow
// return -1;
return globals.RtlpLfhBucketIndexMap[index];
}
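//worked example: a 0x101-byte request maps to table slot (0x101+0xf)>>>4 == 0x11,
//and RtlpLfhBucketIndexMap[0x11] collapses that to the bucket whose block size
//(per RtlpBucketBlockSizes) is the smallest one covering 0x110 bytes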
function _lfh_decode_blockoffsets(lfh_subsegment){
let decoded_blockoffsets = _list_entry_address(lfh_subsegment.ListEntry).bitwiseAnd(0xffffffff)>>>12;
decoded_blockoffsets ^= lfh_subsegment.BlockOffsets.EncodedData;
decoded_blockoffsets ^= globals.RtlpHpHeapGlobals.LfhKey.bitwiseAnd(0xffffffff);
return {
BlockSize: decoded_blockoffsets & 0xffff,
FirstBlockOffset: decoded_blockoffsets>>>16,
};
}
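//decoding sketch: EncodedData is XORed with bits 12..31 of the subsegment
//address and with the low dword of RtlpHpHeapGlobals.LfhKey; the low word of
//the plaintext is the block size and the high word the first block's offset
//from the subsegment start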
function _lfh_subsegment_free_blocks(subsegment){
let block_count = subsegment.BlockCount;
let free_count = subsegment.FreeCount;
let block_offsets = _lfh_decode_blockoffsets(subsegment);
let subsegment_address = _list_entry_address(subsegment.ListEntry)
let current_bitmap;
let n = 0;
let free_blocks = [];
let blocks_per_bitmap_element = type_field_size("_HEAP_LFH_SUBSEGMENT", "BlockBitmap")*8/2; //8 bits/byte, 2 bits per block
for (let i=0;i<block_count;i++){
if (i%blocks_per_bitmap_element==0) {
current_bitmap = subsegment.BlockBitmap[i/blocks_per_bitmap_element]
}
if (current_bitmap.bitwiseAnd(1) == 0) {
n += 1;
free_blocks.push(subsegment_address.add(block_offsets.FirstBlockOffset + i*block_offsets.BlockSize));
}
current_bitmap = current_bitmap.bitwiseShiftRight(2);
}
return free_blocks;
}
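//the walk above relies on BlockBitmap granting each block two bits (hence the
// /2 in blocks_per_bitmap_element), with bit 0 of the pair acting as the busy
//flag; a clear bit 0 marks the block free, and its address is
//subsegment + FirstBlockOffset + i*BlockSize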
function lfh_buckets_status(show_disabled = 1, pool_index = -1){
const status_msg = [
"enabled",
"disabled",
][(show_disabled!=0)|0];
let current_pool = get_pool(pool_index);
let lfh_context = current_pool.segment_heap.LfhContext;
let lfh_context_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "LfhContext"));
let max_block_size = lfh_context.Config.MaxBlockSize;
logln("------------LFH Info - " + current_pool.name + "----------");
logln("LfhContext Address [_HEAP_LFH_CONTEXT]: " + to_hex(lfh_context_addr));
logln("LFH Max Block Size: " + to_hex(max_block_size));
for (let i = 0; i < lfh_context.Buckets.Count(); i++){
let current_bucket = lfh_context.Buckets[i];
let current_bucket_enabled = !(current_bucket.address.convertToNumber()&1);
let current_bucket_size = lfh_index2size(i);
if (current_bucket_size>=max_block_size)
break;
if (current_bucket_enabled ^ show_disabled)
logln("Bucket[" + to_hex(i) + "] with size: " + to_hex(current_bucket_size) + " is " + status_msg);
}
logln("----------------------");
}
//if bucket_id is number, then it's interpreted as bucket index
//if it's string, then it's interpreted as bucket size, e.g. bucket_id="0x480"
function lfh_bucket_stats(bucket_id, pool_index = -1){
const location_msgs = [
"available",
"full",
"decomission"
];
let current_pool = get_pool(pool_index);
let bucket_index = bucket_id;
if (typeof bucket_id === 'string')
bucket_index = lfh_size2index(bucket_id|0);
let lfh_context = current_pool.segment_heap.LfhContext;
let lfh_context_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "LfhContext"));
let max_block_size = lfh_context.Config.MaxBlockSize;
let max_affinity_slots = lfh_context.MaxAffinity;
logln("------------LFH Bucket[" + to_hex(bucket_index) + "] Stats - " + current_pool.name + "----------");
logln("LfhContext Address [_HEAP_LFH_CONTEXT]: " + to_hex(lfh_context_addr));
let bucket = lfh_context.Buckets[bucket_index];
let bucket_addr = lfh_context.Buckets[bucket_index].address;
let bucket_enabled = !(bucket_addr.convertToNumber()&1);
let bucket_size = lfh_index2size(bucket_index);
if (bucket_size>=max_block_size){
logln("Bucket size provided is bigger than LFH max block size: " + to_hex(max_block_size));
return;
}
if (!bucket_enabled){
logln("Bucket with index " + to_hex(bucket_index) + " and size " + to_hex(bucket_size) + " is disabled");
return;
}
logln("Bucket address [_HEAP_LFH_BUCKET]: " + to_hex(bucket_addr));
logln("Bucket size: " + to_hex(bucket_size));
let affinity_indexes = read_mem(bucket.ProcAffinityMapping.address, 1, max_affinity_slots);
let processed_affinity_indexes = [];
for (let i = 0; i < max_affinity_slots; i++) {
let current_affinity_index = affinity_indexes[i];
if (processed_affinity_indexes.indexOf(current_affinity_index)!==-1){
continue;
}
logln(`\n########## Affinity Slot ${current_affinity_index} ##########`);
processed_affinity_indexes.push(current_affinity_index);
let current_affinity_slot = bucket.AffinitySlots[current_affinity_index];
let active_subsegment_addr = current_affinity_slot.ActiveSubsegment.Target.address.bitwiseShiftRight(12).bitwiseShiftLeft(12);
if (active_subsegment_addr != zero){
let active_subsegment = create_typed_object(active_subsegment_addr, "_HEAP_LFH_SUBSEGMENT");
let free_blocks = _lfh_subsegment_free_blocks(active_subsegment);
logln("ActiveSubsegment" + " free blocks: " + free_blocks.length + "/" + active_subsegment.BlockCount + " (" + to_hex(active_subsegment_addr) + ", " + to_hex(active_subsegment.Owner.address) + ", " + active_subsegment.Location + ")");
//for (let free_block of free_blocks) {
// logln(to_hex(free_block));
//}
logln("Free block addresses: " + free_blocks);
} else {
logln("ActiveSubsegment is nul"); //this appears to be true in usermode segment heap implementation
}
logln("--------------------------");
let counter = 0;
for(let subsegment of _list_entry_iterator(current_affinity_slot.State.AvailableSubsegmentList, globals.default_module + "!_HEAP_LFH_SUBSEGMENT", "ListEntry")){
let subsegment_address = _list_entry_address(subsegment.ListEntry);
let free_blocks = _lfh_subsegment_free_blocks(subsegment);
logln(`AvailableSubsegment[` + to_hex(counter) + "]" + " free blocks: " + free_blocks.length + "/" + subsegment.BlockCount + " (" + to_hex(subsegment_address) + ", " + to_hex(subsegment.Owner.address) + ", " + subsegment.Location + ")");
counter += 1;
}
}
logln("\n######## Bucket State ###########");
let bucket_state = bucket.State;
let counter = 0;
for(let subsegment of _list_entry_iterator(bucket_state.AvailableSubsegmentList, globals.default_module + "!_HEAP_LFH_SUBSEGMENT", "ListEntry")){
let subsegment_address = _list_entry_address(subsegment.ListEntry);
let free_blocks = _lfh_subsegment_free_blocks(subsegment);
logln("Bucket state AvailableSubsegment[" + to_hex(counter) + "] free blocks: " + free_blocks.length + "/" + subsegment.BlockCount + " (" + to_hex(subsegment_address) + ", " + to_hex(subsegment.Owner.address) + ", " + subsegment.Location + ")");
counter += 1;
}
}
function _vs_header_sanity_check(vs_header){
let sane = true;
sane = sane && (vs_header.Allocated == 0 || vs_header.Allocated == 1);
sane = sane && (vs_header.UnsafeSize < 0x4000 && vs_header.UnsafePrevSize < 0x4000);
sane = sane && (vs_header.MemoryCost < align(vs_header.UnsafeSize*0x10, 12)/0x1000);
return sane;
}
//for completeness, we need to go through the underlying segment allocations and find which one our address falls into
//false positive rate is 1/2^16
function _vs_is_subsegment_first_page(addr){
let addr_page = addr.bitwiseAnd(~0xfff);
let vs_sub = create_typed_object(addr_page, "_HEAP_VS_SUBSEGMENT");
return vs_sub.Size.bitwiseXor(vs_sub.Signature).bitwiseXor(0x2bed) == zero;
}
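//the check above relies on _HEAP_VS_SUBSEGMENT storing Signature == Size ^ 0x2bed,
//so Size ^ Signature ^ 0x2bed is zero only for a genuine subsegment header
//(modulo the 1/2^16 false positive rate noted above)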
//we can cache the chunk boundaries for a particular subsegment to speed up the execution of this function in case it's called often
//it might be a good idea to let the caller control the cache lifetime
//fast_path should speed up execution, the function will only try to find the chunk boundaries starting from the previous page instead of the beginning of the subsegment
function vs_chunk_start(target_chunk_addr, fast_path = false){
let current_chunk_addr;
//add within the first condition: && _vs_header_sanity_check(target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0))
//the above condition would make it unlikely to fail the fastpath in case the target chunk spans over multiple pages
if (fast_path && is_kernelspace_address(target_chunk_addr)){
current_chunk_addr = target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0);
if (_vs_is_subsegment_first_page(target_chunk_addr))
current_chunk_addr = target_chunk_addr.bitwiseAnd(~0xfff).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4)); //another way of calculating where the vs subsegment user blocks start is by doing: (((~subsegment->Size)+1)&0xfff)*16, maybe there is a better way
} else {
current_chunk_addr = seg_subsegment_start(target_chunk_addr).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4));
}
if (current_chunk_addr.compareTo(target_chunk_addr)>0){
debug_print("[vs_chunk_start] Target address is bigger than the beginning of the lookup range: " + to_hex(current_chunk_addr));
return undefined;
}
let loop_counter = 0;
while(true){
let header = vs_decode_header_abs(current_chunk_addr, true);
let next_chunk_addr = current_chunk_addr.add(header.UnsafeSize*0x10);
if (next_chunk_addr.compareTo(target_chunk_addr)>0)
break;
current_chunk_addr = next_chunk_addr;
loop_counter+=1;
/*
if (loop_counter>0x500){
logln("something is off, going out");
return undefined;
}
*/
}
return current_chunk_addr;
}
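//usage sketch: vs_decode_header below leans on this walk; it can also be called
//directly to normalize an arbitrary pointer into its chunk base, e.g.
//  let base = vs_chunk_start(host.parseInt64("0xffff9804c6838ff0"), true);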
function _vs_test(target_chunk_addr, fast_path = false){
let current_chunk_addr;
//add within the first condition: && _vs_header_sanity_check(target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0))
//the above condition would make it unlikely to fail the fastpath in case the target chunk spans over multiple pages
if (fast_path && is_kernelspace_address(target_chunk_addr)){
current_chunk_addr = target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0);
if (_vs_is_subsegment_first_page(target_chunk_addr))
current_chunk_addr = target_chunk_addr.bitwiseAnd(~0xfff).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4)); //another way of calculating where the vs subsegment user blocks start is by doing: (((~subsegment->Size)+1)&0xfff)*16, maybe there is a better way
} else {
current_chunk_addr = seg_subsegment_start(target_chunk_addr).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4));
}
if (current_chunk_addr.compareTo(target_chunk_addr)>0){
debug_print("[vs_chunk_start] Target address is bigger than the beginning of the lookup range: " + to_hex(current_chunk_addr));
return undefined;
}
let subsegment_end_address = current_chunk_addr.add(0x10000);
while(true){
let header = vs_decode_header_abs(current_chunk_addr, true);
let pool_header_addr = current_chunk_addr.add(0x10);
if (current_chunk_addr.bitwiseAnd(0xfff) == 0xfe0)
pool_header_addr = pool_header_addr.add(0x10);
if (header.UnsafeSize<0xfd0){
let pool_header = create_pointer_object(pool_header_addr, "_POOL_HEADER*");
logln("Current chunk: " + to_hex(current_chunk_addr) + " diff: " +to_hex(header.UnsafeSize.subtract(pool_header.BlockSize)));
}
let next_chunk_addr = current_chunk_addr.add(header.UnsafeSize*0x10);
if (next_chunk_addr.compareTo(subsegment_end_address)>=0)
break;
current_chunk_addr = next_chunk_addr;
}
return current_chunk_addr;
}
function vs_decode_header(vs_header_addr, fast_path = false, partial_decode=false){
let actual_vs_header_addr = vs_chunk_start(vs_header_addr, fast_path);
if (actual_vs_header_addr == undefined){
logln("Couldnt identify the beginning of the chunk");
return;
}
if (actual_vs_header_addr.compareTo(vs_header_addr)!=0){
logln("Original adresss: " + to_hex(vs_header_addr));
logln("Identified vs header address: " + to_hex(actual_vs_header_addr));
}
return vs_decode_header_abs(actual_vs_header_addr, partial_decode);
}
function vs_decode_header_abs(vs_header_addr, partial_decode=false){
if (typeof vs_header_addr === 'string'){
vs_header_addr = host.parseInt64(vs_header_addr);
}
let decoded_header = vs_header_addr;
decoded_header = decoded_header.bitwiseXor(host.memory.readMemoryValues(vs_header_addr, 1, 8)[0]);
decoded_header = decoded_header.bitwiseXor(globals.RtlpHpHeapGlobals.HeapKey);
//instantiating objects from js bytes would have been a useful feature for the windbg js engine. Unfortunately this doesn't seem to exist yet, so we recreate the _HEAP_VS_CHUNK_HEADER ourselves
//let memory_cost = decoded_header.bitwiseAnd(0xffff);
let memory_cost = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.MemoryCost");
//let unsafe_size = decoded_header.bitwiseShiftRight(16) & 0xffff;
let unsafe_size = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.UnsafeSize");
//saves time, within this script we don't really use any of the other fields
if (partial_decode)
return {
MemoryCost: memory_cost,
UnsafeSize: unsafe_size,
};
//let unsafe_prev_size = decoded_header.bitwiseShiftRight(32) & 0xffff;
let unsafe_prev_size = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.UnsafePrevSize");
//let allocated = decoded_header.bitwiseShiftRight(48) & 0xff;
let allocated = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.Allocated");
let extra = decoded_header.bitwiseShiftRight(56);
if (allocated) {
let allocated_chunk_bits = host.memory.readMemoryValues(vs_header_addr.add(offsetof("_HEAP_VS_CHUNK_HEADER", "EncodedSegmentPageOffset")), 1, 4)[0];
let segment_page_offset = vs_header_addr.bitwiseAnd(0xff);
segment_page_offset ^= allocated_chunk_bits & 0xff;
segment_page_offset ^= globals.RtlpHpHeapGlobals.HeapKey.bitwiseAnd(0xff)
//let unused_bytes = (allocated_chunk_bits >>> 8) & 1;
let unused_bytes = extract_field_64(allocated_chunk_bits, "_HEAP_VS_CHUNK_HEADER", "UnusedBytes");
//let skip_during_walk = (allocated_chunk_bits >>> 9) & 1;
let skip_during_walk = extract_field_64(allocated_chunk_bits, "_HEAP_VS_CHUNK_HEADER", "SkipDuringWalk");
//these two require properly setting the segment heap before calling the vs decode
let dynamic_lookaside = vs_dynamic_lookaside_block(vs_header_addr, unsafe_size*0x10);
let delay_free_list = vs_delay_free_list_block(vs_header_addr);
/*
if (globals.kernelmode){
let pool_header_addr = vs_header_addr.add(0x10);
if (vs_header_addr.bitwiseAnd(0xfff) == 0xfe0){
pool_header_addr = pool_header_addr.add(0x10);
probably_delay_free_list = unsafe_size<0x100 && globals.is_valid_address(read_mem(vs_header_addr.add(0x10), 8));
}
let pool_header = create_typed_object(pool_header_addr, "_POOL_HEADER");
//test whether the pool header appears to be corrupted, ie out of sync with vs header size
if (unsafe_size>=0x100 || (pool_header.BlockSize < unsafe_size && pool_header.BlockSize+3>unsafe_size)) {
potentially_delay_free_list = false;
potentially_dynamic_lookaside = false;
}
}
*/
return {
MemoryCost: memory_cost,
UnsafeSize: unsafe_size,
UnsafePrevSize: unsafe_prev_size,
Allocated: allocated,
Extra: extra,
//allocated chunk headers
SegmentPageOffset: segment_page_offset,
UnusedBytes: unused_bytes, //todo: print the actual unused bytes
SkipDuringWalk: skip_during_walk,
AllocatedChunkBits: allocated_chunk_bits,
//specify whether the chunk is within either dynamic lookaside or delay free list
DelayFreeList: delay_free_list,
DynamicLookaside: dynamic_lookaside,
};
}
//handle the fields in case the chunk is free
let left = read_mem(vs_header_addr.add(8), 8);
let right = read_mem(vs_header_addr.add(0x10), 8);
let parent_val = read_mem(vs_header_addr.add(0x18), 8);
//extract_field_64(parent_val, "_RTL_BALANCED_NODE", "Red");
let parent = parent_val.bitwiseAnd(~3);
let red = parent_val.bitwiseAnd(1);
let balance = parent_val.bitwiseAnd(3);
return {
MemoryCost: memory_cost,
UnsafeSize: unsafe_size,
UnsafePrevSize: unsafe_prev_size,
Allocated: allocated,
Extra: extra,
//freed chunk headers
Left: left,
Right: right,
Parent: parent,
Red: red,
Balance: balance,
};
}
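//decoding sketch: with H the qword stored at the chunk start, the plaintext
//header is H ^ chunk_address ^ RtlpHpHeapGlobals.HeapKey; UnsafeSize sits in
//bits 16..31 of that value and is expressed in 16-byte units, which is why the
//walkers above multiply it by 0x10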
function vs_freechunktree_stats(min_size = 0, max_size = 0xfffff0, pool_index = -1, root_node = null){
let sizes_freq = [];
let current_pool = get_pool(pool_index);
if (!root_node)
root_node = current_pool.segment_heap.VsContext.FreeChunkTree.Root;
let free_chunk_tree_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "VsContext.FreeChunkTree"));
let encoded_root = current_pool.segment_heap.VsContext.FreeChunkTree.Encoded;
if (encoded_root)
root_node = create_pointer_object(root_node.address.bitwiseXor(free_chunk_tree_addr), "_RTL_BALANCED_NODE*");
if (root_node.address == zero){
logln("FreeChunkTree is empty for " + current_pool.name);
return;
}
let total_size = 0;
let n = 0;
logln("----------FreeChunkTree Stats - " + current_pool.name + " -------------");
logln("FreeChunkTree address [_RTL_RB_TREE]: " + to_hex(free_chunk_tree_addr));
logln("Filtering sizes " + to_hex(min_size) + "-" + to_hex(max_size));
logln("[%Size%] %Frequency%:")
let node_evaluator = function (node) {
let header = vs_decode_header_abs(node.address.subtract(8), true)
let sz = header.UnsafeSize * 0x10;
let result = 0;
if (sz < min_size)
result = 1;
else if (sz > max_size)
result = -1;
return {result: result, extra: header};
};
for (let node_info of rbtree_iterator(root_node, node_evaluator)){
let current_node = node_info.node;
let current_node_vs_header = node_info.extra
let current_node_size = current_node_vs_header.UnsafeSize * 16;
let current_position = node_info.position;
sizes_freq[current_node_size] = (sizes_freq[current_node_size]||0) + 1;
total_size += current_node_size;
n++;
logln(to_hex(current_node.address) + " sz: " + to_hex(current_node_size) + " cost:" + to_hex(current_node_vs_header.MemoryCost) + " d:" + current_position);
/*
if (n%100==0)
logln(n);
*/
}
let col = 0;
for (const [i, value] of sizes_freq.entries()) {
if (value) {
log(("[" + to_hex(i) + "] " + to_hex(value)).padEnd(16) + "\t\t");
col++;
if (col==3){
logln("");
col = 0;
}
}
}
logln("\n++++++++++++++++++++++");
logln("Total number of chunks: " + to_hex(n));
logln("Total size: " + to_hex(total_size) + " (" + (total_size/(1024*1024)).toFixed(2) + " mb)");
logln("------------------------\n");
}
//vs_dynamic_lookaside_* assume the lookaside start index is 0x21, might be more accurate to use something like: lfh_size2index(LfhContext.Config.MaxBlockSize+sizeof(generic_handler_header e.g. POOL_HEADER))
//dynamic lookaside chunk size to index
function vs_dynamic_lookaside_s2i(sz) {
if (sz < globals.dynamic_lookaside_min_size){
debug_print(`vs_dynamic_lookaside_s2i: returning -1, ${to_hex(sz)} < ${to_hex(globals.dynamic_lookaside_min_size)}:dynamic_lookaside_min_size`);
return -1;
}
if (sz > globals.dynamic_lookaside_max_size){
debug_print(`vs_dynamic_lookaside_s2i: returning -1, ${to_hex(sz)} > ${to_hex(globals.dynamic_lookaside_max_size)}:dynamic_lookaside_max_size`);
return -1;
}
return lfh_size2index(sz)-0x21;
}
//dynamic lookaside index to chunk size
function vs_dynamic_lookaside_i2s(bucket_index){
return globals.RtlpBucketBlockSizes[0x21 + bucket_index]
}
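//the two helpers above mirror each other: lookaside bucket i corresponds to
//LFH bucket 0x21+i, so the lookaside window starts right past the LFH cutoff;
//both directions go through the shared RtlpBucketBlockSizes table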
function vs_dynamic_lookaside_bucket_info(size, pool_index = -1){
let current_pool = get_pool(pool_index);
let dl_blocks = []
let dynamic_lookaside = current_pool.dynamic_lookaside;
if (dynamic_lookaside.address==zero) {
logln("[!] Dynamic lookaside disabled for " + current_pool.name);
return [];
}
let enable_bucket_bitmap = dynamic_lookaside.EnabledBucketBitmap;
logln("------Dynamic Lookaside Info - " + current_pool.name + "--------");
logln("Dynamic Lookaside Address [_RTL_DYNAMIC_LOOKASIDE]: " + to_hex(dynamic_lookaside.address));
logln("Enable Bucket Bitmap: " + to_hex(enable_bucket_bitmap));
logln("-------------------------");
let i = vs_dynamic_lookaside_s2i(size);
if (i<0 || i>=dynamic_lookaside.Buckets.Count())
return [];
let current_bucket_depth = dynamic_lookaside.Buckets[i].Depth;
let current_bucket_entries = dynamic_lookaside.Buckets[i].ListHead.HeaderX64.Depth;
let current_bucket_bitmap_activity = enable_bucket_bitmap.bitwiseShiftRight(i).bitwiseAnd(1) == 1;
let current_bucket_address = dynamic_lookaside.address.add(offsetof("_RTL_DYNAMIC_LOOKASIDE", "Buckets")).add(i*type_size("_RTL_LOOKASIDE"));
let bucket_activity_code = ((current_bucket_depth!=0)<<1) | (current_bucket_entries!=0);
if (!bucket_activity_code){
return [];
}
let current_bucket_next_chunk = dynamic_lookaside.Buckets[i].ListHead.Region;
let current_bucket_size = vs_dynamic_lookaside_i2s(i);
let current_bucket_maximum_depth = dynamic_lookaside.Buckets[i].MaximumDepth;
for(let addr of _slist_iterator(dynamic_lookaside.Buckets[i].ListHead)){
logln(to_hex(addr));
dl_blocks.push(addr);
}
return dl_blocks;
}
function vs_dynamic_lookaside_info(pool_index = -1){
//depth nentries
//0     0        inactive, empty
//0     1        inactive, but the bucket still has chunks
//1     0        active, but no chunks currently in the bucket
//1     1        active and contains chunks
const bucket_activity_msgs = [
"inactive",
"inactive, but the bucket still has chunks",
"active, but no chunks currently in the bucket",
"active and contains chunks"
];
let current_pool = get_pool(pool_index);
let dynamic_lookaside = current_pool.dynamic_lookaside;
if (dynamic_lookaside.address==zero) {
logln("[!] Dynamic lookaside disabled for " + current_pool.name);
return;
}
let enable_bucket_bitmap = dynamic_lookaside.EnabledBucketBitmap;
logln("------Dynamic Lookaside Info - " + current_pool.name + "--------");
logln("Dynamic Lookaside Address [_RTL_DYNAMIC_LOOKASIDE]: " + to_hex(dynamic_lookaside.address));
logln("Enable Bucket Bitmap: " + to_hex(enable_bucket_bitmap));
logln("-------------------------");
for (let i=0;i<dynamic_lookaside.Buckets.Count();i++){
let current_bucket_depth = dynamic_lookaside.Buckets[i].Depth;
let current_bucket_entries = dynamic_lookaside.Buckets[i].ListHead.HeaderX64.Depth;
let current_bucket_bitmap_activity = enable_bucket_bitmap.bitwiseShiftRight(i).bitwiseAnd(1) == 1;
let current_bucket_address = dynamic_lookaside.address.add(offsetof("_RTL_DYNAMIC_LOOKASIDE", "Buckets")).add(i*type_size("_RTL_LOOKASIDE"));
let bucket_activity_code = ((current_bucket_depth!=0)<<1) | (current_bucket_entries!=0);
let bucket_activity_msg = bucket_activity_msgs[bucket_activity_code];
if (!bucket_activity_code){
continue;
}
let current_bucket_next_chunk = dynamic_lookaside.Buckets[i].ListHead.Region;
let current_bucket_size = vs_dynamic_lookaside_i2s(i);
let current_bucket_maximum_depth = dynamic_lookaside.Buckets[i].MaximumDepth;
logln("Bucket address [_RTL_LOOKASIDE]: " + to_hex(current_bucket_address));
logln("Size " + to_hex(current_bucket_size) + ", index: " + to_hex(i) + " [" + bucket_activity_msg + "]");
if ((current_bucket_depth!=0) ^ current_bucket_bitmap_activity) {
logln("*** Note: bitmap activity != depth indication, check it out"); //never seen this, ifaict not possible
}
logln("Current instance maximum depth: " + to_hex(current_bucket_depth) + " (max depth: " + to_hex(current_bucket_maximum_depth) + ")");
logln("Number of entries in the list: " + to_hex(current_bucket_entries));
logln("Head of the list: " + to_hex(current_bucket_next_chunk));
logln("++++++++++++++++++++");
}
logln("");
}
function vs_dynamic_lookaside_vssize_blocks(vs_size) {
let lowest_size_estimation = vs_size-0x20;
let highest_size_estimation = vs_size-0x10;
let dl_blocks = [];
let prev_index = -1;
logging_enabled = false;
for (let current_size = lowest_size_estimation; current_size <= highest_size_estimation; current_size += 0x10){
let current_index = vs_dynamic_lookaside_s2i(current_size);
if (current_index < 0 || current_index == prev_index)
continue;
dl_blocks.push(...vs_dynamic_lookaside_bucket_info(current_size));
prev_index = current_index;
}
logging_enabled = true;
return dl_blocks;
}
//returns true if a block belongs to the dynamic lookaside
//block addr is expected to be the beginning of the chunk
function vs_dynamic_lookaside_block(chunk_addr, vssize) {
let chunk_start = chunk_addr;
let chunk_end = chunk_addr.add(vssize);
for (let dl_chunk_addr of vs_dynamic_lookaside_vssize_blocks(vssize)){
debug_print("currnet: " + to_hex(dl_chunk_addr));
if (chunk_start.compareTo(dl_chunk_addr) <= 0 && chunk_end.compareTo(dl_chunk_addr) >= 0)
return true;
}
return false;
}
function _vs_delay_free_list_info(pool_index = -1){
let current_pool = get_pool(pool_index);
let vs_context = current_pool.segment_heap.VsContext;
if (!vs_context.Config.Flags.EnableDelayFree){
return undefined;
}
let dfc = current_pool.segment_heap.VsContext.DelayFreeContext;
let dfc_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "VsContext.DelayFreeContext"));
let depth = dfc.ListHead.HeaderX64.Depth;
let delay_free_list = [];
let current_addr = dfc.ListHead.HeaderX64.NextEntry.bitwiseShiftLeft(4);
for (let i=0;i<depth;i++){
delay_free_list.push(current_addr);
current_addr = host.memory.readMemoryValues(current_addr, 1, 8)[0];
}
return {
dfc_addr: dfc_addr,
depth: depth,
delay_free_list: delay_free_list,
}
}
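//note: HeaderX64.NextEntry stores the first entry's address shifted right by 4
//(SLIST entries are 16-byte aligned), hence the bitwiseShiftLeft(4) above; the
//list is then walked Depth times through each entry's first qword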
function vs_delay_free_list(pool_index = -1){
let current_pool = get_pool(pool_index);
let vs_context = current_pool.segment_heap.VsContext;
let dfli = _vs_delay_free_list_info(pool_index);
if (dfli === undefined){
logln("The delayed free list is disalbed for " + current_pool.name)